Coding 5: Convolutional Networks on CIFAR10

In this exercise, we will (again) improve last week's classifier, this time by upgrading it to a convolutional network.

alt text

Image credits to https://lilly021.com/wp-content/uploads/2019/05/img2.png.

Tensorboard (run this only once).

In [ ]:
%load_ext tensorboard

Load CIFAR-10.

In [ ]:
import torch
import torchvision
import torchvision.transforms as transforms

  
def fetch_dataloader(batch_size):
    """
    Build train/validation DataLoaders over CIFAR-10.

    The official 50k-image training split is divided into the first
    40k images (training) and the remaining 10k (validation).

    Args:
        batch_size: number of (image, label) pairs per batch.

    Returns:
        (trn_loader, val_loader): iterables of (image, label) batches.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.49, 0.47, 0.42), (0.24, 0.23, 0.24)),
    ])

    def _make_loader(lo, hi, shuffle):
        # Each loader gets its own copy of the training set, sliced to
        # the requested index range.
        dataset = torchvision.datasets.CIFAR10(
            root='./data', train=True, download=True, transform=transform)
        dataset.data = dataset.data[lo:hi]
        dataset.targets = dataset.targets[lo:hi]
        return torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)

    trn_loader = _make_loader(None, 40000, True)
    val_loader = _make_loader(40000, None, False)
    return trn_loader, val_loader

Implement the model here.

In [ ]:
import torch
import numpy as np


class ConvNet(torch.nn.Module):
    """Small convolutional classifier for 32x32 RGB images (e.g. CIFAR-10)."""

    def __init__(self, input_dim, n_classes):
        """
        Args:
            input_dim: number of input channels (3 for RGB).
            n_classes: number of output classes (10 for CIFAR-10).
        """
        super().__init__()
        # Three conv stages, each halving spatial resolution: 32 -> 16 -> 8 -> 4.
        self.features = torch.nn.Sequential(
            torch.nn.Conv2d(input_dim, 32, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
            torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
            torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        # Global average pooling makes the classifier independent of the
        # exact input resolution.
        self.pool = torch.nn.AdaptiveAvgPool2d(1)
        self.classifier = torch.nn.Linear(128, n_classes)

    def forward(self, x):
        """
        Args:
            x: float tensor of shape (batch, input_dim, H, W).

        Returns:
            Unnormalized class logits of shape (batch, n_classes).
        """
        z = self.features(x)
        z = self.pool(z).flatten(1)
        return self.classifier(z)
    
    
def train(model, train_data, val_data, writer, device,
          lr=0.01, n_epochs=20):
    """
    Train `model` with Adam + cross-entropy, validating after each epoch.

    Args:
        model: torch.nn.Module producing class logits.
        train_data: iterable of (x, y) batches used for optimization.
        val_data: iterable of (x, y) batches used for evaluation only.
        writer: tensorboard-style writer exposing `add_scalar(tag, value, step)`.
        device: torch.device to move the model and batches to.
        lr: Adam learning rate.
        n_epochs: number of passes over `train_data`.
    """
    optim = torch.optim.Adam(model.parameters(), lr=lr)
    loss_func = torch.nn.CrossEntropyLoss()
    steps = 0

    model.to(device)

    for epoch in range(n_epochs):
        train_acc = []
        val_acc = []

        # Train: enable training-mode behavior (dropout, batch-norm stats).
        model.train()
        for x, y in train_data:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_func(y_pred, y)

            train_acc.extend((y_pred.argmax(dim=1) == y).float().cpu().numpy())

            optim.zero_grad()
            loss.backward()
            optim.step()

            steps += 1
            writer.add_scalar('loss/train_batch', loss.item(), steps)

        # Validation: no gradients needed, and eval mode makes
        # dropout/batch-norm deterministic.
        model.eval()
        with torch.no_grad():
            for x, y in val_data:
                x, y = x.to(device), y.to(device)
                y_pred = model(x)

                val_acc.extend((y_pred.argmax(dim=1) == y).float().cpu().numpy())

        writer.add_scalar('accuracy/train_epoch', float(np.mean(train_acc)), epoch)
        writer.add_scalar('accuracy/valid_epoch', float(np.mean(val_acc)), epoch)

Train your network

In [ ]:
%reload_ext tensorboard
%tensorboard --logdir log --reload_interval 1

import time
import torch.utils.tensorboard as tb


# Build data loaders, model, and a timestamped tensorboard log directory,
# then run training.
train_data, val_data = fetch_dataloader(batch_size=128)
model = ConvNet(3, 10)
writer = tb.SummaryWriter('log/{}'.format(time.strftime('%m-%d-%H-%M')))
# Fall back to CPU so the notebook also runs on machines without a GPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

train(model, train_data, val_data, writer, device)
In [ ]: