This was inspired by: https://github.com/huggingface/transformers/blob/master/src/transformers/utils/notebook.py
import os
import warnings

import pytorch_lightning as pl
import torch
from fastcore.all import Path
from nbdev.export import *
from nbdev.export import Config
from nbdev.showdoc import *
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
from torchvision.datasets import FashionMNIST

warnings.filterwarnings("ignore")

# download FashionMNIST into the notebooks data folder
source = Path(Config().path("nbs_path")) / "data"
os.makedirs(source, exist_ok=True)
# fmt: off
# data: FashionMNIST train/validation splits, each wrapped in a DataLoader
mnist_train = FashionMNIST(source, train=True, download=True, transform=transforms.ToTensor())
mnist_train = DataLoader(mnist_train, batch_size=32, num_workers=4)
mnist_val = FashionMNIST(source, train=False, download=True, transform=transforms.ToTensor())
mnist_val = DataLoader(mnist_val, batch_size=32, num_workers=4)
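# Optional sanity check (a minimal sketch, not required for training): pull one
# batch from the loader and confirm the expected shapes. FashionMNIST yields
# 1x28x28 grayscale images with one integer label per image.
xb, yb = next(iter(mnist_train))
assert xb.shape == (32, 1, 28, 28) and yb.shape == (32,)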
class CoolSystem(pl.LightningModule):
    "A single-layer classifier over flattened 28x28 images"

    def __init__(self, classes=10):
        super().__init__()
        self.save_hyperparameters()
        self.l1 = torch.nn.Linear(28 * 28, self.hparams.classes)
        # note: torchmetrics >= 0.11 requires Accuracy(task="multiclass", num_classes=classes)
        self.train_metric = Accuracy()
        self.valid_metric = Accuracy()

    def forward(self, x):
        return torch.relu(self.l1(x.view(x.size(0), -1)))

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        metric = self.train_metric(F.softmax(y_hat, dim=1), y)
        self.log_dict(dict(train_loss=loss, train_acc=metric))
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        metric = self.valid_metric(F.softmax(y_hat, dim=1), y)
        self.log_dict(dict(val_loss=loss, val_acc=metric))

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.001)
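# Quick smoke test (a minimal sketch, using only standard PyTorch): an untrained
# CoolSystem should map a random image batch to one score per class.
_model = CoolSystem()
with torch.no_grad():
    out = _model(torch.randn(4, 1, 28, 28))
assert out.shape == (4, 10)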
model = CoolSystem()
# NotebookProgressCallback is defined earlier in this notebook; checkpointing
# and logging are disabled so only the progress-bar output is shown.
trainer = pl.Trainer(
    callbacks=[NotebookProgressCallback()],
    checkpoint_callback=False,  # renamed enable_checkpointing in pytorch_lightning >= 1.5
    max_epochs=10,
    logger=False,
)
trainer.fit(model, mnist_train, mnist_val)
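# After training, an inference sketch in plain PyTorch (an illustration, not
# part of the original example): switch to eval mode and take the argmax over
# class scores for one validation batch.
model.eval()
xb, yb = next(iter(mnist_val))
with torch.no_grad():
    preds = model(xb).argmax(dim=1)
print((preds == yb).float().mean())  # rough accuracy on a single batch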