This module was inspired by https://github.com/huggingface/transformers/blob/master/src/transformers/utils/notebook.py
import warnings

from nbdev.export import *
from nbdev.export import Config
from nbdev.showdoc import *

warnings.filterwarnings("ignore")

format_time[source]

format_time(t)

Format t (in seconds) to (h):mm:ss
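A quick sanity check; the exact output strings below are an assumption, based on the transformers helper this port follows (the hour field is dropped when it is zero):

format_time(42)    # -> '00:42'
format_time(3725)  # -> '1:02:05'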

html_progress_bar[source]

html_progress_bar(value, total, prefix, label, width=300)
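Judging by the name and signature, this builds the HTML snippet for a bar at value/total, so it can be rendered directly in a notebook cell. A minimal sketch (the prefix and label strings are illustrative):

from IPython.display import HTML, display

display(HTML(html_progress_bar(60, 100, prefix="Training", label="60/100")))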

text_to_html_table[source]

text_to_html_table(items)

Put the texts in items in an HTML table.
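A minimal sketch, assuming (as in the transformers helper) that the first row of items holds the column headers:

from IPython.display import HTML, display

display(HTML(text_to_html_table([
    ["epoch", "val_loss", "val_acc"],
    ["0", "1.2919", "0.5526"],
])))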

class NotebookProgressBar[source]

NotebookProgressBar(total:int, prefix:Optional[str]=None, leave:bool=True, parent:Optional['NotebookTrainingTracker']=None, width:int=300)

A progress bar for display in a notebook.

Class attributes (overridden by derived classes):

- **warmup** (:obj:`int`) -- The number of iterations to do at the beginning while ignoring
  :obj:`update_every`.
- **update_every** (:obj:`float`) -- Since querying the clock has some overhead, the bar only re-renders roughly every
  :obj:`update_every` seconds. It uses the average time elapsed per iteration so far to guess the next
  value at which it should trigger an update.

Args:

- **total** (:obj:`int`) -- The total number of iterations to reach.
- **prefix** (:obj:`str`, `optional`) -- A prefix to add before the progress bar.
- **leave** (:obj:`bool`, `optional`, defaults to :obj:`True`) -- Whether or not to leave the progress bar once it's completed. You can always call the :meth:`~transformers.utils.notebook.NotebookProgressBar.close` method to make the bar disappear.
- **parent** (:class:`~transformers.notebook.NotebookTrainingTracker`, `optional`) -- A parent object (like :class:`~transformers.utils.notebook.NotebookTrainingTracker`) that spawns progress bars and handles their display. If set, the object passed must have a :obj:`display()` method.
- **width** (:obj:`int`, `optional`, defaults to 300) -- The width (in pixels) that the bar will take.

Example::

    import time

    pbar = NotebookProgressBar(100)
    for val in range(100):
        pbar.update(val)
        time.sleep(0.07)
    pbar.update(100)
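Since **warmup** and **update_every** are plain class attributes, a derived class can tune the refresh throttling. A minimal sketch (the subclass name is hypothetical):

class SlowRefreshBar(NotebookProgressBar):
    # Hypothetical subclass: skip the warmup phase and re-render
    # at most about once per second.
    warmup = 0
    update_every = 1.0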

class NotebookTrainingTracker[source]

NotebookTrainingTracker(num_steps, column_names=None, prefix='') :: NotebookProgressBar

An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.

Args:

- **num_steps** (:obj:`int`) -- The number of steps during training.
- **column_names** (:obj:`List[str]`, `optional`) -- The list of column names for the metrics table (will be inferred from the first call to :meth:`~transformers.utils.notebook.NotebookTrainingTracker.write_line` if not set).
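A minimal sketch of driving the tracker by hand, assuming the same update/write_line API as the transformers class it mirrors:

tracker = NotebookTrainingTracker(num_steps=100, column_names=["epoch", "loss"])
for step in range(1, 101):
    tracker.update(step)
    if step % 50 == 0:
        # write_line appends a row to the metrics table shown under the bar
        tracker.write_line({"epoch": step // 50, "loss": 1.0 / step})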

class NotebookProgressCallback[source]

NotebookProgressCallback() :: ProgressBarBase

A progress bar callback for display in a notebook. See the Usage section below for a full example.

Usage

import os

import pytorch_lightning as pl
import torch
from fastcore.all import Path
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from torchvision import transforms
from torchvision.datasets import FashionMNIST

source = Path(Config().path("nbs_path")) / "data"
os.makedirs(source, exist_ok=True)


# data
mnist_train = FashionMNIST(source, train=True, download=True, transform=transforms.ToTensor())
mnist_train = DataLoader(mnist_train, batch_size=32, num_workers=4)

mnist_val = FashionMNIST(source, train=False, download=True, transform=transforms.ToTensor())
mnist_val = DataLoader(mnist_val, batch_size=32, num_workers=4)

class CoolSystem(pl.LightningModule):
    def __init__(self, classes=10):
        super().__init__()
        self.save_hyperparameters()
        self.l1 = torch.nn.Linear(28 * 28, self.hparams.classes)
        self.train_metric = Accuracy()
        self.valid_metric = Accuracy()

    def forward(self, x):
        return torch.relu(self.l1(x.view(x.size(0), -1)))

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        metric = self.train_metric(F.softmax(y_hat, dim=1), y)
        log_dict = dict(train_loss=loss, train_acc=metric)
        self.log_dict(log_dict)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        metric = self.valid_metric(F.softmax(y_hat, dim=1), y)
        log_dict = dict(val_loss=loss, val_acc=metric)
        self.log_dict(log_dict)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.001)

model = CoolSystem()

trainer = pl.Trainer(
    callbacks=[NotebookProgressCallback()],
    checkpoint_callback=False,
    max_epochs=10,
    logger=False,
)

trainer.fit(model, mnist_train, mnist_val)
GPU available: False, used: False
TPU available: False, using: 0 TPU cores

  | Name         | Type     | Params
------------------------------------------
0 | l1           | Linear   | 7.9 K 
1 | train_metric | Accuracy | 0     
2 | valid_metric | Accuracy | 0     
------------------------------------------
7.9 K     Trainable params
0         Non-trainable params
7.9 K     Total params
0.031     Total estimated model params size (MB)
Training [18750/18750 01:12, Epoch 9 {'loss': '1.21'}]
epoch  val_loss  val_acc   train_loss  train_acc  time      samples/s
0      1.291946  0.552600  1.509367    0.500000   7.051600  310.283100
1      1.262589  0.554700  1.456340    0.500000   7.094600  308.404400
2      1.247708  0.555900  1.439657    0.500000   7.838800  279.123200
3      1.240134  0.557100  1.431610    0.500000   7.908400  276.667000
4      1.233447  0.557800  1.421274    0.500000   7.759200  281.988300
5      1.229756  0.557300  1.408852    0.500000   6.945500  315.026000
6      1.227072  0.558200  1.403393    0.500000   6.974500  313.713700
7      1.224863  0.558100  1.396521    0.500000   6.602400  331.392700
8      1.223277  0.558800  1.390022    0.500000   6.573700  332.843500
9      1.220788  0.559500  1.384899    0.500000   7.635200  286.567600
