add: PyTorch Lightning (#14)
* Update requirements

* Refactor model name

* update: gitignore lightning_logs

* update: torch usage in classic MNIST demo

* update: classic MNIST model

* add: MNIST model with PyTorch Lightning.

* update: README

* update: rebuild website

* update: PyTorch notebooks

* update: rebuild website

* add: PyTorch Lightning page to nav

* update: website build

Co-authored-by: alexioannides <[email protected]>
AlexIoannides and alexioannides authored Dec 19, 2022
1 parent e4221ba commit 75ae72a
Showing 53 changed files with 4,831 additions and 383 deletions.
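
The headline addition named in the commit message, an MNIST model built with PyTorch Lightning, sits in files not shown in the truncated diff below. For orientation only, here is a minimal sketch of what such a LightningModule typically looks like; the class name, layer sizes and learning rate are assumptions, not this commit's actual code.

# Hypothetical sketch only; not the module added by this commit.
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F


class LitClassifyMNIST(pl.LightningModule):
    """Minimal MNIST classifier expressed as a LightningModule."""

    def __init__(self, n_hidden_neurons: int, learning_rate: float = 0.05):
        super().__init__()
        self.learning_rate = learning_rate
        self.model = nn.Sequential(
            nn.Flatten(),
            nn.Linear(28 * 28, n_hidden_neurons),
            nn.ReLU(),
            nn.Linear(n_hidden_neurons, 10),
        )

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        return self.model(X)

    def training_step(self, batch, batch_idx):
        X, y = batch
        loss = F.cross_entropy(self(X), y)
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=self.learning_rate)

Run with pytorch_lightning.Trainer, a module like this replaces the hand-written training loop in the classic notebook, and Lightning's default logger writes to lightning_logs/, which explains the .gitignore change directly below.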
1 change: 1 addition & 0 deletions .gitignore
@@ -4,6 +4,7 @@

# PyTorch
data/
lightning_logs

# mlflow
mlflow/artefacts
76 changes: 47 additions & 29 deletions demos/pytorch/MNIST.ipynb
@@ -1,21 +1,25 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "departmental-prince",
"metadata": {},
"source": [
"# Deep Learning\n",
"\n",
"This notebook tackles the classic [MNIST](https://en.wikipedia.org/wiki/MNIST_database) classification task, as a proxy for building simple neural networks with PyTorch."
"This notebook tackles the classic [MNIST](https://en.wikipedia.org/wiki/MNIST_database) image classification task, as a proxy for building simple neural networks with PyTorch."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "solar-steel",
"metadata": {},
"source": [
"## Imports and Configuration"
"## Imports and Configuration\n",
"\n",
"Set random seeds."
]
},
{
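
The code cell that does the seeding is collapsed between these hunks; judging by the torch._C.Generator output in the next hunk, it presumably comes down to a call like this (the seed value itself is an assumption):

import torch

torch.manual_seed(42)  # returns a torch._C.Generator, matching the output shown below
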
@@ -27,7 +31,7 @@
{
"data": {
"text/plain": [
"<torch._C.Generator at 0x10772e270>"
"<torch._C.Generator at 0x109a310f0>"
]
},
"execution_count": 1,
@@ -73,21 +77,21 @@
],
"source": [
"train_data = torchvision.datasets.MNIST(\n",
" root='./data',\n",
" root=\"./data\",\n",
" train=True,\n",
" download=True,\n",
" transform=torchvision.transforms.ToTensor()\n",
" transform=torchvision.transforms.ToTensor(),\n",
")\n",
"\n",
"test_data = torchvision.datasets.MNIST(\n",
" root='./data',\n",
" root=\"./data\",\n",
" train=False,\n",
" download=True,\n",
" transform=torchvision.transforms.ToTensor()\n",
" transform=torchvision.transforms.ToTensor(),\n",
")\n",
"\n",
"print(f'{len(train_data):,} instances of training data')\n",
"print(f'{len(test_data):,} instances of training data')"
"print(f\"{len(train_data):,} instances of training data\")\n",
"print(f\"{len(test_data):,} instances of training data\")"
]
},
{
@@ -124,8 +128,8 @@
],
"source": [
"data_instance, data_label = train_data[0]\n",
"print(f'label = {data_label}')\n",
"_ = plt.imshow(data_instance[0], cmap='gray')"
"print(f\"label = {data_label}\")\n",
"_ = plt.imshow(data_instance[0], cmap=\"gray\")"
]
},
{
@@ -147,19 +151,20 @@
"source": [
"class ClassifyMNIST(nn.Module):\n",
" \"\"\"MNIST classification network.\"\"\"\n",
" \n",
"\n",
" def __init__(self, n_hidden_neurons):\n",
" super().__init__()\n",
" self.input_dim = 28 * 28\n",
" self.n_classes = 10\n",
" self.n_hidden_neurons = n_hidden_neurons\n",
"\n",
" self.model = nn.Sequential(\n",
" nn.Flatten(),\n",
" nn.Linear(self.input_dim, n_hidden_neurons),\n",
" nn.ReLU(),\n",
" nn.Linear(n_hidden_neurons, self.n_classes)\n",
" nn.Linear(n_hidden_neurons, self.n_classes),\n",
" )\n",
" \n",
" \n",
"\n",
" def forward(self, X: torch.Tensor) -> torch.Tensor:\n",
" \"\"\"Compute a prediction.\"\"\"\n",
" return self.model(X)"
@@ -185,12 +190,12 @@
" criterion: nn.Module,\n",
" data_loader: DataLoader,\n",
" n_epochs: int,\n",
" learning_rate: float\n",
" learning_rate: float,\n",
") -> Sequence[float]:\n",
" \"\"\"Train the model over multiple epochs recording the loss for each.\"\"\"\n",
"\n",
" def process_batch(X: torch.Tensor, y: torch.Tensor) -> float:\n",
" y_hat = model.forward(X.view(-1, model.input_dim))\n",
" y_hat = model.forward(X)\n",
" loss = criterion(y_hat, y)\n",
" optimiser.zero_grad()\n",
" loss.backward()\n",
@@ -200,6 +205,7 @@
" def process_epoch(n: int) -> float:\n",
" return [process_batch(X, y) for X, y in data_loader][-1]\n",
"\n",
" model.train()\n",
" optimiser = torch.optim.SGD(model.parameters(), lr=0.05)\n",
" training_run = [process_epoch(epoch) for epoch in tqdm(range(n_epochs))]\n",
" return training_run"
@@ -223,7 +229,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 10/10 [00:18<00:00, 1.81s/it]\n"
"100%|██████████| 10/10 [00:19<00:00, 1.95s/it]\n"
]
},
{
@@ -248,9 +254,9 @@
],
"source": [
"training_data_loader = DataLoader(dataset=train_data, batch_size=500)\n",
"nn_model = ClassifyMNIST(28*28)\n",
"model = ClassifyMNIST(28 * 28)\n",
"loss_func = nn.CrossEntropyLoss()\n",
"train(nn_model, loss_func, training_data_loader, n_epochs=10, learning_rate=0.05)"
"train(model, loss_func, training_data_loader, n_epochs=10, learning_rate=0.05)"
]
},
{
Expand All @@ -270,7 +276,7 @@
{
"data": {
"text/plain": [
"tensor(0.9236)"
"0.9236"
]
},
"execution_count": 7,
@@ -279,21 +285,30 @@
}
],
"source": [
"def accuracy(model: nn.Module, data_loader: DataLoader) -> float:\n",
" \"\"\"Compute classification accuracy.\"\"\"\n",
" model.eval()\n",
" with torch.inference_mode():\n",
" correct = 0\n",
" for X, y in data_loader:\n",
" y_hat = torch.argmax(model(X), 1)\n",
" correct += (y_hat == y).sum().tolist()\n",
"\n",
" accuracy = correct / len(test_data)\n",
" return accuracy\n",
"\n",
"\n",
"test_data_loader = DataLoader(dataset=test_data, batch_size=10000)\n",
"correct = 0\n",
"for X, y in test_data_loader:\n",
" _, y_hat = torch.max(nn_model.forward(X.view(-1, 28*28)), 1)\n",
" correct += (y_hat == y).sum()\n",
"accuracy = correct / len(test_data)\n",
"accuracy"
"accuracy(model, test_data_loader)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "coupled-familiar",
"metadata": {},
"source": [
"Let's test it on the test instance displayed above."
"Scoring a single instance manually."
]
},
{
@@ -314,7 +329,10 @@
}
],
"source": [
"output_from_final_layer = nn_model.forward(data_instance.view(-1, 28*28))\n",
"model.eval()\n",
"with torch.inference_mode():\n",
" output_from_final_layer = model.forward(data_instance)\n",
"\n",
"value, index = torch.max(output_from_final_layer, 1)\n",
"index"
]
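
The remaining 51 changed files, including the new Lightning notebook and the rebuilt website, are not shown above. Assuming a LightningModule along the lines of the sketch before the diff, training it would reduce to something like the following; the Trainer arguments and batch size are assumptions carried over from the classic notebook.

# Hypothetical usage sketch; mirrors the DataLoader setup from the classic notebook.
import pytorch_lightning as pl
import torchvision
from torch.utils.data import DataLoader

train_data = torchvision.datasets.MNIST(
    root="./data",
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor(),
)

trainer = pl.Trainer(max_epochs=10)
trainer.fit(LitClassifyMNIST(28 * 28), DataLoader(train_data, batch_size=500))
# Checkpoints and metrics land in lightning_logs/, the directory this commit gitignores.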
