Simplify examples #6

Merged (3 commits, Nov 13, 2022)
16 changes: 16 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,16 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.3.0
+    hooks:
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/psf/black
+    rev: 22.3.0
+    hooks:
+      - id: black
+  - repo: https://github.com/pycqa/isort
+    rev: 5.10.1
+    hooks:
+      - id: isort
+        name: isort (python)
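
The three pre-commit-hooks entries are cheap text-level fixers, while black and isort rewrite formatting and import order on every commit. As a rough illustration (not pre-commit's actual implementation), the two whitespace hooks amount to something like this Python sketch:

# Hypothetical sketch of what trailing-whitespace and end-of-file-fixer
# do; the real hooks live in the pre-commit/pre-commit-hooks repo.
from pathlib import Path


def fix_whitespace(path: str) -> None:
    text = Path(path).read_text()
    # trailing-whitespace: drop spaces and tabs at each line end
    lines = [line.rstrip() for line in text.splitlines()]
    # end-of-file-fixer: end the file with exactly one newline
    Path(path).write_text("\n".join(lines) + "\n")


fix_whitespace("example.py")  # "example.py" is a placeholder path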
12 changes: 9 additions & 3 deletions config/cifar100_config.yaml
@@ -1,12 +1,18 @@
 max_epochs: 1
 gpus: 1
 n: 5
-batch_size: 128
 segments: 2
 
 layer_type: continuous2d
 train_fraction: 1.0
 rescale_output: False
 linear_output: True
 periodicity: 2.0
 lr: 0.001
 nonlinearity: False
 
+data:
+  num_workers: 10
+  batch_size: 128
+
+defaults:
+  - optimizer: adam
+  - net: high_order_conv
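
With this layout, model keys stay at the top level of cifar100_config.yaml, data-loading options move under data, and the defaults list pulls in the optimizer/adam and net/high_order_conv group configs at composition time. A minimal sketch of an entry point consuming the composed config (the decorator paths assume the repo's config/ directory; the group configs' contents are not shown in this diff):

import hydra
from omegaconf import DictConfig, OmegaConf


@hydra.main(config_path="config", config_name="cifar100_config")
def run(cfg: DictConfig) -> None:
    print(OmegaConf.to_yaml(cfg))  # fully composed config
    print(cfg.data.batch_size)     # nested key from the data block
    print(cfg.optimizer)           # merged in by the defaults list


if __name__ == "__main__":
    run()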
12 changes: 6 additions & 6 deletions examples/cifar10.py
@@ -1,22 +1,22 @@
 import torch
-import torchvision
-import torchvision.transforms as transforms
 import torch.nn as nn
 import torch.nn.functional as F
 import torch.optim as optim
+import torchvision
+import torchvision.transforms as transforms
 from pytorch_lightning import LightningModule, Trainer
+from torchmetrics import Accuracy
 
 from high_order_layers_torch.FunctionalConvolution import (
-    PolynomialConvolution2d as PolyConv2d,
-    PiecewiseDiscontinuousPolynomialConvolution2d as PiecewiseDiscontinuousPolyConv2d,
-)
-from high_order_layers_torch.FunctionalConvolution import (
     PiecewisePolynomialConvolution2d as PiecewisePolyConv2d,
 )
+from high_order_layers_torch.FunctionalConvolution import (
+    PiecewiseDiscontinuousPolynomialConvolution2d as PiecewiseDiscontinuousPolyConv2d,
+    PolynomialConvolution2d as PolyConv2d,
+)
-
-from torchmetrics import Accuracy
 
 transform = transforms.Compose(
     [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
 )
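
The cifar10.py hunk is pure import reordering from the isort hook: standard library first, then third-party, then first-party, with the names inside parenthesized imports alphabetized. The ordering can be reproduced programmatically; a small sketch assuming isort 5.x (whether high_order_layers_torch counts as first-party depends on the isort configuration):

import isort

messy = (
    "import torchvision\n"
    "import torch\n"
    "import os\n"
    "from high_order_layers_torch.layers import *\n"
)
# isort.code() returns the source with imports grouped and sorted:
# stdlib (os) first, then third-party (torch, torchvision), then the rest.
print(isort.code(messy))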
206 changes: 45 additions & 161 deletions examples/cifar100.py
@@ -1,149 +1,50 @@
+import os
+
+import hydra
 import torch
 import torchvision
 import torchvision.transforms as transforms
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-from pytorch_lightning import LightningModule, Trainer
-from high_order_layers_torch.layers import *
-from torchmetrics import Accuracy
-from torchmetrics.functional import accuracy
-import hydra
 from omegaconf import DictConfig, OmegaConf
-import os
+from pytorch_lightning import LightningDataModule, Trainer
 
 from high_order_layers_torch.layers import *
+from high_order_layers_torch.modules import ClassificationNet
 
 transform = transforms.Compose(
     [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
 )
 
 
-class Net(LightningModule):
-    def __init__(self, cfg: DictConfig):
+class Cifar100DataModule(LightningDataModule):
+    def __init__(
+        self,
+        data_dir: str,
+        batch_size: int = 32,
+        train_fraction: float = 0.9,
+        num_workers: int = 1,
+    ):
         super().__init__()
-        self._cfg = cfg
-        try:
-            self._data_dir = f"{hydra.utils.get_original_cwd()}/data"
-        except:
-            self._data_dir = "../data"
-        self._lr = cfg.lr
-        n = cfg.n
-        self.n = cfg.n
-        self._batch_size = cfg.batch_size
-        self._layer_type = cfg.layer_type
-        self._train_fraction = cfg.train_fraction
-        segments = cfg.segments
-        self._topk_metric = Accuracy(top_k=5)
-        self._nonlinearity = cfg.nonlinearity
-        if self._layer_type == "standard":
-            out_channels1 = 6 * ((n - 1) * segments + 1)
-            self.conv1 = torch.nn.Conv2d(
-                in_channels=3, out_channels=out_channels1, kernel_size=5
-            )
-            self.norm1 = nn.BatchNorm2d(out_channels1)
-            out_channels2 = 6 * ((n - 1) * segments + 1)
-            self.conv2 = torch.nn.Conv2d(
-                in_channels=out_channels2, out_channels=16, kernel_size=5
-            )
-            self.norm2 = nn.BatchNorm2d(out_channels2)
-        if self._layer_type == "standard0":
-            self.conv1 = torch.nn.Conv2d(
-                in_channels=3, out_channels=6 * n, kernel_size=5
-            )
-            self.conv2 = torch.nn.Conv2d(
-                in_channels=6 * n, out_channels=16, kernel_size=5
-            )
-
-        else:
-            self.conv1 = high_order_convolution_layers(
-                layer_type=self._layer_type,
-                n=n,
-                in_channels=3,
-                out_channels=6,
-                kernel_size=5,
-                segments=cfg.segments,
-                rescale_output=cfg.rescale_output,
-                periodicity=cfg.periodicity,
-            )
-            self.norm1 = nn.BatchNorm2d(6)
-            self.conv2 = high_order_convolution_layers(
-                layer_type=self._layer_type,
-                n=n,
-                in_channels=6,
-                out_channels=16,
-                kernel_size=5,
-                segments=cfg.segments,
-                rescale_output=cfg.rescale_output,
-                periodicity=cfg.periodicity,
-            )
-            self.norm2 = nn.BatchNorm2d(16)
-
-        self.pool = nn.MaxPool2d(2, 2)
-        self.avg_pool = nn.AdaptiveAvgPool2d(5)
-        self.flatten = nn.Flatten()
-        if cfg.linear_output:
-            self.fc1 = nn.Linear(16 * 5 * 5, 100)
-        else:
-            self.fc1 = high_order_fc_layers(
-                layer_type=self._layer_type,
-                n=n,
-                in_features=16 * 5 * 5,
-                out_features=100,
-                segments=cfg.segments,
-            )
-        self.norm3 = nn.LayerNorm(100)
-
-    def forward(self, x):
-        if self._nonlinearity is True:
-            x = self.pool(F.relu(self.conv1(x)))
-            x = self.norm1(x)
-            x = self.pool(F.relu(self.conv2(x)))
-            x = self.norm2(x)
-            x = self.avg_pool(x)
-            x = self.flatten(x)
-            x = self.fc1(x)
-            x = self.norm3(x)
-        else:
-            x = self.pool(self.conv1(x))
-            x = self.norm1(x)
-            x = self.pool(self.conv2(x))
-            x = self.norm2(x)
-            x = self.avg_pool(x)
-            x = self.flatten(x)
-            x = self.fc1(x)
-            x = self.norm3(x)
-        return x
+        self._data_dir = data_dir
+        self._batch_size = batch_size
+        self._train_fraction = train_fraction
+        self._num_workers = num_workers
 
     def setup(self, stage):
-        num_train = int(self._train_fraction * 40000)
-        num_val = 10000
-        num_extra = 40000 - num_train
+        num_train = int(self._train_fraction * 50000)
+        num_val = 50000 - num_train
 
         train = torchvision.datasets.CIFAR100(
             root=self._data_dir, train=True, download=True, transform=transform
         )
 
         self._train_subset, self._val_subset, extra = torch.utils.data.random_split(
             train,
-            [num_train, 10000, num_extra],
+            [num_train, num_val, 0],
             generator=torch.Generator().manual_seed(1),
         )
 
-    def training_step(self, batch, batch_idx):
-        x, y = batch
-        y_hat = self(x)
-
-        loss = F.cross_entropy(y_hat, y)
-        preds = torch.argmax(y_hat, dim=1)
-
-        acc = accuracy(preds, y)
-        val = self._topk_metric(y_hat, y)
-        val = self._topk_metric.compute()
-
-        self.log(f"train_loss", loss, prog_bar=True)
-        self.log(f"train_acc", acc, prog_bar=True)
-        self.log(f"train_acc5", val, prog_bar=True)
-
-        return loss
+        self._testset = torchvision.datasets.CIFAR100(
+            root=self._data_dir, train=False, download=True, transform=transform
+        )
 
     def train_dataloader(self):
         trainloader = torch.utils.data.DataLoader(
@@ -156,59 +57,42 @@ def train_dataloader(self):

     def val_dataloader(self):
         return torch.utils.data.DataLoader(
-            self._val_subset, batch_size=self._batch_size, shuffle=False, num_workers=10
+            self._val_subset,
+            batch_size=self._batch_size,
+            shuffle=False,
+            num_workers=self._num_workers,
         )
 
     def test_dataloader(self):
-        testset = torchvision.datasets.CIFAR100(
-            root=self._data_dir, train=False, download=True, transform=transform
-        )
         testloader = torch.utils.data.DataLoader(
-            testset, batch_size=4, shuffle=False, num_workers=10
+            self._testset,
+            batch_size=self._batch_size,
+            shuffle=False,
+            num_workers=self._num_workers,
         )
         return testloader
 
-    def validation_step(self, batch, batch_idx):
-        return self.eval_step(batch, batch_idx, "val")
-
-    def eval_step(self, batch, batch_idx, name):
-        x, y = batch
-        logits = self(x)
-        loss = F.cross_entropy(logits, y)
-        preds = torch.argmax(logits, dim=1)
-        acc = accuracy(preds, y)
-
-        val = self._topk_metric(logits, y)
-        val = self._topk_metric.compute()
-
-        # Calling self.log will surface up scalars for you in TensorBoard
-        self.log(f"{name}_loss", loss, prog_bar=True)
-        self.log(f"{name}_acc", acc, prog_bar=True)
-        self.log(f"{name}_acc5", val, prog_bar=True)
-        return loss
-
-    def test_step(self, batch, batch_idx):
-        # Here we just reuse the validation_step for testing
-        return self.eval_step(batch, batch_idx, "test")
-
-    def configure_optimizers(self):
-        return optim.Adam(self.parameters(), lr=self._lr)
 
 
 def cifar100(cfg: DictConfig):
     print(OmegaConf.to_yaml(cfg))
     print("Working directory : {}".format(os.getcwd()))
 
     try:
-        print(f"Orig working directory : {hydra.utils.get_original_cwd()}")
+        data_dir = f"{hydra.utils.get_original_cwd()}/data"
     except:
-        pass
+        data_dir = "./data"
 
+    print(f"Orig working directory : {data_dir}")
+
+    datamodule = Cifar100DataModule(
+        data_dir=data_dir,
+        batch_size=cfg.data.batch_size,
+    )
 
     trainer = Trainer(max_epochs=cfg.max_epochs, gpus=cfg.gpus)
-    model = Net(cfg)
-    trainer.fit(model)
     print("testing")
+    model = ClassificationNet(cfg)
+    trainer.fit(model, datamodule=datamodule)
     result = trainer.test(model)
     print("finished testing")
     return result


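This is the heart of the PR: dataset wiring moves out of the LightningModule into a LightningDataModule, and the hand-rolled Net is replaced by the library's ClassificationNet. Pulled out of the diff into one place, the datamodule pattern looks like this (a sketch assuming pytorch_lightning 1.x, where Trainer(gpus=...) is still valid; the transform is trimmed for brevity):

import torch
import torchvision
import torchvision.transforms as transforms
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader


class Cifar100DataModule(LightningDataModule):
    """Owns dataset download, the train/val split, and the loaders."""

    def __init__(self, data_dir: str, batch_size: int = 32,
                 train_fraction: float = 0.9, num_workers: int = 1):
        super().__init__()
        self._data_dir = data_dir
        self._batch_size = batch_size
        self._train_fraction = train_fraction
        self._num_workers = num_workers
        self._transform = transforms.Compose([transforms.ToTensor()])

    def setup(self, stage=None):
        # CIFAR-100 ships 50,000 training images; carve the validation
        # set out of them with a fixed seed so runs are comparable.
        num_train = int(self._train_fraction * 50000)
        num_val = 50000 - num_train
        full = torchvision.datasets.CIFAR100(
            root=self._data_dir, train=True, download=True,
            transform=self._transform,
        )
        self._train_subset, self._val_subset = torch.utils.data.random_split(
            full, [num_train, num_val],
            generator=torch.Generator().manual_seed(1),
        )

    def train_dataloader(self):
        return DataLoader(self._train_subset, batch_size=self._batch_size,
                          shuffle=True, num_workers=self._num_workers)

    def val_dataloader(self):
        return DataLoader(self._val_subset, batch_size=self._batch_size,
                          shuffle=False, num_workers=self._num_workers)

Compared with the PR's version, this sketch drops the zero-length third partition (the diff's random_split(train, [num_train, num_val, 0], ...) keeps a dummy extra split) and the test set, but the structure is the same: trainer.fit(model, datamodule=...) then picks up the loaders automatically.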
5 changes: 3 additions & 2 deletions examples/function_example.py
@@ -6,11 +6,12 @@
 import matplotlib.pyplot as plt
 import numpy as np
 import torch
+import torch_optimizer as alt_optim
+from pytorch_lightning import LightningModule, Trainer
 from torch.nn import functional as F
 from torch.utils.data import DataLoader, Dataset
-from pytorch_lightning import LightningModule, Trainer
+
 from high_order_layers_torch.layers import *
-import torch_optimizer as alt_optim
 
 
 class simple_func:
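
function_example.py only has its imports re-sorted. torch_optimizer (aliased alt_optim) is a third-party collection of optimizers beyond torch.optim, and every optimizer in it follows the familiar constructor convention. An illustrative usage sketch (the specific optimizer here is arbitrary, not necessarily what the example configures):

import torch
import torch_optimizer as alt_optim

model = torch.nn.Linear(4, 1)
# Same calling convention as torch.optim: parameters first, then hyperparams.
optimizer = alt_optim.AdaBound(model.parameters(), lr=1e-3)

x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()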
18 changes: 10 additions & 8 deletions examples/invariant_mnist.py
@@ -1,18 +1,20 @@
+import os
+
+import hydra
 import torch
-import torchvision
-import torchvision.transforms as transforms
-from pytorch_lightning import LightningModule, Trainer
 import torch.nn as nn
 import torch.nn.functional as F
 import torch.optim as optim
+import torch_optimizer as alt_optim
+import torchvision
+import torchvision.transforms as transforms
+from omegaconf import DictConfig, OmegaConf
+from pytorch_lightning import LightningModule, Trainer
 from torchmetrics.functional import accuracy
-from high_order_layers_torch.PolynomialLayers import *
+
 from high_order_layers_torch.layers import *
 from high_order_layers_torch.networks import *
-from omegaconf import DictConfig, OmegaConf
-import hydra
-import os
-import torch_optimizer as alt_optim
+from high_order_layers_torch.PolynomialLayers import *
 
 
 class Net(LightningModule):
12 changes: 7 additions & 5 deletions examples/mnist.py
@@ -1,15 +1,17 @@
+import os
+
+import hydra
 import torch
-import torchvision
-import torchvision.transforms as transforms
 import torch.nn as nn
 import torch.nn.functional as F
 import torch.optim as optim
+import torchvision
+import torchvision.transforms as transforms
+from omegaconf import DictConfig, OmegaConf
 from pytorch_lightning import LightningModule, Trainer
+from pytorch_lightning.callbacks import EarlyStopping
 from torchmetrics.functional import accuracy
-import hydra
-from omegaconf import DictConfig, OmegaConf
-import os
 
 from high_order_layers_torch.layers import *
 
 transformStandard = transforms.Compose(
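
Besides the re-sorted imports, mnist.py now pulls in pytorch_lightning.callbacks.EarlyStopping. A minimal sketch of wiring that callback into a Trainer (the monitored key and patience are illustrative; the key must match something the LightningModule logs via self.log):

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping

# Stop training once the monitored metric stops improving.
early_stop = EarlyStopping(monitor="val_loss", patience=3, mode="min")
trainer = Trainer(max_epochs=20, callbacks=[early_stop])
# trainer.fit(model)  # any LightningModule that logs "val_loss"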