def get_datasets(self, stage: str, **kwargs):
    """Return the datasets used for ``stage``.

    Args:
        stage: current stage name; any value other than ``"infer"``
            produces ``train``/``valid`` entries, otherwise ``infer``.
        **kwargs: unused; accepted for interface compatibility.

    Returns:
        OrderedDict mapping dataset names to MNIST datasets.
    """
    datasets = OrderedDict()
    # NOTE(review): every split is built with ``train=False`` (the MNIST
    # test split), presumably to keep this example small/fast — confirm.
    if stage == "infer":
        datasets["infer"] = MNIST(
            "./data",
            train=False,
            download=True,
            transform=SimpleExperiment.get_transforms(stage=stage, mode="valid"),
        )
    else:
        datasets["train"] = MNIST(
            "./data",
            train=False,
            download=True,
            transform=SimpleExperiment.get_transforms(stage=stage, mode="train"),
        )
        datasets["valid"] = MNIST(
            "./data",
            train=False,
            download=True,
            transform=SimpleExperiment.get_transforms(stage=stage, mode="valid"),
        )
    return datasets
def main():
    """Run a quick sanity-check training of ``ClassifyVAE`` on MNIST."""
    model = ClassifyVAE(28 * 28, 64, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

    # NOTE(review): both loaders read the MNIST *test* split (train=False),
    # presumably to keep this ``check=True`` run fast — confirm intent.
    def _make_loader():
        dataset = MNIST(
            os.getcwd(),
            train=False,
            download=True,
            transform=ToTensor(),
        )
        return DataLoader(dataset, batch_size=32)

    loaders = {"train": _make_loader(), "valid": _make_loader()}

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        verbose=True,
        check=True,
    )
def _get_loaders(*, root: str, batch_size: int = 1, num_workers: int = 1):
    """Build the train/valid MNIST dataloaders.

    Args:
        root: directory where the MNIST data is stored/downloaded.
        batch_size: samples per batch, used for both loaders.
        num_workers: worker processes per loader.

    Returns:
        collections.OrderedDict with ``train`` and ``valid`` DataLoaders.
    """
    transform = ToTensor()
    loader_kwargs = dict(batch_size=batch_size, num_workers=num_workers)
    train_loader = DataLoader(
        MNIST(root=root, train=True, download=True, transform=transform),
        **loader_kwargs,
    )
    valid_loader = DataLoader(
        MNIST(root=root, train=False, download=True, transform=transform),
        **loader_kwargs,
    )
    return collections.OrderedDict(
        [("train", train_loader), ("valid", valid_loader)]
    )
def datasets_fn():
    """Return the datasets used for training and validation.

    Note: a single MNIST (test-split) dataset object is shared by both
    the ``train`` and ``valid`` keys, as in the original example.
    """
    shared_dataset = MNIST(
        os.getcwd(),
        train=False,
        download=True,
        transform=ToTensor(),
    )
    return {"train": shared_dataset, "valid": shared_dataset}
def get_datasets(self, stage: str, n_samples: int = 320, **kwargs):
    """Return the datasets for ``stage``, optionally truncated.

    Args:
        stage: stage name; ``"infer"`` produces only an ``infer`` dataset,
            any other value produces ``train`` and ``valid``.
        n_samples: when positive, each dataset is truncated to its first
            ``n_samples`` examples via ``Subset``.
        **kwargs: unused; accepted for interface compatibility.

    Returns:
        OrderedDict mapping dataset names to (possibly subsetted) MNIST.
    """

    def _mnist(mode):
        # NOTE(review): train=False (test split) is used for every mode,
        # presumably to keep this example lightweight — confirm.
        return MNIST(
            "./data",
            train=False,
            download=True,
            transform=Experiment.get_transforms(stage=stage, mode=mode),
        )

    def _truncate(dataset):
        # Keep only the first n_samples examples when requested.
        if n_samples > 0:
            return Subset(dataset, list(range(n_samples)))
        return dataset

    datasets = OrderedDict()
    if stage == "infer":
        datasets["infer"] = _truncate(_mnist("valid"))
    else:
        datasets["train"] = _truncate(_mnist("train"))
        datasets["valid"] = _truncate(_mnist("valid"))
    return datasets
# In[ ]: import collections import torch from catalyst.contrib.data.dataset import MNIST, ToTensor, Compose, Normalize bs = 32 num_workers = 0 data_transform = Compose([ToTensor(), Normalize((0.1307, ), (0.3081, ))]) loaders = collections.OrderedDict() trainset = MNIST("./data", train=False, download=True, transform=data_transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=bs, shuffle=True, num_workers=num_workers) testset = MNIST("./data", train=False, download=True, transform=data_transform) testloader = torch.utils.data.DataLoader(testset, batch_size=bs, shuffle=False, num_workers=num_workers) loaders["train"] = trainloader loaders["valid"] = testloader
import os import torch from torch.nn import functional as F from torch.utils.data import DataLoader from catalyst import dl from catalyst.contrib.data.dataset import MNIST, ToTensor from catalyst.utils import metrics model = torch.nn.Linear(28 * 28, 10) optimizer = torch.optim.Adam(model.parameters(), lr=0.02) loaders = { "train": DataLoader( MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32, ), } class CustomRunner(dl.Runner): def predict_batch(self, batch): # model inference step return self.model(batch[0].to(self.device).view(batch[0].size(0), -1)) def _handle_batch(self, batch): # model train/valid step x, y = batch y_hat = self.model(x.view(x.size(0), -1))
def main():
    """Run a quick-check GAN training (generator + discriminator) on MNIST."""
    # Generator: 128-dim latent vector -> 7x7x128 map -> two stride-2
    # transposed convolutions -> single-channel image in [0, 1].
    generator = nn.Sequential(
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        nn.Linear(128, 128 * 7 * 7),
        nn.LeakyReLU(0.2, inplace=True),
        Lambda(lambda x: x.view(x.size(0), 128, 7, 7)),
        nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Conv2d(128, 1, (7, 7), padding=3),
        nn.Sigmoid(),
    )
    # Discriminator: single-channel image -> strided convs -> one logit.
    discriminator = nn.Sequential(
        nn.Conv2d(1, 64, (3, 3), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Conv2d(64, 128, (3, 3), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        GlobalMaxPool2d(),
        Flatten(),
        nn.Linear(128, 1),
    )

    model = {"generator": generator, "discriminator": discriminator}

    # Both networks share the same optimizer settings.
    def _adam(params):
        return torch.optim.Adam(params, lr=0.0003, betas=(0.5, 0.999))

    optimizer = {
        "generator": _adam(generator.parameters()),
        "discriminator": _adam(discriminator.parameters()),
    }

    loaders = {
        "train": DataLoader(
            MNIST(
                os.getcwd(),
                train=True,
                download=True,
                transform=ToTensor(),
            ),
            batch_size=32,
        ),
    }

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        # Each optimizer steps on its own loss metric.
        callbacks=[
            dl.OptimizerCallback(optimizer_key="generator", metric_key="loss_generator"),
            dl.OptimizerCallback(optimizer_key="discriminator", metric_key="loss_discriminator"),
        ],
        main_metric="loss_generator",
        num_epochs=20,
        verbose=True,
        logdir="./logs_gan",
        check=True,
    )