def main():
    """Quick sanity check: train ``ClassifyUnet`` on MNIST with ``check=True``."""
    model = ClassifyUnet(1, 28, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

    # NOTE(review): both loaders use the MNIST *test* split (train=False) —
    # presumably intentional to keep this check fast; confirm.
    def _make_loader():
        dataset = MNIST(
            "./data",
            train=False,
            download=True,
            transform=ToTensor(),
        )
        return DataLoader(dataset, batch_size=32)

    loaders = {"train": _make_loader(), "valid": _make_loader()}

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        verbose=True,
        check=True,
    )
def _get_loaders(
    *, root: str, batch_size: int = 1, num_workers: int = 1
) -> Dict[str, DataLoader]:
    """
    Function to get loaders just for testing.

    Args:
        root (str): Path to root of dataset.
        batch_size (int): Batch size.
        num_workers (int): Num of workers.

    Returns:
        Dict[str, DataLoader]: Dict of loaders.
    """
    transform = ToTensor()

    # Build both splits with one loop: "train" uses the training split,
    # "valid" uses the test split.
    loaders: Dict[str, DataLoader] = collections.OrderedDict()
    for split_name, use_train_split in (("train", True), ("valid", False)):
        dataset = MNIST(
            root=root,
            train=use_train_split,
            download=True,
            transform=transform,
        )
        loaders[split_name] = DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers
        )

    return loaders
# Example #3
def main():
    """Quick GAN check on MNIST: train a generator/discriminator pair."""
    generator = nn.Sequential(
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        nn.Linear(128, 128 * 7 * 7),
        nn.LeakyReLU(0.2, inplace=True),
        Lambda(lambda x: x.view(x.size(0), 128, 7, 7)),
        nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Conv2d(128, 1, (7, 7), padding=3),
        nn.Sigmoid(),
    )
    discriminator = nn.Sequential(
        nn.Conv2d(1, 64, (3, 3), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Conv2d(64, 128, (3, 3), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        GlobalMaxPool2d(),
        Flatten(),
        nn.Linear(128, 1),
    )

    model = {"generator": generator, "discriminator": discriminator}
    # One Adam optimizer per sub-network, identical hyper-parameters.
    optimizer = {
        name: torch.optim.Adam(net.parameters(), lr=0.0003, betas=(0.5, 0.999))
        for name, net in model.items()
    }

    train_dataset = MNIST(
        os.getcwd(), train=True, download=True, transform=ToTensor(),
    )
    loaders = {"train": DataLoader(train_dataset, batch_size=32)}

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        callbacks=[
            dl.OptimizerCallback(
                optimizer_key="generator", metric_key="loss_generator"
            ),
            dl.OptimizerCallback(
                optimizer_key="discriminator", metric_key="loss_discriminator"
            ),
        ],
        main_metric="loss_generator",
        num_epochs=20,
        verbose=True,
        logdir="./logs_gan",
        check=True,
    )
# Example #4
def datasets_fn():
    """
    Build the datasets used by the runner.

    Returns a dict with "train" and "valid" keys. Both entries are the
    *same* MNIST test-split object (train=False) — presumably intentional
    to keep test runs small and fast; confirm if reused elsewhere.
    """
    dataset = MNIST(
        os.getcwd(), train=False, download=True, transform=ToTensor(),
    )
    return {"train": dataset, "valid": dataset}
# Example #5
def main():
    """Run few epochs to check ``BatchTransformCallback`` callback."""
    model = torch.nn.Linear(28 * 28, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

    loaders = {
        "train": DataLoader(
            MNIST(
                os.getcwd(), train=True, download=True, transform=ToTensor()
            ),
            batch_size=32,
        ),
        "valid": DataLoader(
            MNIST(
                os.getcwd(), train=False, download=True, transform=ToTensor()
            ),
            batch_size=32,
        ),
    }

    # Fix: local variable was misspelled "transrorms".
    transforms = [
        augmentation.RandomAffine(degrees=(-15, 20), scale=(0.75, 1.25)),
    ]

    runner = CustomRunner()

    # model training
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        logdir="./logs",
        num_epochs=5,
        verbose=True,
        load_best_on_end=True,
        check=True,
        callbacks=[BatchTransformCallback(transforms, input_key=0)],
    )

    # model inference: each prediction batch must have 10 class scores
    for prediction in runner.predict_loader(loader=loaders["train"]):
        assert prediction.detach().cpu().numpy().shape[-1] == 10
def _get_loaders(*, root: str, batch_size: int = 1, num_workers: int = 1):
    """
    Build MNIST train/valid loaders.

    Args:
        root: Path to root of dataset.
        batch_size: Batch size for both loaders.
        num_workers: Number of DataLoader workers.

    Returns:
        ``collections.OrderedDict`` with "train" and "valid" DataLoaders.
    """
    data_transform = ToTensor()

    trainset = MNIST(
        root=root, train=True, download=True, transform=data_transform
    )
    trainloader = DataLoader(
        trainset, batch_size=batch_size, num_workers=num_workers
    )
    testset = MNIST(
        root=root, train=False, download=True, transform=data_transform
    )
    testloader = DataLoader(
        testset, batch_size=batch_size, num_workers=num_workers
    )

    loaders = collections.OrderedDict(train=trainloader, valid=testloader)

    # Removed an unreachable `sys.exit()` that followed the return.
    return loaders

# # Data

# In[ ]:

import collections
import torch

from catalyst.contrib.datasets import MNIST
from catalyst.contrib.data.transforms import ToTensor, Compose, Normalize

# Loader hyper-parameters for this cell.
bs = 32
num_workers = 0

# Normalization constants — presumably the usual MNIST mean/std; verify.
data_transform = Compose([ToTensor(), Normalize((0.1307, ), (0.3081, ))])

loaders = collections.OrderedDict()

# NOTE(review): `trainset` is built with train=False, i.e. the *test*
# split — possibly intentional for a quick demo, but a variable named
# `trainset` would normally use train=True; confirm.
trainset = MNIST("./data",
                 train=False,
                 download=True,
                 transform=data_transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=bs,
                                          shuffle=True,
                                          num_workers=num_workers)

testset = MNIST("./data", train=False, download=True, transform=data_transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=bs,
# Example #8
 def get_transforms(stage: str = None, mode: str = None):
     """Return the MNIST preprocessing pipeline (to-tensor + normalize).

     Args:
         stage: Unused here; kept for interface compatibility.
         mode: Unused here; kept for interface compatibility.
     """
     steps = [ToTensor(), Normalize((0.1307,), (0.3081,))]
     return Compose(steps)
# Example #9
# Fix: `os` was used below (os.getcwd()) but never imported -> NameError.
import os

import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader

from catalyst import dl
from catalyst.contrib.data.transforms import ToTensor
from catalyst.contrib.datasets import MNIST
from catalyst.utils import metrics

# Linear classifier over flattened 28x28 MNIST images.
model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

loaders = {
    "train": DataLoader(
        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()),
        batch_size=32,
    ),
}


class CustomRunner(dl.Runner):
    def predict_batch(self, batch):
        # model inference step
        return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))

    def _handle_batch(self, batch):
        # model train/valid step
        x, y = batch
        y_hat = self.model(x.view(x.size(0), -1))
# Example #10
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader

from catalyst import dl
from catalyst.contrib.data.transforms import ToTensor
from catalyst.contrib.datasets import MNIST
from catalyst.utils import metrics

# Linear classifier over flattened 28x28 MNIST images.
model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

# Only a training loader is used in this example.
train_dataset = MNIST("./data", train=True, download=True, transform=ToTensor())
loaders = {"train": DataLoader(train_dataset, batch_size=32)}


class CustomRunner(dl.Runner):
    def predict_batch(self, batch):
        # model inference step
        return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))

    def _handle_batch(self, batch):
        # model train/valid step
        x, y = batch
        y_hat = self.model(x.view(x.size(0), -1))
# Example #11
# Fix: `DataLoader` was used below but never imported -> NameError.
# Also split the combined `import os, torch` per PEP 8.
import os

import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader

from catalyst import dl
from catalyst.contrib.data.transforms import ToTensor
from catalyst.contrib.datasets import MNIST
from catalyst.utils import metrics

model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

loaders = {
    "train":
    DataLoader(MNIST(os.getcwd(),
                     train=True,
                     download=True,
                     transform=ToTensor()),
               batch_size=32),
    "valid":
    DataLoader(MNIST(os.getcwd(),
                     train=False,
                     download=True,
                     transform=ToTensor()),
               batch_size=32),
}


class CustomRunner(dl.Runner):
    def predict_batch(self, batch):
        return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))

    def _handle_batch(self, batch):