Example #1
def main():
    model = ClassifyAE(28 * 28, 128, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

    loaders = {
        "train":
        DataLoader(
            MNIST(
                "./data",
                train=False,
                download=True,
                transform=ToTensor(),
            ),
            batch_size=32,
        ),
        "valid":
        DataLoader(
            MNIST(
                "./data",
                train=False,
                download=True,
                transform=ToTensor(),
            ),
            batch_size=32,
        ),
    }

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        verbose=True,
        check=True,
    )
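
This snippet is an excerpt: ClassifyAE and CustomRunner are defined elsewhere in the original source, and both loaders reuse the MNIST test split, apparently to keep the check=True smoke run fast. The imports it relies on, taken from the other examples in this section, would be:

import torch
from torch.utils.data import DataLoader

from catalyst.contrib.datasets import MNIST
from catalyst.data.cv import ToTensor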
Example #2
def _get_loaders(*,
                 root: str,
                 batch_size: int = 1,
                 num_workers: int = 1) -> Dict[str, DataLoader]:
    """
    Function to get loaders just for testing.

    Args:
        root (str): Path to root of dataset.
        batch_size (int): Batch size.
        num_workers (int): Number of workers.

    Returns:
        Dict[str, DataLoader]: Dict of loaders.
    """
    data_transform = ToTensor()

    trainset = MNIST(root=root,
                     train=True,
                     download=True,
                     transform=data_transform)
    trainloader = DataLoader(trainset,
                             batch_size=batch_size,
                             num_workers=num_workers)
    testset = MNIST(root=root,
                    train=False,
                    download=True,
                    transform=data_transform)
    testloader = DataLoader(testset,
                            batch_size=batch_size,
                            num_workers=num_workers)

    loaders = collections.OrderedDict(train=trainloader, valid=testloader)

    return loaders
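
For reference, a quick usage check of _get_loaders; the exact batch shape depends on the ToTensor pipeline, so it is only indicated in a comment:

loaders = _get_loaders(root="./data", batch_size=32, num_workers=0)

x, y = next(iter(loaders["train"]))
print(x.shape, y.shape)  # e.g. torch.Size([32, 1, 28, 28]) torch.Size([32])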
Example #3
def main():
    """Run few epochs to check ``BatchTransformCallback`` callback."""
    model = torch.nn.Linear(28 * 28, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

    loaders = {
        "train":
        DataLoader(
            MNIST(os.getcwd(), train=True, download=True,
                  transform=ToTensor()),
            batch_size=32,
        ),
        "valid":
        DataLoader(
            MNIST(os.getcwd(),
                  train=False,
                  download=True,
                  transform=ToTensor()),
            batch_size=32,
        ),
    }

    transforms = [
        augmentation.RandomAffine(degrees=(-15, 20), scale=(0.75, 1.25)),
    ]

    runner = CustomRunner()

    # model training
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        logdir="./logs",
        num_epochs=5,
        verbose=True,
        load_best_on_end=True,
        check=True,
        callbacks=[BatchTransformCallback(transforms, input_key=0)],
    )

    # model inference
    for prediction in runner.predict_loader(loader=loaders["train"]):
        assert prediction.detach().cpu().numpy().shape[-1] == 10
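
The snippet assumes that augmentation is kornia's augmentation module (its RandomAffine(degrees=..., scale=...) signature matches the call above) and that BatchTransformCallback ships with Catalyst's contrib callbacks. Under those assumptions, the imports would look like:

import os

import torch
from torch.utils.data import DataLoader

from kornia import augmentation  # assumed: matches the RandomAffine call above
from catalyst.contrib.callbacks import BatchTransformCallback  # assumed import path
from catalyst.contrib.datasets import MNIST
from catalyst.data.cv import ToTensor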
Example #4
def datasets_fn():
    """
    Docs.
    """
    dataset = MNIST(
        "./data",
        train=False,
        download=True,
        transform=ToTensor(),
    )
    return {"train": dataset, "valid": dataset}
Example #5
def test_quantization_callback() -> None:
    """Check that ``DynamicQuantizationCallback`` saves a quantized model."""
    loaders = {
        "train":
        DataLoader(
            MNIST(os.getcwd(), train=True, download=True,
                  transform=ToTensor()),
            batch_size=32,
        ),
        "valid":
        DataLoader(
            MNIST(os.getcwd(),
                  train=False,
                  download=True,
                  transform=ToTensor()),
            batch_size=32,
        ),
    }
    model = nn.Sequential(Flatten(), nn.Linear(784, 512), nn.ReLU(),
                          nn.Linear(512, 10))
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    runner = dl.SupervisedRunner()
    runner.train(
        model=model,
        callbacks=[
            dl.DynamicQuantizationCallback(out_dir="./logs", backend="qnnpack")
        ],
        loaders=loaders,
        criterion=criterion,
        optimizer=optimizer,
        num_epochs=1,
        logdir="./logs",
        check=True,
    )
    assert os.path.isfile("./logs/best_quantized.pth")
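
DynamicQuantizationCallback presumably wraps PyTorch's built-in dynamic quantization; the equivalent standalone call on the same model would be:

import torch
from torch import nn

# dynamic quantization converts Linear weights to int8 ahead of time;
# activations are quantized on the fly during inference
quantized_model = torch.quantization.quantize_dynamic(
    model, {nn.Linear}, dtype=torch.qint8
)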
Example #6
def get_transforms(stage: str = None, mode: str = None):
    """Return the standard MNIST preprocessing: ``ToTensor`` plus normalization."""
    return Compose([ToTensor(), Normalize((0.1307, ), (0.3081, ))])
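
The constants are the conventional MNIST statistics (mean 0.1307, std 0.3081 over the training pixels). Typical usage, assuming the Compose/Normalize/ToTensor imports from catalyst.data.cv used elsewhere in this section:

from catalyst.contrib.datasets import MNIST

dataset = MNIST("./data", train=True, download=True, transform=get_transforms())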
Example #7
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader

from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data.cv import ToTensor
from catalyst.utils import metrics

model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

loaders = {
    "train":
    DataLoader(
        MNIST("./data", train=True, download=True, transform=ToTensor()),
        batch_size=32,
    ),
}


class CustomRunner(dl.Runner):
    def predict_batch(self, batch):
        # model inference step
        return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))

    def _handle_batch(self, batch):
        # model train/valid step
        x, y = batch
        y_hat = self.model(x.view(x.size(0), -1))

        loss = F.cross_entropy(y_hat, y)
        accuracy01, accuracy03 = metrics.accuracy(y_hat, y, topk=(1, 3))
        self.batch_metrics.update(
            {"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
        )
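
A minimal training call for this runner, mirroring the other examples in this section:

runner = CustomRunner()
runner.train(
    model=model,
    optimizer=optimizer,
    loaders=loaders,
    num_epochs=1,
    verbose=True,
)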
Example #8
def main():
    generator = nn.Sequential(
        # generate 128 * 7 * 7 coefficients, later reshaped into a 128-channel 7x7 map
        nn.Linear(128, 128 * 7 * 7),
        nn.LeakyReLU(0.2, inplace=True),
        Lambda(lambda x: x.view(x.size(0), 128, 7, 7)),
        nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Conv2d(128, 1, (7, 7), padding=3),
        nn.Sigmoid(),
    )
    discriminator = nn.Sequential(
        nn.Conv2d(1, 64, (3, 3), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Conv2d(64, 128, (3, 3), stride=(2, 2), padding=1),
        nn.LeakyReLU(0.2, inplace=True),
        GlobalMaxPool2d(),
        Flatten(),
        nn.Linear(128, 1),
    )

    model = {"generator": generator, "discriminator": discriminator}
    optimizer = {
        "generator":
        torch.optim.Adam(generator.parameters(), lr=0.0003,
                         betas=(0.5, 0.999)),
        "discriminator":
        torch.optim.Adam(discriminator.parameters(),
                         lr=0.0003,
                         betas=(0.5, 0.999)),
    }
    loaders = {
        "train":
        DataLoader(
            MNIST(
                "./data",
                train=True,
                download=True,
                transform=ToTensor(),
            ),
            batch_size=32,
        ),
    }

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        callbacks=[
            dl.OptimizerCallback(optimizer_key="generator",
                                 metric_key="loss_generator"),
            dl.OptimizerCallback(optimizer_key="discriminator",
                                 metric_key="loss_discriminator"),
        ],
        main_metric="loss_generator",
        num_epochs=20,
        verbose=True,
        logdir="./logs_gan",
        check=True,
    )
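
This GAN example assumes a CustomRunner that populates the loss_generator and loss_discriminator metrics consumed by the two OptimizerCallbacks. A minimal sketch of what such a runner's batch handler might look like, using standard non-saturating GAN losses (an illustration, not Catalyst's reference implementation):

class CustomRunner(dl.Runner):
    def _handle_batch(self, batch):
        real_images, _ = batch
        batch_size = real_images.size(0)

        # sample latent noise and generate a batch of fake images
        noise = torch.randn(batch_size, 128, device=self.device)
        fake_images = self.model["generator"](noise)

        # discriminator loss: real images labeled 1, fakes labeled 0
        real_logits = self.model["discriminator"](real_images)
        fake_logits = self.model["discriminator"](fake_images.detach())
        loss_discriminator = F.binary_cross_entropy_with_logits(
            real_logits, torch.ones_like(real_logits)
        ) + F.binary_cross_entropy_with_logits(
            fake_logits, torch.zeros_like(fake_logits)
        )

        # generator loss: fool the discriminator into predicting 1
        gen_logits = self.model["discriminator"](fake_images)
        loss_generator = F.binary_cross_entropy_with_logits(
            gen_logits, torch.ones_like(gen_logits)
        )

        self.batch_metrics.update({
            "loss_generator": loss_generator,
            "loss_discriminator": loss_discriminator,
        })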
Example #9
# The fastai DataLoader is a drop-in replacement for PyTorch's;
#   no code changes are required other than changing the import line
from fastai.data.load import DataLoader
import os

import torch
from torch.nn import functional as F
from catalyst import dl
from catalyst.data.cv import ToTensor
from catalyst.contrib.datasets import MNIST
from catalyst.utils import metrics

model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

loaders = {
    "train": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),
    "valid": DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32),
}

class CustomRunner(dl.Runner):
    def predict_batch(self, batch): return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))

    def _handle_batch(self, batch):
        x, y = batch
        y_hat = self.model(x.view(x.size(0), -1))

        loss = F.cross_entropy(y_hat, y)
        accuracy01, accuracy03 = metrics.accuracy(y_hat, y, topk=(1, 3))
        self.batch_metrics.update(
            {"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
        )

# Data

import collections
import torch

from catalyst.contrib.datasets import MNIST
from catalyst.data.cv import ToTensor, Compose, Normalize

bs = 32
num_workers = 0

data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])

loaders = collections.OrderedDict()

trainset = MNIST(
    "./data", train=True, download=True, transform=data_transform
)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=bs, shuffle=True, num_workers=num_workers
)

testset = MNIST("./data", train=False, download=True, transform=data_transform)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=bs, shuffle=False, num_workers=num_workers
)

loaders["train"] = trainloader
loaders["valid"] = testloader