Example #1
def train_net(Gen, Discr):
    G = Gen(in_noise=128, out_ch=3)
    G_polyak = copy.deepcopy(G).eval()
    D = Discr()
    print(G)
    print(D)

    def G_fun(batch):
        z = torch.randn(BS, 128, device=device)
        fake = G(z)
        # rescale G's [0, 1] output to [-1, 1] before feeding D
        preds = D(fake * 2 - 1).squeeze()
        loss = gan_loss.generated(preds)
        loss.backward()
        return {'loss': loss.item(), 'imgs': fake.detach()}

    def G_polyak_fun(batch):
        z = torch.randn(BS, 128, device=device)
        fake = G_polyak(z)
        return {'imgs': fake.detach()}

    def D_fun(batch):
        z = torch.randn(BS, 128, device=device)
        fake = G(z)
        fake_loss = gan_loss.fake(D(fake * 2 - 1))
        fake_loss.backward()

        x = batch[0]

        real_loss = gan_loss.real(D(x * 2 - 1))
        real_loss.backward()

        loss = real_loss.item() + fake_loss.item()
        return {
            'loss': loss,
            'real_loss': real_loss.item(),
            'fake_loss': fake_loss.item()
        }

    loop = GANRecipe(G, D, G_fun, D_fun, G_polyak_fun, dl,
                     log_every=100).to(device)
    loop.register('polyak', G_polyak)
    loop.G_loop.callbacks.add_callbacks([
        tcb.Optimizer(
            tch.optim.RAdamW(G.parameters(), lr=1e-4, betas=(0., 0.99))),
        # keep G_polyak as a Polyak (moving) average of G's weights
        tcb.Polyak(G, G_polyak),
    ])
    loop.callbacks.add_callbacks([
        tcb.Log('batch.0', 'x'),
        tcb.WindowedMetricAvg('real_loss'),
        tcb.WindowedMetricAvg('fake_loss'),
        tcb.Optimizer(
            tch.optim.RAdamW(D.parameters(), lr=4e-4, betas=(0., 0.99))),
    ])
    loop.test_loop.callbacks.add_callbacks([
        tcb.Log('imgs', 'polyak_imgs'),
        tcb.VisdomLogger('main', prefix='test')
    ])
    loop.to(device).run(100)
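For context, a minimal driver sketch for the recipe above. train_net closes
over module-level globals (BS, device, gan_loss, dl) that are not part of
this snippet, so everything below is an assumption about how they could be
provided; MyGenerator, MyDiscriminator and my_dataset are placeholders.

import torch
from torch.utils.data import DataLoader

BS = 32                                   # batch size read by the closures
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# gan_loss must expose .real / .fake / .generated as used above (a
# hinge-style GAN loss interface; its import is not shown in this snippet).
dl = DataLoader(my_dataset, batch_size=BS, shuffle=True)  # images in [0, 1]

train_net(MyGenerator, MyDiscriminator)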
Example #2
    def fit(self,
            iters,
            content_img,
            style_img,
            style_ratio,
            content_layers=None):
        """
        Run the recipe

        Args:
            iters (int): number of iterations to run
            content_img (PIL.Image): content image
            style_img (PIL.Image): style image
            style_ratio (float): weight of the style loss
            content_layers (list of str): layers on which to reconstruct
                content
        """
        self.loss.to(self.device)
        self.loss.set_style(pil2t(style_img).to(self.device), style_ratio)
        self.loss.set_content(pil2t(content_img).to(self.device), content_layers)

        canvas = ParameterizedImg(3, content_img.height,
                                  content_img.width, init_sd=0.00)

        self.opt = tch.optim.RAdamW(canvas.parameters(),
                                    lr=1e-2,
                                    betas=(0.7, 0.7),
                                    eps=1e-5,
                                    weight_decay=0)

        def forward(_):
            self.opt.zero_grad()
            img = canvas()
            loss, losses = self.loss(img)
            loss.backward()

            return {
                'loss': loss,
                'content_loss': losses['content_loss'],
                'style_loss': losses['style_loss'],
                'img': img
            }

        loop = Recipe(forward, range(iters))
        loop.register('canvas', canvas)
        loop.register('model', self)
        loop.callbacks.add_callbacks([
            tcb.Counter(),
            tcb.WindowedMetricAvg('loss'),
            tcb.WindowedMetricAvg('content_loss'),
            tcb.WindowedMetricAvg('style_loss'),
            tcb.Log('img', 'img'),
            tcb.VisdomLogger(visdom_env=self.visdom_env, log_every=10),
            tcb.StdoutLogger(log_every=10),
            tcb.Optimizer(self.opt, log_lr=True),
            tcb.LRSched(torch.optim.lr_scheduler.ReduceLROnPlateau(self.opt,
                threshold=0.001, cooldown=500),
                step_each_batch=True)
        ])
        loop.to(self.device)
        loop.run(1)
        return canvas.render().cpu()
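A hedged usage sketch for this fit(): the enclosing recipe class is not shown
here, so StyleRecipe below is a hypothetical stand-in for whatever class
defines self.loss, self.device and self.visdom_env.

from PIL import Image

content = Image.open('content.jpg').convert('RGB')
style = Image.open('style.jpg').convert('RGB')

recipe = StyleRecipe(device='cuda')       # hypothetical enclosing class
result = recipe.fit(500, content, style, style_ratio=0.01)
# result is a CPU image tensor, e.g. usable with torchvision's save_image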
Example #3
def make_loop(hourglass, body, display, num_iter, lr):
    loop = TrainAndCall(hourglass,
                        body,
                        display,
                        range(num_iter),
                        test_every=50,
                        checkpoint=None)
    opt = tch.optim.RAdamW(hourglass.parameters(), lr=lr)
    loop.callbacks.add_callbacks([
        tcb.WindowedMetricAvg('loss'),
        tcb.Optimizer(opt, clip_grad_norm=0.5, log_lr=True),
    ])
    loop.test_loop.callbacks.add_callbacks([
        tcb.Log('recon', 'img'),
        tcb.Log('orig', 'orig'),
        tcb.Log('loss', 'loss'),
    ])
    return loop
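A hedged sketch of wiring make_loop up, mirroring the TrainAndCall pattern of
the other examples: hourglass_net and sample are placeholders, body is the
training step, and display is the no-argument test callback whose returned
keys feed the Log callbacks above.

def body(batch):
    x, _ = batch
    recon = hourglass_net(x)
    loss = torch.nn.functional.mse_loss(recon, x)
    loss.backward()
    return {'loss': loss}

def display():
    with torch.no_grad():
        recon = hourglass_net(sample)
        loss = torch.nn.functional.mse_loss(recon, sample)
    return {'recon': recon, 'orig': sample, 'loss': loss}

loop = make_loop(hourglass_net, body, display, num_iter=5000, lr=3e-4)
loop.run(1)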
Example #4
    def fit(self, n_iters, neuron):
        """
        Run the recipe

        Args:
            n_iters (int): number of iterations to run
            neuron (int): the feature map to maximize

        Returns:
            the optimized image
        """
        canvas = ParameterizedImg(3, self.input_size + 10,
                                  self.input_size + 10)

        def forward(_):
            cim = canvas()
            # random crop offset jitters the canvas to regularize optimization
            rnd = random.randint(0, cim.shape[2] // 10)
            im = cim[:, :, rnd:, rnd:]
            im = torch.nn.functional.interpolate(im,
                                                 size=(self.input_size,
                                                       self.input_size),
                                                 mode='bilinear')
            _, acts = self.model(self.norm(im), detach=False)
            fmap = acts[self.layer]
            loss = -fmap[0][neuron].sum()
            loss.backward()

            return {'loss': loss, 'img': cim}

        loop = Recipe(forward, range(n_iters))
        loop.register('canvas', canvas)
        loop.register('model', self)
        loop.callbacks.add_callbacks([
            tcb.Counter(),
            tcb.Log('loss', 'loss'),
            tcb.Log('img', 'img'),
            tcb.Optimizer(DeepDreamOptim(canvas.parameters(), lr=self.lr)),
            tcb.VisdomLogger(visdom_env=self.visdom_env, log_every=10),
            tcb.StdoutLogger(log_every=10)
        ])
        loop.to(self.device)
        loop.run(1)
        return canvas.render().cpu()
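A hedged call-site sketch: the class wrapping this fit() (providing
self.model, self.layer, self.norm, self.input_size, self.lr and
self.visdom_env) is not shown, so VisRecipe and its constructor arguments
are hypothetical.

recipe = VisRecipe(model, layer='layer3', input_size=224, lr=5e-2,
                   visdom_env='vis')      # hypothetical constructor
img = recipe.fit(1000, neuron=42)         # maximize feature map 42 of layer3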
Example #5
def test_trainandcall():
    model = nn.Linear(10, 2)

    def train_step(batch):
        x, y = batch
        out = model(x)
        loss = torch.nn.functional.cross_entropy(out, y)
        loss.backward()
        return {'loss': loss}

    def after_train():
        print('Yup.')
        return {}

    trainloader = DataLoader(FakeData(), 4, shuffle=True)
    trainer = TrainAndCall(model, train_step, after_train, trainloader)
    trainer.callbacks.add_callbacks(
        [tcb.Optimizer(torch.optim.Adam(model.parameters(), lr=1e-3))])

    trainer.run(1)
Example #6
    def fit(self, ref, iters, lr=3e-4, device='cpu', visdom_env='deepdream'):
        """
        Args:
            ref (PIL.Image): the image to dream on
            iters (int): number of iterations to run
            lr (float, optional): the learning rate
            device (str, optional): the device to run on
            visdom_env (str or None): the name of the visdom env to use, or None
                to disable Visdom
        """
        ref_tensor = TF.ToTensor()(ref).unsqueeze(0)
        canvas = ParameterizedImg(1,
                                  3,
                                  ref_tensor.shape[2],
                                  ref_tensor.shape[3],
                                  init_img=ref_tensor,
                                  space='spectral',
                                  colors='uncorr')

        def forward(_):
            img = canvas()
            # random jitter before computing the dream loss
            rnd = random.randint(0, 10)
            loss = self.loss(self.norm(img[:, :, rnd:, rnd:]))
            loss.backward()
            return {'loss': loss, 'img': img}

        loop = Recipe(forward, range(iters))
        loop.register('model', self)
        loop.register('canvas', canvas)
        loop.callbacks.add_callbacks([
            tcb.Counter(),
            tcb.Log('loss', 'loss'),
            tcb.Log('img', 'img'),
            tcb.Optimizer(DeepDreamOptim(canvas.parameters(), lr=lr)),
            tcb.VisdomLogger(visdom_env=visdom_env, log_every=10),
            tcb.StdoutLogger(log_every=10)
        ])
        loop.to(device)
        loop.run(1)
        return canvas.render().cpu()
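For context, a hedged call-site sketch; DreamRecipe stands in for the class
that defines self.loss and self.norm, and its constructor is hypothetical.

from PIL import Image

ref = Image.open('photo.jpg').convert('RGB')
dreamer = DreamRecipe()                   # hypothetical enclosing class
out = dreamer.fit(ref, iters=2000, lr=3e-4, device='cuda',
                  visdom_env='deepdream')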
Example #7
def CrossEntropyClassification(model,
                               train_loader,
                               test_loader,
                               classes,
                               lr=3e-3,
                               beta1=0.9,
                               wd=1e-2,
                               visdom_env='main',
                               test_every=1000,
                               log_every=100):
    """
    A Classification recipe with a default forward training / testing pass
    using cross entropy, extended with RAdamW and ReduceLROnPlateau.

    Args:
        model (nn.Module): a model learnable with cross entropy
        train_loader (DataLoader): Training set dataloader
        test_loader (DataLoader): Testing set dataloader
        classes (list of str): class names, in order
        lr (float): the learning rate
        beta1 (float): RAdamW's beta1
        wd (float): weight decay
        visdom_env (str): name of the visdom environment to use, or None to
            disable Visdom (default: 'main')
        test_every (int): testing frequency, in number of iterations (default:
            1000)
        log_every (int): logging frequency, in number of iterations (default:
            100)
    """

    def train_step(batch):
        x, y = batch
        pred = model(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        loss.backward()
        return {'loss': loss, 'pred': pred}

    def validation_step(batch):
        x, y = batch
        pred = model(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        return {'loss': loss, 'pred': pred}

    loop = Classification(model,
                          train_step,
                          validation_step,
                          train_loader,
                          test_loader,
                          classes,
                          visdom_env=visdom_env,
                          test_every=test_every,
                          log_every=log_every)

    opt = RAdamW(model.parameters(),
                 lr=lr,
                 betas=(beta1, 0.999),
                 weight_decay=wd)
    loop.callbacks.add_callbacks([
        tcb.Optimizer(opt, log_lr=True),
        tcb.LRSched(torch.optim.lr_scheduler.ReduceLROnPlateau(opt))
    ])
    return loop
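A hedged usage sketch on CIFAR-10; the dataset, model and epoch count are
arbitrary illustrative choices, not part of the recipe.

import torch
import torchvision
import torchvision.transforms as T
from torch.utils.data import DataLoader

tfm = T.ToTensor()
train_ds = torchvision.datasets.CIFAR10('data', train=True, download=True,
                                        transform=tfm)
test_ds = torchvision.datasets.CIFAR10('data', train=False, download=True,
                                       transform=tfm)

model = torchvision.models.resnet18(num_classes=10)
loop = CrossEntropyClassification(model,
                                  DataLoader(train_ds, 64, shuffle=True),
                                  DataLoader(test_ds, 64),
                                  train_ds.classes)
loop.to('cuda' if torch.cuda.is_available() else 'cpu').run(5)  # 5 epochs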
Example #8
    def fit(self,
            iters,
            content_img,
            style_img,
            style_ratio,
            content_layers=None):
        """
        Run the recipe

        Args:
            iters (int): number of iterations to run
            content_img (PIL.Image): content image
            style_img (PIL.Image): style image
            style_ratio (float): weight of the style loss
            content_layers (list of str): layers on which to reconstruct
                content
        """
        self.loss.to(self.device)
        self.loss.set_style(pil2t(style_img).to(self.device), style_ratio)
        self.loss.set_content(
            pil2t(content_img).to(self.device), content_layers)

        self.loss2.to(self.device)
        self.loss2.set_style(
            torch.nn.functional.interpolate(pil2t(style_img)[None],
                                            scale_factor=0.5,
                                            mode='bilinear')[0].to(
                                                self.device), style_ratio)
        self.loss2.set_content(
            torch.nn.functional.interpolate(pil2t(content_img)[None],
                                            scale_factor=0.5,
                                            mode='bilinear')[0].to(
                                                self.device), content_layers)

        canvas = ParameterizedImg(3,
                                  content_img.height,
                                  content_img.width,
                                  init_img=pil2t(content_img))

        self.opt = tch.optim.RAdamW(canvas.parameters(), 3e-2)

        def forward(_):
            img = canvas()
            loss, losses = self.loss(img)
            loss.backward()
            # second pass at half resolution; note that the metrics returned
            # below come from this half-scale pass only
            loss, losses = self.loss2(
                torch.nn.functional.interpolate(canvas(),
                                                scale_factor=0.5,
                                                mode='bilinear'))
            loss.backward()

            return {
                'loss': loss,
                'content_loss': losses['content_loss'],
                'style_loss': losses['style_loss'],
                'img': img
            }

        loop = Recipe(forward, range(iters))
        loop.register('canvas', canvas)
        loop.register('model', self)
        loop.callbacks.add_callbacks([
            tcb.Counter(),
            tcb.WindowedMetricAvg('loss'),
            tcb.WindowedMetricAvg('content_loss'),
            tcb.WindowedMetricAvg('style_loss'),
            tcb.Log('img', 'img'),
            tcb.VisdomLogger(visdom_env=self.visdom_env, log_every=10),
            tcb.StdoutLogger(log_every=10),
            tcb.Optimizer(self.opt, log_lr=True),
        ])
        loop.to(self.device)
        loop.run(1)
        return canvas.render().cpu()
Example #9
def MixupClassification(model,
                        train_loader,
                        test_loader,
                        classes,
                        *,
                        lr=3e-3,
                        beta1=0.9,
                        wd=1e-2,
                        visdom_env='main',
                        test_every=1000,
                        log_every=100):
    """
    A Classification recipe with a default forward training / testing pass
    using cross entropy and mixup, extended with RAdamW and
    ReduceLROnPlateau.

    Args:
        model (nn.Module): a model learnable with cross entropy
        train_loader (DataLoader): Training set dataloader. Must have soft
            targets. Should be a DataLoader loading a MixupDataset or
            compatible.
        test_loader (DataLoader): Testing set dataloader. Dataset must have
            categorical targets.
        classes (list of str): class names, in order
        lr (float): the learning rate
        beta1 (float): RAdamW's beta1
        wd (float): weight decay
        visdom_env (str): name of the visdom environment to use, or None to
            disable Visdom (default: 'main')
        test_every (int): testing frequency, in number of iterations (default:
            1000)
        log_every (int): logging frequency, in number of iterations (default:
            100)
    """

    from torchelie.loss import continuous_cross_entropy

    def train_step(batch):
        x, y = batch
        pred = model(x)
        loss = continuous_cross_entropy(pred, y)
        loss.backward()
        return {'loss': loss}

    def validation_step(batch):
        x, y = batch
        pred = model(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        return {'loss': loss, 'pred': pred}

    loop = TrainAndTest(model,
                        train_step,
                        validation_step,
                        train_loader,
                        test_loader,
                        visdom_env=visdom_env,
                        test_every=test_every,
                        log_every=log_every)

    loop.callbacks.add_callbacks([
        tcb.WindowedMetricAvg('loss'),
    ])
    loop.register('classes', classes)

    loop.test_loop.callbacks.add_callbacks([
        tcb.AccAvg(post_each_batch=False),
        tcb.WindowedMetricAvg('loss', False),
    ])

    if visdom_env is not None:
        loop.callbacks.add_epilogues(
            [tcb.ImageGradientVis(),
             tcb.MetricsTable()])

    if len(classes) <= 25:
        loop.test_loop.callbacks.add_callbacks([
            tcb.ConfusionMatrix(classes),
        ])

    loop.test_loop.callbacks.add_callbacks([
        tcb.ClassificationInspector(30, classes, False),
        tcb.MetricsTable(False)
    ])

    opt = RAdamW(model.parameters(),
                 lr=lr,
                 betas=(beta1, 0.999),
                 weight_decay=wd)
    loop.callbacks.add_callbacks([
        tcb.Optimizer(opt, log_lr=True),
        tcb.LRSched(torch.optim.lr_scheduler.ReduceLROnPlateau(opt))
    ])
    return loop
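A hedged sketch of feeding this recipe: the docstring requires the training
loader to yield soft targets, e.g. via the MixupDataset it mentions (the
import path and constructor signature are assumed here); model, train_ds and
test_ds are placeholders.

from torch.utils.data import DataLoader
from torchelie.datasets import MixupDataset   # import path assumed

train_loader = DataLoader(MixupDataset(train_ds),  # soft targets
                          batch_size=64, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=64)   # categorical targets

loop = MixupClassification(model, train_loader, test_loader,
                           classes=test_ds.classes, lr=3e-3)
loop.to('cuda').run(5)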
Example #10
def CrossEntropyClassification(model,
                               train_loader,
                               test_loader,
                               classes,
                               lr=3e-3,
                               beta1=0.9,
                               wd=1e-2,
                               visdom_env='main',
                               test_every=1000,
                               log_every=100):
    """
    Extends Classification with default cross entropy forward passes. Also adds
    RAdamW and ReduceLROnPlateau.

    Inherited training callbacks:

    - AccAvg for displaying accuracy
    - WindowedMetricAvg for displaying loss
    - ConfusionMatrix if len(classes) <= 25
    - ClassificationInspector
    - MetricsTable
    - ImageGradientVis
    - Counter for counting iterations, connected to the testing loop as well
    - VisdomLogger
    - StdoutLogger

    Training callbacks:

    - Optimizer with RAdamW
    - LRSched with ReduceLROnPlateau

    Testing:

    Testing loop is in :code:`.test_loop`.

    Inherited testing callbacks:

    - AccAvg
    - WindowedMetricAvg
    - ConfusionMatrix if :code:`len(classes) <= 25`
    - ClassificationInspector
    - MetricsTable
    - VisdomLogger
    - StdoutLogger
    - Checkpoint saving the best testing loss

    Args:
        model (nn.Module): a model learnable with cross entropy
        train_loader (DataLoader): Training set dataloader
        test_loader (DataLoader): Testing set dataloader
        classes (list of str): class names, in order
        lr (float): the learning rate
        beta1 (float): RAdamW's beta1
        wd (float): weight decay
        visdom_env (str): name of the visdom environment to use, or None to
            disable Visdom (default: 'main')
        test_every (int): testing frequency, in number of iterations (default:
            1000)
        log_every (int): logging frequency, in number of iterations (default:
            100)
    """
    def train_step(batch):
        x, y = batch
        pred = model(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        loss.backward()
        return {'loss': loss, 'pred': pred}

    def validation_step(batch):
        x, y = batch
        pred = model(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        return {'loss': loss, 'pred': pred}

    loop = Classification(model,
                          train_step,
                          validation_step,
                          train_loader,
                          test_loader,
                          classes,
                          visdom_env=visdom_env,
                          test_every=test_every,
                          log_every=log_every)

    opt = RAdamW(model.parameters(),
                 lr=lr,
                 betas=(beta1, 0.999),
                 weight_decay=wd)
    loop.callbacks.add_callbacks([
        tcb.Optimizer(opt, log_lr=True),
        tcb.LRSched(torch.optim.lr_scheduler.ReduceLROnPlateau(opt))
    ])
    return loop