Exemple #1
0
    def test_basic_checkpoint(self):
        """Train with a MostRecent checkpoint, reload it, and verify history/params."""
        initial = torch.tensor([2.0, 1.0, 10.0])
        steps = 500

        net = Net(initial)
        sgd = torch.optim.SGD(net.parameters(), lr=0.01)

        trial = torchbearer.Trial(
            net, sgd, loss,
            callbacks=[torchbearer.callbacks.MostRecent(filepath='test.pt')])
        trial = trial.for_train_steps(steps).for_val_steps(1)
        trial.run(2)  # Simulate 2 'epochs'

        # Build a fresh, untrained copy and restore it from the checkpoint.
        net = Net(torch.tensor([2.0, 1.0, 10.0]))
        sgd = torch.optim.SGD(net.parameters(), lr=0.01)

        trial = torchbearer.Trial(
            net, sgd, loss,
            callbacks=[torchbearer.callbacks.MostRecent(filepath='test.pt')])
        trial = trial.for_train_steps(steps)
        trial.load_state_dict(torch.load('test.pt'))

        # Two epochs of history and the converged parameters must survive reload.
        self.assertEqual(len(trial.state[torchbearer.HISTORY]), 2)
        self.assertAlmostEqual(net.pars[0].item(), 5.0, places=4)
        self.assertAlmostEqual(net.pars[1].item(), 0.0, places=4)
        self.assertAlmostEqual(net.pars[2].item(), 1.0, places=4)

        import os
        os.remove('test.pt')
Exemple #2
0
def main():
    """Parse CLI arguments, train a classifier, then run it over the test set."""
    # Two-pass argument parsing: a forgiving first pass discovers the shared
    # options so model/optimiser-specific sub-arguments can be registered.
    fake_parser = FakeArgumentParser(add_help=False, allow_abbrev=False)
    add_shared_args(fake_parser)
    fake_args, _ = fake_parser.parse_known_args()

    parser = argparse.ArgumentParser()
    add_shared_args(parser)
    add_sub_args(fake_args, parser)
    args = parser.parse_args()

    trainloader, valloader, testloader = build_dataloaders(args)

    # args.output behaves like a pathlib.Path (mkdir); keep a str copy for
    # the string-concatenated callback file paths below.
    args.output.mkdir(exist_ok=True, parents=True)
    path = str(args.output)
    save_args(args.output)

    model = get_model(args.model)()

    # The learning-rate argument encodes both the initial LR and a sequence
    # of scheduler callbacks (unpacked into the callback list below).
    init_lr, sched = parse_learning_rate_arg(args.learning_rate)

    if args.optimiser == 'Adam':
        opt = optim.Adam(model.parameters(),
                         lr=init_lr,
                         weight_decay=args.weight_decay)
    else:
        # Any other value falls through to SGD with momentum.
        opt = optim.SGD(model.parameters(),
                        lr=init_lr,
                        weight_decay=args.weight_decay,
                        momentum=args.momentum)

    callbacks = [
        Interval(filepath=path + '/model.{epoch:02d}.pt', period=10),
        MostRecent(filepath=path + '/model_final.pt'),
        CSVLogger(path + '/train-log.csv'), *sched
    ]

    trial = tb.Trial(model,
                     opt,
                     torch.nn.CrossEntropyLoss(),
                     metrics=['loss', 'acc', 'lr'],
                     callbacks=callbacks).to(args.device)
    trial.with_generators(train_generator=trainloader, val_generator=valloader)
    trial.run(epochs=args.epochs, verbose=2)

    # Fresh optimiser-less Trial for the final inference pass; results are
    # logged to a separate CSV.
    trial = tb.Trial(model,
                     criterion=torch.nn.CrossEntropyLoss(),
                     metrics=['loss', 'acc'],
                     callbacks=[CSVLogger(path + '/test-log.csv')
                                ]).to(args.device)
    trial.with_generators(test_generator=testloader)
    trial.predict(verbose=2)
Exemple #3
0
def train(args,
          model,
          model_loss,
          trainloader,
          valloader,
          epochs,
          name='model'):
    """Fit ``model`` with torchbearer, checkpointing/logging under ``name``.

    Returns the finished trial so callers can inspect history or state.
    """
    initial_lr, schedulers = parse_learning_rate_arg(args.learning_rate)
    prefix = str(args.output) + '/' + name

    optimiser = build_optimiser(args, model, initial_lr)

    callbacks = [
        Interval(filepath=prefix + '.{epoch:02d}.pt', period=10),
        MostRecent(filepath=prefix + '_final.pt'),
        CSVLogger(prefix + '-train-log.csv'),
    ]
    callbacks.extend(schedulers)

    # Accuracy is only meaningful for classification losses.
    metrics = ['loss', 'lr']
    if isinstance(model_loss, nn.CrossEntropyLoss):
        metrics.append('acc')

    trial = tb.Trial(model,
                     optimiser,
                     model_loss,
                     metrics=metrics,
                     callbacks=callbacks).to(args.device)
    trial.with_generators(train_generator=trainloader, val_generator=valloader)
    trial.run(epochs=epochs, verbose=2)
    return trial
def build_trial(args):
    """Construct a torchbearer Trial for autoencoder training.

    Returns:
        (trial, model) — the configured Trial and the underlying model.
    """
    model = build_model(args)
    loss = build_loss(args)

    init_lr, sched = parse_learning_rate_arg(args.learning_rate)
    optim = torch.optim.Adam(model.parameters(), lr=init_lr, weight_decay=args.weight_decay)

    # Inverse of the dataset's input transform — used to turn tensors back
    # into viewable images for the imaging callbacks below.
    inv = get_dataset(args.dataset).inv_transform

    callbacks = [
        Interval(filepath=str(args.output) + '/model_{epoch:02d}.pt', period=args.snapshot_interval),
        CSVLogger(str(args.output) + '/log.csv'),
        # Save grids of reconstructed and ground-truth validation samples.
        imaging.FromState(tb.Y_PRED, transform=inv).on_val().cache(args.num_reconstructions).make_grid().with_handler(
            img_to_file(str(args.output) + '/val_reconstruction_samples_{epoch:02d}.png')),
        imaging.FromState(tb.Y_TRUE, transform=inv).on_val().cache(args.num_reconstructions).make_grid().with_handler(
            img_to_file(str(args.output) + '/val_samples.png')),
        *model.get_callbacks(args),
        *sched,
    ]

    if args.variational:
        # VAE mode: add the KL divergence between the approximate posterior
        # (MU/LOGVAR from state) and a unit Gaussian to the loss.
        @torchbearer.callbacks.add_to_loss
        def add_kld_loss_callback(state):
            kl = torch.mean(0.5 * torch.sum(torch.exp(state[LOGVAR]) + state[MU] ** 2 - 1. - state[LOGVAR], 1))
            return kl

        callbacks.append(add_kld_loss_callback)

    trial = tb.Trial(model, optimizer=optim, criterion=loss, metrics=['loss', 'mse', 'lr'], callbacks=callbacks)
    trial.with_loader(autoenc_loader)

    return trial, model
def train(optimizer_name):
    """Train ResNet18 on DATASET with the named optimiser, checkpointing and
    CSV-logging under the current trial number; results are appended to the
    module-level ``results`` list."""
    scheduler = torchbearer.callbacks.torch_scheduler.MultiStepLR(milestones=[60, 120, 160], gamma=0.2)

    # CIFAR100 needs a 100-way head; everything else uses the default.
    model = ResNet18(100) if DATASET == 'CIFAR100' else ResNet18()

    run_prefix = DATASET + "\\" + str(trial_num) + "\\" + optimizer_name
    checkpoint = torchbearer.callbacks.ModelCheckpoint(run_prefix + '_checkpoint.pt')
    logger = torchbearer.callbacks.CSVLogger(run_prefix + '_log.pt', separator=',', append=True)

    # Dispatch table replaces the if/elif chain; each entry builds its
    # optimiser lazily over the freshly created model's parameters.
    factories = {
        'SGD': lambda: torch.optim.SGD(model.parameters(), lr=0.05, momentum=0.9, weight_decay=0.001),
        'Lookahead': lambda: Lookahead(torch.optim.SGD(model.parameters(), lr=0.1), la_alpha=0.8, la_steps=5),
        'AdamW': lambda: torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=1),
        'Polyak': lambda: torch.optim.ASGD(model.parameters(), lr=0.3, weight_decay=0.001),
        'Adam': lambda: torch.optim.Adam(model.parameters()),
        'Lookahead(Adam)': lambda: Lookahead(torch.optim.Adam(model.parameters())),
    }
    if optimizer_name not in factories:
        raise ValueError("Optimizer not setup")
    optimizer = factories[optimizer_name]()

    loss_function = nn.CrossEntropyLoss()
    trial = torchbearer.Trial(model, optimizer, loss_function,
                              metrics=['loss', 'accuracy'],
                              callbacks=[scheduler, checkpoint, logger]).to(device)
    trial.with_generators(trainloader, val_generator=valloader)
    results.append(trial.run(epochs=NB_EPOCHS))
Exemple #6
0
    def on_batch(self, state):
        """Optimise a CAM input image against the wrapped model for one batch.

        Returns the optimised input image squashed through a sigmoid.
        """
        # Remember the outer model's mode so it can be restored afterwards.
        training = state[torchbearer.MODEL].training
        state[torchbearer.MODEL].eval()

        targets_hot = self._targets_hot(state)

        key = self.logit_key

        # Keep the outer model in eval mode on every inner-trial sample.
        @torchbearer.callbacks.on_sample
        def make_eval(_):
            state[torchbearer.MODEL].eval()

        model = _CAMWrapper(self.input_size,
                            state[torchbearer.MODEL],
                            transform=self.in_transform)
        # The inner trial optimises only the wrapper's input image: the
        # optimiser receives just the parameters that require grad from
        # [model.input_image].
        trial = torchbearer.Trial(model,
                                  self.optimizer_factory(
                                      filter(lambda p: p.requires_grad,
                                             [model.input_image])),
                                  _cam_loss(key, targets_hot, self.decay),
                                  callbacks=[make_eval])
        trial.for_train_steps(self.steps).to(state[torchbearer.DEVICE],
                                             state[torchbearer.DATA_TYPE])
        trial.run(verbose=self.verbose)

        # Restore training mode only if the outer model was training before.
        if training:
            state[torchbearer.MODEL].train()

        return model.input_image.sigmoid()
Exemple #7
0
    def test_only_model(self):
        """A Trial constructed from only a model runs and yields an empty history."""
        net = Net(torch.tensor([2.0, 1.0, 10.0]))
        history = torchbearer.Trial(net).run()
        self.assertListEqual(history, [])
Exemple #8
0
    def test_with_loader(self):
        """A custom loader installed via with_loader is invoked during run()."""
        net = Net(torch.tensor([2.0, 1.0, 10.0]))
        sgd = torch.optim.SGD(net.parameters(), lr=0.01)
        flag = {'loaded': False}

        def custom_loader(state):
            # Stand-in loader: supply dummy data and record that it ran.
            state[torchbearer.X], state[torchbearer.Y_TRUE] = None, None
            flag['loaded'] = True

        trial = torchbearer.Trial(
            net, sgd, loss,
            callbacks=[torchbearer.callbacks.MostRecent(filepath='test.pt')])
        trial = trial.for_train_steps(2).for_val_steps(1)
        trial.with_loader(custom_loader)

        self.assertFalse(flag['loaded'])
        trial.run(1)
        self.assertTrue(flag['loaded'])

        import os
        os.remove('test.pt')
Exemple #9
0
def main_wrap(objects, **kwargs):
    """Train a Net on a ToyTask, record its weights, and log mean test accuracy.

    Appends extracted weights to the module-level ``data`` list and the mean
    test accuracy (over ``val_epochs`` evaluations) to ``acc_plot``.
    """
    global acc_plot, data
    model = Net(params['objects'])  # change to different model here
    loss_function = nn.CrossEntropyLoss()
    optimiser = optim.Adam(model.parameters(), lr=0.01)
    trial = torchbearer.Trial(model,
                              optimiser,
                              loss_function,
                              metrics=['accuracy'],
                              verbose=0).to(device)
    #callbacks=[activation_weights],

    dataset = ToyTask(objects, **kwargs)  # change to different dataset here
    loader = DataLoader(dataset, batch_size=batch_size)
    # The same loader serves as both the train and test generator.
    trial.with_generators(train_generator=loader, test_generator=loader)
    print("\nTraining:")
    #    activation_weights.on_end_epoch(None)
    trial.run(epochs=train_epochs, verbose=1)

    data.append(extract_weights(model))

    # Average the test accuracy over repeated evaluation passes.
    acc = []
    for i in tqdm(range(val_epochs), desc='Testing'):
        acc.append(
            trial.evaluate(verbose=0,
                           data_key=torchbearer.TEST_DATA)['test_acc'])

    acc_plot.append(np.mean(acc))
Exemple #10
0
    def test_no_model(self):
        """Running a Trial built with no model emits exactly one warning."""
        trial = torchbearer.Trial(None)

        import warnings
        with warnings.catch_warnings(record=True) as caught:
            trial.run()
            self.assertEqual(len(caught), 1)
Exemple #11
0
    def test_zero_model(self):
        """An all-zero linear model predicts zero for every input."""
        net = Linear(3, 1)
        init.constant_(net.weight, 0)
        init.constant_(net.bias, 0)
        sgd = torch.optim.SGD(net.parameters(), lr=0.01)

        trial = torchbearer.Trial(net, sgd, loss)
        trial.with_test_data(torch.rand(10, 3), batch_size=3)

        for pred in trial.predict():
            self.assertAlmostEqual(pred, 0)
    def test_basic_opt(self):
        """SGD drives NetWithState's parameters to the expected optimum [5, 0, 1]."""
        net = NetWithState(torch.tensor([2.0, 1.0, 10.0]))
        sgd = torch.optim.SGD(net.parameters(), lr=0.01)

        trial = tb.Trial(net, sgd, loss).for_train_steps(1000).for_val_steps(1)
        trial.run()

        for value, expected in zip(net.pars, (5.0, 0.0, 1.0)):
            self.assertAlmostEqual(value.item(), expected, places=4)
def worker():
    """Per-process DDP worker: train MNIST for 10 epochs, then report the model.

    Relies on module-level ``args`` and the ``sync``/``grad``/``flatten``
    callbacks for distributed gradient handling.
    """
    setup()
    print("Rank and node: {}-{}".format(args.rank, platform.node()))

    # CPU-only toy model wrapped for distributed data-parallel training.
    model = ToyModel().to('cpu')
    ddp_model = DDP(model)

    kwargs = {}

    ds = datasets.MNIST('./data/mnist/',
                        train=True,
                        download=True,
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307, ), (0.3081, ))
                        ]))

    # Distributed samplers partition the dataset across ranks.
    train_sampler = torch.utils.data.distributed.DistributedSampler(ds)
    train_loader = torch.utils.data.DataLoader(ds,
                                               batch_size=128,
                                               sampler=train_sampler,
                                               **kwargs)

    test_ds = datasets.MNIST('./data/mnist',
                             train=False,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.1307, ), (0.3081, ))
                             ]))
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_ds)
    test_loader = torch.utils.data.DataLoader(test_ds,
                                              batch_size=128,
                                              sampler=test_sampler,
                                              **kwargs)

    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)

    trial = torchbearer.Trial(ddp_model,
                              optimizer,
                              loss_fn,
                              metrics=['loss', 'acc'],
                              callbacks=[sync, grad, flatten])
    trial.with_train_generator(train_loader)
    trial.run(10, verbose=2)

    # Print a hash and the first parameter so ranks can be compared for
    # synchronisation after training.
    print("Model hash: {}".format(hash(model)))
    print('First parameter: {}'.format(next(model.parameters())))

    cleanup()
Exemple #14
0
def main_wrap(objects, **kwargs):
  """Train a Net on a ToyTask and append mean test accuracy to global acc_plot."""
  global acc_plot
  model = Net(params['objects'])
  loss_function = nn.CrossEntropyLoss()
  optimiser = optim.Adam(model.parameters(), lr=0.01)
  trial = torchbearer.Trial(model, optimiser, loss_function,  metrics=['accuracy']).to(device)
  dataset = ToyTask(objects, **kwargs)
  loader = DataLoader(dataset, batch_size=batch_size)#, pin_memory=True)
  # The same loader is used for both training and testing.
  trial.with_generators(train_generator=loader, test_generator=loader)
  trial.run(epochs=numepocs, verbose=1)
  # Average the test accuracy over 200 evaluation passes.
  acc = []
  for i in range (200):
    acc.append(trial.evaluate(verbose=0, data_key=torchbearer.TEST_DATA)['test_acc'])

  acc_plot.append(np.mean(acc))
  print(acc_plot)
Exemple #15
0
    def test_callbacks(self):
        """Smoke-test a broad set of callbacks through run/predict/evaluate."""
        from torch.utils.data import TensorDataset

        # Three identically-shaped random datasets for train/val/test.
        traingen = TensorDataset(torch.rand(10, 1, 3), torch.rand(10, 1))
        valgen = TensorDataset(torch.rand(10, 1, 3), torch.rand(10, 1))
        testgen = TensorDataset(torch.rand(10, 1, 3), torch.rand(10, 1))

        model = torch.nn.Linear(3, 1)
        optim = torch.optim.SGD(model.parameters(), lr=0.01)

        cbs = [
            c.EarlyStopping(),
            c.GradientClipping(10, model.parameters()),
            c.Best('test.pt'),
            c.MostRecent('test.pt'),
            c.ReduceLROnPlateau(),
            c.CosineAnnealingLR(0.1, 0.01),
            c.ExponentialLR(1),
            c.Interval('test.pt'),
            c.CSVLogger('test_csv.pt'),
            c.L1WeightDecay(),
            c.L2WeightDecay(),
            c.TerminateOnNaN(monitor='fail_metric'),
        ]

        trial = torchbearer.Trial(model,
                                  optim,
                                  torch.nn.MSELoss(),
                                  metrics=['loss'],
                                  callbacks=cbs)
        trial = trial.with_generators(traingen, valgen, testgen)
        trial.run(2)
        trial.predict()
        trial.evaluate(data_key=torchbearer.TEST_DATA)
        trial.evaluate()

        import os
        os.remove('test.pt')
        os.remove('test_csv.pt')
Exemple #16
0
    def on_batch(self, state):
        """Run an inner optimisation of self.image against the current model.

        Returns the optimised image via ``get_valid_image()``.
        """
        # Remember the outer model's training flag so it can be restored.
        training = state[torchbearer.MODEL].training

        # Force the wrapped model into eval mode on every inner-trial sample.
        @torchbearer.callbacks.on_sample
        def make_eval(_):
            state[torchbearer.MODEL].eval()

        # Gradient ascent: minimising the negated criterion maximises it.
        @torchbearer.callbacks.add_to_loss
        def loss(state):
            return -self.criterion(state)

        model = _Wrapper(self.image, state[torchbearer.MODEL])
        trial = torchbearer.Trial(model,
                                  self.optimizer,
                                  callbacks=[make_eval, loss])
        trial.for_train_steps(self.steps).to(state[torchbearer.DEVICE],
                                             state[torchbearer.DATA_TYPE])
        trial.run(verbose=self.verbose)

        # Restore training mode only if the model was training beforehand.
        if training:
            state[torchbearer.MODEL].train()

        return model.image.get_valid_image()
Exemple #17
0
    def on_batch(self, state):
        """Optimise a CAM input batch against the wrapped model.

        Returns the optimised input with its leading batch dim removed.
        """
        # Preserve the outer model's mode; run the inner loop in eval mode.
        training = state[torchbearer.MODEL].training
        state[torchbearer.MODEL].eval()

        targets_hot = self._targets_hot(state)

        # Without a prob_key the logits are exponentiated (presumably they are
        # log-probabilities — confirm against the model); otherwise the values
        # under prob_key are used as-is.
        to_prob = (lambda x: x.exp()) if self.prob_key is None else (
            lambda x: x)
        key = self.logit_key if self.prob_key is None else self.prob_key

        model = _CAMWrapper(self.input_size, state[torchbearer.MODEL])
        # Only the wrapper's input batch is handed to the optimiser, so only
        # it gets updated by the inner trial.
        trial = torchbearer.Trial(
            model,
            self.optimizer_factory(
                filter(lambda p: p.requires_grad, [model.input_batch])),
            _cam_loss(key, to_prob, targets_hot, self.decay))
        trial.for_train_steps(self.steps).to(state[torchbearer.DEVICE],
                                             state[torchbearer.DATA_TYPE])
        trial.run(verbose=self.verbose)

        if training:
            state[torchbearer.MODEL].train()

        return model.input_batch.squeeze(0)
def build_test_trial(args):
    """Build an inference-only Trial for autoencoder evaluation.

    Returns:
        (trial, model) — the configured Trial and the model it wraps.
    """
    model = build_model(args).to(args.device)
    # Inverse input transform, used to render predictions as images.
    inv = get_dataset(args.dataset).inv_transform

    callbacks = [
        CSVLogger(str(args.output) + '/log.csv'),
        # Save image grids of test-set reconstructions and of the ground truth.
        imaging.FromState(tb.Y_PRED, transform=inv).on_test().cache(args.num_reconstructions).make_grid().with_handler(
            img_to_file(str(args.output) + '/test_reconstruction_samples.png')),
        imaging.FromState(tb.Y_TRUE, transform=inv).on_test().cache(args.num_reconstructions).make_grid().with_handler(
            img_to_file(str(args.output) + '/test_samples.png'))
    ]

    metrics = ['mse'] #Additional metrics: ChamferMetric(), ModifiedHausdorffMetric()

    if args.classifier_weights:
        # Optionally score reconstructions with a pretrained classifier whose
        # weights are loaded from a torchbearer checkpoint (state[tb.MODEL]).
        classifier = get_classifier_model(args.classifier_model)().to(args.device)
        state = torch.load(args.classifier_weights, map_location=args.device)
        classifier.load_state_dict(state[tb.MODEL])
        metrics.append(ClassificationMetric(classifier))

    trial = tb.Trial(model, metrics=metrics, callbacks=callbacks)
    trial.with_loader(autoenc_loader)

    return trial, model
        # gaussian blur the image with the chosen sigma value
        blurredImage = blur(testset.test_data[i].numpy(), sigma_values[j])
        # update the training set with the blurred image
        testset.test_data[i] = torch.from_numpy(blurredImage)

    trainloader = DataLoader(trainset, batch_size=128, shuffle=True)
    testloader = DataLoader(testset, batch_size=128, shuffle=True)

    #test the controlled network
    model = torch.load('save_ann.pkl')
    loss_function = nn.CrossEntropyLoss()
    # live_loss_plot = LiveLossPlot()
    optimiser = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)
    # trial = torchbearer.Trial(model, optimiser, loss_function, callbacks=[live_loss_plot], metrics=['loss', 'accuracy']).to(device)
    trial = torchbearer.Trial(model,
                              optimiser,
                              loss_function,
                              metrics=['loss', 'accuracy']).to(device)
    trial.with_generators(trainloader, test_generator=testloader)
    results = trial.evaluate(data_key=torchbearer.TEST_DATA)
    print(results)
    blur_acc_control.append(100.0 * results["test_acc"])

    #test the sleeping network
    model = torch.load('save_sleep.pkl')
    loss_function = nn.CrossEntropyLoss()
    # live_loss_plot = LiveLossPlot()
    optimiser = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)
    # trial = torchbearer.Trial(model, optimiser, loss_function, callbacks=[live_loss_plot], metrics=['loss', 'accuracy']).to(device)
    trial = torchbearer.Trial(model,
                              optimiser,
                              loss_function,
        return state[tb.MODEL].x.data


@tb.callbacks.on_step_training
def greedy_update(state):
    """Project the model's single-element parameter x back onto [-1, 1] after each step."""
    x = state[tb.MODEL].x
    if x > 1:
        x.data.fill_(1)
    elif x < -1:
        x.data.fill_(-1)


# Compare plain Adam against its AMSGrad variant on both the Online and
# Stochastic models, logging per-batch metrics to TensorBoard.
training_steps = 6000000

model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model, optim, loss, [est()], pass_state=True, callbacks=[greedy_update, TensorBoard(comment='adam', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()

# Same online model, but with AMSGrad enabled.
model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99], amsgrad=True)
tbtrial = tb.Trial(model, optim, loss, [est()], pass_state=True, callbacks=[greedy_update, TensorBoard(comment='amsgrad', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()

# Stochastic model with plain Adam (no pass_state here).
model = Stochastic()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model, optim, loss, [est()], callbacks=[greedy_update, TensorBoard(comment='adam', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()

# Stochastic model with AMSGrad.
model = Stochastic()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99], amsgrad=True)
tbtrial = tb.Trial(model, optim, loss, [est()], callbacks=[greedy_update, TensorBoard(comment='amsgrad', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
Exemple #21
0
 def test_no_model(self):
     """run() succeeds with no model; the MockModel stand-in returns None."""
     trial = torchbearer.Trial(None)
     trial.run()
     self.assertTrue(torchbearer.trial.MockModel()(torch.rand(1)) is None)
Exemple #22
0
def main():
    """Barlow Twins pre-training followed by supervised training and testing."""
    # Two-pass argument parsing: discover the shared args first so the
    # model/method-specific sub-args can be registered on the real parser.
    fake_parser = FakeArgumentParser(add_help=False, allow_abbrev=False)
    add_shared_args(fake_parser)
    fake_args, _ = fake_parser.parse_known_args()

    parser = argparse.ArgumentParser()
    add_shared_args(parser)
    add_sub_args(fake_args, parser)
    args = parser.parse_args()

    # Random augmentation pipeline; each input is augmented twice to produce
    # the two views consumed by the Barlow Twins objective.
    rndtf = transforms.Compose([
        transforms.RandomAffine(10.0,
                                translate=(0.1, 0.1),
                                scale=(0.95, 1.01),
                                shear=1,
                                fillcolor=None),
        transforms.Lambda(lambda x: x * (1 + (torch.rand_like(x) - 0.5) / 10))
    ])
    args.additional_transforms = transforms.Lambda(lambda x:
                                                   (rndtf(x), rndtf(x)))

    # Pre-training uses its own batch size; restore the original afterwards.
    orig_batch_size = args.batch_size
    args.batch_size = args.barlow_batch_size
    trainloader, valloader, testloader = build_dataloaders(args)
    args.batch_size = orig_batch_size

    args.output.mkdir(exist_ok=True, parents=True)
    save_args(args.output)

    model = get_model(args.model)()
    btmodel = BarlowTwinsModel(model)

    model_loss = nn.CrossEntropyLoss()
    btmodel_loss = BarlowTwinsLoss()

    # Stage 1: self-supervised Barlow Twins pre-training of the wrapper model.
    train(args,
          btmodel,
          btmodel_loss,
          trainloader,
          valloader,
          args.barlow_epochs,
          name='btmodel')

    # Freeze the backbone features unless fine-tuning was requested.
    model.lock_features(not args.finetune)

    trainloader, valloader, testloader = build_dataloaders(
        args)  # reload data with other batch size
    # Stage 2: supervised training of the (possibly frozen) base model.
    train(args,
          model,
          model_loss,
          trainloader,
          valloader,
          args.epochs,
          name='model')

    # Final inference pass over the test set, logged to its own CSV.
    trial = tb.Trial(model,
                     criterion=torch.nn.CrossEntropyLoss(),
                     metrics=['loss', 'acc'],
                     callbacks=[CSVLogger(str(args.output) + '/test-log.csv')
                                ]).to(args.device)
    trial.with_generators(test_generator=testloader)
    trial.predict(verbose=2)
Exemple #23
0
 def test_no_train_steps(self):
     """run() completes when only validation steps are configured."""
     torchbearer.Trial(None).for_val_steps(10).run()
Exemple #24
0
# Model and optimizer
# GAN optimised end-to-end with Adam (beta1 = 0.5).
model = GAN()
optim = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.5, 0.999))


@tb.metrics.running_mean
@tb.metrics.mean
class g_loss(tb.metrics.Metric):
    """Metric reporting the generator loss stored in state under G_LOSS.

    The decorators wrap it with epoch-mean and running-mean aggregation.
    """

    def __init__(self):
        super().__init__('g_loss')

    def process(self, state):
        # Raw per-batch value read from the shared state dict.
        return state[G_LOSS]


@tb.metrics.running_mean
@tb.metrics.mean
class d_loss(tb.metrics.Metric):
    """Metric reporting the discriminator loss stored in state under D_LOSS.

    The decorators wrap it with epoch-mean and running-mean aggregation.
    """

    def __init__(self):
        super().__init__('d_loss')

    def process(self, state):
        # Raw per-batch value read from the shared state dict.
        return state[D_LOSS]


# Train with pass_state=True so callbacks/metrics can read the G/D losses
# that loss_callback places in state.
torchbearertrial = tb.Trial(model, optim, criterion=None, metrics=['loss', g_loss(), d_loss()],
                            callbacks=[loss_callback, saver_callback], pass_state=True)
torchbearertrial.with_train_generator(dataloader)
torchbearertrial.to(device)
torchbearertrial.run(epochs=200)
Exemple #25
0
                            tb.LOSS, DISC_OPT)


def closure(state):
    """Custom optimisation closure: generator pass/step, then discriminator pass/step.

    The order matters — each closure_* prepares the gradients that the
    following optimiser step consumes.
    """
    closure_gen(state)
    state[GEN_OPT].step()
    closure_disc(state)
    state[DISC_OPT].step()


from torchbearer.metrics import mean, running_mean
metrics = ['loss', mean(running_mean(D_LOSS)), mean(running_mean(G_LOSS))]

# The Trial wraps only the generator and has no optimiser of its own —
# all optimisation happens inside the custom closure.
trial = tb.Trial(generator,
                 None,
                 criterion=gen_crit,
                 metrics=metrics,
                 callbacks=[saver_callback])
trial.with_train_generator(dataloader, steps=200000)
trial.to(device)

# Inject the discriminator, both optimisers and the discriminator criterion
# into the trial's state so the closure can reach them.
new_keys = {
    DISC_MODEL: discriminator.to(device),
    DISC_OPT: optimizer_D,
    GEN_OPT: optimizer_G,
    DISC_CRIT: disc_crit
}
trial.state.update(new_keys)
trial.with_closure(closure)
trial.run(epochs=1)
Exemple #26
0
        x = [5,0,1]
        """
        out = torch.zeros_like(self.pars)
        out[0] = self.pars[0] - 5
        out[1] = self.pars[1]
        out[2] = self.pars[2] - 1
        return torch.sum(out**2)

    def forward(self, _, state):
        # Publish the current parameter estimate (detached, as a column
        # vector) for metrics to read, then return the objective value.
        state[ESTIMATE] = self.pars.detach().unsqueeze(1)
        return self.f()


def loss(y_pred, y_true):
    """Pass-through criterion: the model's output already is the loss value."""
    del y_true  # unused; present only to satisfy the criterion signature
    return y_pred


# Fit Net by direct loss minimisation (the criterion just returns the model
# output) while tracking a running mean of the ESTIMATE state value.
p = torch.tensor([2.0, 1.0, 10.0])
training_steps = 50000

model = Net(p)
optim = torch.optim.SGD(model.parameters(), lr=0.0001)

tbtrial = tb.Trial(model,
                   optim,
                   loss, [tb.metrics.running_mean(ESTIMATE, dim=1), 'loss'],
                   pass_state=True)
tbtrial.for_train_steps(training_steps).to('cuda')
tbtrial.run()
# Show the fitted parameter values after training.
print(list(model.parameters())[0].data)
# utilize the GPU if one exists, otherwise use the CPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# initialize control network
control_model = NetworkControl(784, 1200, 10)

# define the loss function
loss_function = nn.CrossEntropyLoss()
# define the optimiser, learning rate and momentum
# FIX: optimise the control network's own parameters — previously this used
# `model.parameters()`, which belonged to a different network, so the trial
# stepped the wrong weights and left control_model untrained.
optimiser = optim.SGD(control_model.parameters(), lr=0.1, momentum=0.5)

# create a live loss plot of accuracy and loss after each training epoch
plot = LiveLossPlot()

# train the network using model, optimiser and loss function for 2 epochs
trial = torchbearer.Trial(control_model, optimiser, loss_function, callbacks=[plot], metrics=['loss', 'accuracy']).to(device)
trial.with_generators(trainloader, test_generator=testloader)
trial.run(epochs=2)

# save the trained model
# FIX: save the network that was actually trained (was `model.state_dict()`).
torch.save(control_model.state_dict(), "control.pth")

# print the accuracy and loss of control network on the MNIST test data
results = trial.evaluate(data_key=torchbearer.TEST_DATA)
print("Test Loss:\t", results.get("test_loss"))
print("Test Accuracy:\t", results.get("test_acc"))

from scipy.ndimage import gaussian_filter
import numpy as np
from matplotlib import pyplot as plt
import random