def run(args):
    dataset = AlderleyWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = alderley_cgan_data_loader(args, dataset=dataset)  # get the data
    model = CGANModel(
        args,
        discriminators=[PixelDiscriminator(), patchCDiscriminatorNetwork(args)],
        generator=UnetUpsample())

    # Build trainer
    trainer = Trainer(model)
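    # The criterion below trains only the discriminators; the generator is
    # updated separately by the CGeneratorTrainingCallback registered further down.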
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer(
        'Adam',
        [parameter for discriminator in model.discriminators
         for parameter in discriminator.parameters()],
        lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(CGeneratorTrainingCallback(
        args,
        parameters=model.generator.parameters(),
        criterion=WGANGeneratorLoss(), dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Example #2
    def test_multi_gpu(self):
        import torch
        if not torch.cuda.is_available():
            return

        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar10 import get_cifar10_loaders
        import os

        # Make model
        net = self._make_test_model()
        # Make trainer
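        # With multiple devices, inferno's Trainer.cuda wraps the model in
        # torch.nn.DataParallel across the listed GPUs.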
        trainer = Trainer(model=net) \
            .build_optimizer('Adam') \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .validate_every((1, 'epochs')) \
            .save_every((1, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves')) \
            .save_at_best_validation_score() \
            .set_max_num_epochs(2)\
            .cuda(devices=[0, 1, 2, 3])

        train_loader, validate_loader = get_cifar10_loaders(
            root_directory=self.ROOT_DIR, download=True)
        trainer.bind_loader('train', train_loader)
        trainer.bind_loader('validate', validate_loader)

        trainer.fit()
Example #3
    def test_cifar(self):
        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar10 import get_cifar10_loaders
        # Build cifar10 loaders
        trainloader, testloader = get_cifar10_loaders(
            root_directory=join(self.ROOT_DIR, 'data'),
            download=self.DOWNLOAD_CIFAR)
        # Make model
        net = self._make_test_model()
        tic = time.time()
        # Make trainer
        trainer = Trainer(model=net)\
            .build_optimizer('Adam')\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .validate_every((1, 'epochs'))\
            .save_every((1, 'epochs'), to_directory=join(self.ROOT_DIR, 'saves'))\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)
        # Bind trainer to datasets
        trainer.bind_loader('train',
                            trainloader).bind_loader('validate', testloader)
        # Check device and fit
        if self.CUDA:
            if self.HALF_PRECISION:
                trainer.cuda().set_precision('half').fit()
            else:
                trainer.cuda().fit()
        else:
            trainer.fit()
        toc = time.time()
        print("[*] Elapsed time: {} seconds.".format(toc - tic))
def train_net_with_inferno(config_dict, net, criterion, optimizer, trainloader,
                           valloader):
    """
    Trains the NeuralNet with inferno
    :param config_dict: dict with configs
    :param net: NeuralNet
    :param criterion: criterion for NN
    :param optimizer: optimizer for NN
    :param trainloader: dataloader with training data
    :param valloader: dataloader with validation data
    """

    print("Start training with inferno!")

    from inferno.trainers.basic import Trainer
    from inferno.trainers.callbacks.essentials import SaveAtBestValidationScore

    model_folder = os.path.join(config_dict["project_folder"], "model/")
    if not os.path.exists(model_folder):
        os.mkdir(model_folder)

    trainer = Trainer(net) \
        .save_every((1, 'epochs'), to_directory=model_folder) \
        .build_criterion(criterion) \
        .build_optimizer(optimizer) \
        .build_metric(sorensen_dice_metric) \
        .evaluate_metric_every('never') \
        .validate_every((1, 'epochs'), for_num_iterations=50) \
        .register_callback(SaveAtBestValidationScore(smoothness=.5))
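    # SaveAtBestValidationScore (above) checkpoints whenever the smoothed
    # validation score improves; 'smoothness' controls the score smoothing.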

    trainer.set_max_num_epochs(config_dict['max_train_epochs'])
    trainer.bind_loader('train',
                        trainloader).bind_loader('validate', valloader)
    trainer.cuda()
    trainer.fit()
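
# A minimal usage sketch for train_net_with_inferno (not from the original
# source): the toy net, data, and optimizer are placeholder assumptions, and
# it presumes sorensen_dice_metric and a CUDA device are available, as above.
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

net = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(),
                    nn.Conv2d(8, 1, 1))
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
images = torch.randn(32, 1, 64, 64)
masks = (torch.rand(32, 1, 64, 64) > 0.5).float()
trainloader = DataLoader(TensorDataset(images, masks), batch_size=8)
valloader = DataLoader(TensorDataset(images, masks), batch_size=8)
os.makedirs("runs/demo", exist_ok=True)  # parent for the model/ subfolder
config_dict = {"project_folder": "runs/demo", "max_train_epochs": 2}
train_net_with_inferno(config_dict, net, criterion, optimizer,
                       trainloader, valloader)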
    def test_serialization(self):
        trainer = self.get_trainer(3)
        # Serialize
        trainer.save()
        # Deserialize
        trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
        train_loader, test_loader = self.get_random_dataloaders(
            input_channels=3)
        trainer.bind_loader('train',
                            train_loader).bind_loader('validate', test_loader)
        trainer.fit()
Example #6
    def test_serialization(self):
        if not hasattr(self, 'trainer'):
            self.setUp()
        # Serialize
        self.trainer.save()
        # Deserialize
        trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
        train_loader, test_loader = \
            get_cifar10_loaders(root_directory=os.path.join(self.ROOT_DIR, 'data'),
                                download=self.DOWNLOAD_CIFAR)
        trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
        trainer.fit()
        trainer.print("Inspect logs at: {}".format(trainer.log_directory))
def task(env):
    dataset_name = 'MNIST'
    dataset_path = env.dataset(dataset_name)
    batch_size = 128
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))  # MNIST is single-channel
        ])
    mnist_set = torchvision.datasets.MNIST(str(dataset_path), train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(mnist_set, batch_size=batch_size,
                                               shuffle=True, num_workers=2)
    mnist_test_set = torchvision.datasets.MNIST(str(dataset_path), train=False, download=True, transform=transform)
    test_loader = torch.utils.data.DataLoader(mnist_test_set, batch_size=512,
                                               shuffle=True, num_workers=2)

    model = Model(n_classes=10, in_channels=1, layers=4)
    exp = TrainLog(env, dataset_name, model, log_time=True)

    logging.info(' saving  to %s', exp.save_directory)
    logging.info(' logging to %s', exp.log_directory)
    # Load loaders
    # train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
    #                                                     download=DOWNLOAD_CIFAR)

    # Build trainer
    iterations = 5000
    trainer = Trainer(model) \
        .build_criterion('NLLLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam', lr=0.001) \
        .save_every((1, 'epochs')) \
        .save_to_directory(str(exp.save_directory)) \
        .validate_every((1, 'epochs'))\
        .set_max_num_iterations(iterations) \
        .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                        log_images_every='never'),
                      log_directory=str(exp.log_directory))
    # Bind loaders
    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', test_loader)
    trainer.cuda()

    # Go!
    logging.info('start training')
    trainer.fit()
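    # Fine-tuning pass: extend the iteration budget and continue training
    # with a lower learning rate.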
    trainer.set_max_num_iterations(trainer.iteration_count + iterations)
    trainer.build_optimizer('Adam', lr=0.0001)
    logging.info('slower lr')
    trainer.fit()
Example #8
def train_model(args):
    """
    Performs the training
    """
    if os.path.exists(
            os.path.join(args.save_directory,
                         Trainer()._checkpoint_filename)):
        # Skip training if checkpoint exists
        return
    model = WsjModel(args)
    kwargs = {
        'num_workers': args.num_workers,
        'pin_memory': True
    } if args.cuda else {}
    train_loader = DataLoader(WsjDataset('train', args),
                              shuffle=True,
                              batch_size=args.batch_size,
                              collate_fn=wsj_collate_fn,
                              **kwargs)
    validate_loader = DataLoader(WsjDataset('dev', args),
                                 shuffle=True,
                                 batch_size=args.batch_size,
                                 collate_fn=wsj_collate_fn,
                                 **kwargs)
    # Build trainer
    trainer = Trainer(model) \
        .build_criterion(ctc_loss()) \
        .build_metric(ctc_loss()) \
        .build_optimizer('Adam') \
        .validate_every((1, 'epochs')) \
        .save_every((1, 'epochs')) \
        .save_to_directory(args.save_directory) \
        .set_max_num_epochs(args.epochs)
    # .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
    #                                 log_images_every='never'),
    #               log_directory=args.save_directory)

    # Bind loaders
    trainer.bind_loader('train', train_loader, num_inputs=3)
    trainer.bind_loader('validate', validate_loader, num_inputs=3)
    trainer.register_callback(EpochTimer())
    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
    trainer.save()
Example #9
def my_train(load, folder):
    click.echo('starting training')
    os.makedirs(folder, exist_ok=True)

    # joint_transform = Compose(
    #     RandomRotate(),
    #     RandomTranspose(),
    #     RandomFlip()
    # )

    # setup logger
    os.makedirs('derived_data/log', exist_ok=True)
    Logger.instance().setup('derived_data/log')

    vae = Vae()

    ds = HemoDataset(root_folder=root_folder, image_transform=None, training=True)
    train_loader = torch.utils.data.DataLoader(ds, batch_size=1536, num_workers=8)

    # Build trainer
    trainer = Trainer(vae)
    trainer.save_to_directory(folder)

    if load:
        trainer.load()
    # trainer.cuda(devices=[0, 1])
    trainer.cuda()

    trainer.build_criterion(vae.loss_function())
    trainer.build_optimizer('Adam', lr=0.001)
    # trainer.validate_every((2, 'epochs'))
    trainer.save_every((1, 'epochs'))
    trainer.set_max_num_epochs(100)
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never',
                                           # log_images_every=(1, 'iteration'),
                                           log_directory=folder))

    # Bind loaders
    trainer.bind_loader('train', train_loader, num_inputs=1, num_targets=1)

    # bind callbacks
    trainer.register_callback(GarbageCollection(), trigger='end_of_training_iteration')
    trainer.register_callback(ShowMinimalConsoleInfo(), trigger='end_of_training_iteration')

    # trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
    trainer.fit()
    pushover_notification.send('embeddings generated')
Example #10
def run(args):
    dataset = RorschachWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = rorschach_cgan_data_loader(args,
                                              dataset=dataset)  # get the data
    # todo
    model = patchCWGANModel(args,
                            discriminator=patchCDiscriminatorNetwork(args),
                            generator=CGeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight,
                               model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(
        CGeneratorTrainingCallback(args,
                                   parameters=model.generator.parameters(),
                                   criterion=WGANGeneratorLoss(),
                                   dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
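    # Log the images the generator callback writes into the trainer state,
    # and stop logging the raw training inputs (hence the removal below).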
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
def run(args):
    model = SpeechModel(args)
    trainer = Trainer()
    if os.path.exists(
            os.path.join(args.save_directory, trainer._checkpoint_filename)):
        trainer.load(from_directory=args.save_directory)
        model.load_state_dict(trainer.model.state_dict())
        if args.cuda:
            model = model.cuda()
    else:
        train_loader = make_loader('train', args, batch_size=args.batch_size)
        dev_loader = make_loader('dev', args, batch_size=args.batch_size)
        # Build trainer
        trainer = Trainer(model) \
            .build_criterion(CTCCriterion, size_average=True) \
            .build_optimizer('Adam', weight_decay=1e-7, lr=5e-5) \
            .validate_every((1, 'epochs')) \
            .save_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)

        # Bind loaders
        trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
        trainer.bind_loader('validate',
                            dev_loader,
                            num_inputs=3,
                            num_targets=1)

        if args.cuda:
            trainer.cuda()

        # Go!
        trainer.fit()
        trainer.save()

    test_loader = make_loader('test',
                              args=args,
                              shuffle=False,
                              batch_size=1,
                              test=True)
    run_logits(loader=test_loader, model=model, args=args)
def train_model(args):
    """
    Perform training then call prediction
    """
    model = all_cnn_module()
    model.apply(initializer)
    train_loader, validate_loader, test_loader = make_loaders(args)
    # Build trainer
    savepath = os.path.join(args.save_directory,
                            Trainer()._checkpoint_filename)
    if os.path.exists(savepath):
        trainer = Trainer().load(from_directory=args.save_directory)
        if args.cuda:
            trainer.cuda()
    else:
        trainer = Trainer(model) \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .save_every((1, 'epochs')) \
            .validate_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)

        # These are the params from the paper
        trainer.build_optimizer('SGD',
                                lr=0.01,
                                momentum=0.9,
                                weight_decay=0.001)
        # Also works with Adam and default settings
        # trainer.build_optimizer('Adam')
        trainer.bind_loader('train', train_loader)
        trainer.bind_loader('validate', validate_loader)

        if args.cuda:
            trainer.cuda()

        # Go!
        trainer.fit()
        trainer.save()
    write_predictions(args, trainer.model, test_loader)
Example #13
def run(args):
    save_args(args)  # save command line to a file for reference
    train_loader = mnist_data_loader(args)  # get the data
    model = GANModel(args,
                     discriminator=DiscriminatorNetwork(args),
                     generator=GeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        WGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)
    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(GenerateDataCallback(args))
    trainer.register_callback(
        GeneratorTrainingCallback(args,
                                  parameters=model.generator.parameters(),
                                  criterion=WGANGeneratorLoss()))
    trainer.bind_loader('train', train_loader)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()

    # Generate video from saved images
    if not args.no_ffmpeg:
        generate_video(args.save_directory)
Example #14
def main():
    # Load dataset
    train_loader = get_cremi_loaders(
        config=join(PROJECT_DIRECTORY, 'Configurations', CONFIG_FILENAME))
    # Build model
    dense_unet = DUNet(1, 1)
    # Build trainer
    trainer = Trainer(model=dense_unet) \
        .build_optimizer('Adam') \
        .build_criterion('SorensenDiceLoss') \
        .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                        send_image_at_channel_indices='all',
                                        log_images_every=(20, 'iterations')),
                      log_directory=join(PROJECT_DIRECTORY, 'Logs')) \
        .save_every((1000, 'iterations'), to_directory=join(PROJECT_DIRECTORY, 'Weights')) \
        .set_max_num_iterations(1000000) \
        .cuda()
    # Bind loader to trainer
    trainer.bind_loader('train', train_loader)
    # Go!
    trainer.fit()
Example #15
def train(net, num_epochs, dataset):
    data_loader = torch.utils.data.DataLoader(dataset,
                                              shuffle=True,
                                              batch_size=100)
    net.train(mode=True)

    for i in range(num_epochs):
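        # A fresh trainer (and 'Adam' optimizer) is built every epoch so
        # training restarts with a decayed learning rate; note this also
        # resets the optimizer's moment estimates.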
        lr = 0.002 / (1 + 0.2 * i)
        trainer = Trainer(net) \
            .build_criterion(SeqCrossEntropyLoss) \
            .build_optimizer('Adam', lr=lr) \
            .set_max_num_epochs(1) \
            .save_every((10, 'iterations')) \
            .save_to_directory('net/')

        trainer.bind_loader('train', data_loader, num_inputs=3)

        if torch.cuda.is_available():
            trainer.cuda()

        trainer.fit()
    def get_trainer(self, input_channels):
        # Build model
        net = self._make_test_model(input_channels)
        # Build trainer
        trainer = Trainer(net)\
            .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                            send_image_at_channel_indices='all',
                                            log_images_every=(20, 'iterations')),
                          log_directory=self.LOG_DIRECTORY)\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .build_optimizer('Adam')\
            .validate_every((1, 'epochs'))\
            .save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)\
            .set_precision(self.PRECISION)
        # Bind loaders
        train_loader, test_loader = self.get_random_dataloaders(
            input_channels=input_channels)
        trainer.bind_loader('train',
                            train_loader).bind_loader('validate', test_loader)
        return trainer
Example #17
class TestTensorboard(unittest.TestCase):
    ROOT_DIR = os.path.dirname(__file__)
    PRECISION = 'half'
    DOWNLOAD_CIFAR = True

    @staticmethod
    def _make_test_model():
        toy_net = nn.Sequential(nn.Conv2d(3, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 256, 3, 1, 1),
                                nn.ELU(),
                                nn.AdaptiveMaxPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(256, 10),
                                nn.Softmax(dim=1))
        return toy_net

    def setUp(self):
        # Build model
        net = self._make_test_model()

        # Build trainer
        self.trainer = Trainer(net)\
            .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                            send_image_at_channel_indices='all',
                                            log_images_every=(20, 'iterations')),
                          log_directory=os.path.join(self.ROOT_DIR, 'logs'))\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .build_optimizer('Adam')\
            .validate_every((1, 'epochs'))\
            .save_every((2, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves'))\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)\
            .cuda().set_precision(self.PRECISION)

        # Load CIFAR10 data
        train_loader, test_loader = \
            get_cifar10_loaders(root_directory=os.path.join(self.ROOT_DIR, 'data'),
                                download=self.DOWNLOAD_CIFAR)

        # Bind loaders
        self.trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)

    def test_tensorboard(self):
        # Set up if required
        if not hasattr(self, 'trainer'):
            self.setUp()
        # Train
        self.trainer.fit()
        # Print info for check
        self.trainer.print("Inspect logs at: {}".format(self.trainer.log_directory))

    def test_serialization(self):
        if not hasattr(self, 'trainer'):
            self.setUp()
        # Serialize
        self.trainer.save()
        # Deserialize
        trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
        train_loader, test_loader = \
            get_cifar10_loaders(root_directory=os.path.join(self.ROOT_DIR, 'data'),
                                download=self.DOWNLOAD_CIFAR)
        trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
        trainer.fit()
        trainer.print("Inspect logs at: {}".format(self.trainer.log_directory))
Example #18
def train_model(args):
    model = fm.model_fn(args)
    print('Done initializing model')
    # labels_all_dict = np.load('labels_all_dict.npy')
    labels_all_dict = np.load('labels_all_dict_10s_subset_500.npy')
    if args.no_val:
        file_names_train = np.load(
            'file_names_train_notest_hop_subset_500.npy')
        # file_names_train = np.load('file_names_train_notest.npy')
        train_dataloader = fd.create_dataloader(file_names_train,
                                                args.data_directory,
                                                labels_all_dict.item(), args)
    else:
        file_names_train = np.load('file_names_train.npy')
        file_names_test = np.load('file_names_test.npy')
        train_dataloader = fd.create_dataloader(file_names_train,
                                                args.data_directory,
                                                labels_all_dict.item(), args)
        dev_dataloader = fd.create_dataloader(file_names_test,
                                              args.data_directory,
                                              labels_all_dict.item(), args)

    # train_dataloader = fd.create_dataloader(train_feats, train_labels, args)
    # dev_dataloader = fd.create_dataloader(dev_feats, dev_labels, args)

    test_dataloader_flag = False
    if test_dataloader_flag:
        it = 0
        for x, y, z in train_dataloader:

            print('x.shape: {}'.format(x.shape))
            print('y.shape: {}'.format(y.shape))
            print('z.shape: {}'.format(z.shape))
            print('x: {}'.format(x))
            print('y: {}'.format(y))
            print('z: {}'.format(z))

            it = it + 1
            print(it)

    print('Done Creating Data Loaders')

    if args.load_model:
        (model, trainer) = load_model(args)
        trainer \
            .bind_loader('train', train_dataloader, num_inputs=2, num_targets=1)
        if not args.no_val:
            trainer.bind_loader('validate',
                                dev_dataloader,
                                num_inputs=2,
                                num_targets=1)
    else:
        # Build trainer
        loss_fn = cust_CrossEntropyLoss()
        trainer = Trainer(model) \
            .build_criterion(loss_fn) \
            .build_metric('CategoricalError') \
            .build_optimizer(torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)) \
            .save_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)
        if not args.no_val:
            trainer \
                .save_at_best_validation_score(True) \
                .validate_every((1, 'epochs'))
        trainer \
            .bind_loader('train', train_dataloader, num_inputs=2, num_targets=1)
        if not args.no_val:
            trainer.bind_loader('validate',
                                dev_dataloader,
                                num_inputs=2,
                                num_targets=1)

            # .build_optimizer('Adam') \
            # .save_at_best_validation_score(True) \
            # .build_metric(LevenshteinMetric) \
            # .evaluate_metric_every('300 iterations') \

    if args.cuda:
        print('Switch trainer to CUDA')
        trainer.cuda()

    trainer.fit()
    print('Done Fitting Trainer')
    return model
Example #19
trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)

trainer.build_logger(logger, log_directory=LOG_DIRECTORY)
trainer.set_log_directory(LOG_DIRECTORY)

##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)

##################################################

from inferno.trainers.callbacks.base import Callback


class SawtoothLearningrate(Callback):
    """oscillating learning rate"""
    def __init__(self, min_value, max_value, frequency):
        super(SawtoothLearningrate, self).__init__()
        self.min_value = min_value
        self.max_value = max_value
        self.frequency = frequency
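
    # Hedged completion (not in the original): inferno dispatches callback
    # methods named after trigger events, so this hook runs once per training
    # iteration and applies a sawtooth schedule to the optimizer's lr.
    def end_of_training_iteration(self, **_):
        phase = (self.trainer.iteration_count % self.frequency) / float(self.frequency)
        lr = self.max_value - (self.max_value - self.min_value) * phase
        for param_group in self.trainer.optimizer.param_groups:
            param_group['lr'] = lr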
Example #20
max_num_iterations = 10000
trainer = Trainer(model) \
    .build_criterion('NLLLoss') \
    .build_metric('CategoricalError') \
    .build_optimizer('Adam') \
    .save_every((1, 'epochs')) \
    .save_to_directory(str(exp.save_directory))\
    .validate_every((2, 'epochs'))\
    .set_max_num_epochs(100) \
    .build_logger(TensorboardLogger(log_scalars_every=(1, 'iterations'),
                                    log_images_every='never'),
                  log_directory=str(exp.log_directory))
    # .save_every((2000, 'iterations'), to_directory=str(exp.save_directory), checkpoint_filename='latest')

# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', test_loader)
trainer.cuda()

# Go!
trainer.fit()

# TODO: train different max pool sizes

# TODO: train different kernel numbers

# TODO: train different conv layers

# TODO: train with different number of training images

# TODO: report run time