def run(args):
    dataset = AlderleyWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = alderley_cgan_data_loader(args, dataset=dataset)  # get the data
    model = CGANModel(
        args,
        discriminators=[PixelDiscriminator(), patchCDiscriminatorNetwork(args)],
        generator=UnetUpsample())

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer(
        'Adam',
        [parameter
         for discriminator in model.discriminators
         for parameter in discriminator.parameters()],
        lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(CGeneratorTrainingCallback(
        args,
        parameters=model.generator.parameters(),
        criterion=WGANGeneratorLoss(), dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
    # (not actually wired up in this example; see the sketch below)

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
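
No logger is built in this first example, so the logging comment above has no effect. A minimal sketch of the image-logging wiring, following the pattern of Examples #4 and #6 (args.log_image_frequency is assumed to exist on args):

    # Sketch: attach a TensorboardLogger and observe the generated/real images,
    # as done in Examples #4 and #6. This belongs before trainer.fit().
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')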
Example #2
def task(env):
    dataset_name = 'MNIST'
    dataset_path = env.dataset(dataset_name)
    batch_size = 128
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            # MNIST images are single-channel; three-channel stats would raise here
            transforms.Normalize((0.5,), (0.5,))
        ])
    mnist_set = torchvision.datasets.MNIST(str(dataset_path), train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(mnist_set, batch_size=batch_size,
                                               shuffle=True, num_workers=2)
    mnist_test_set = torchvision.datasets.MNIST(str(dataset_path), train=False, download=True, transform=transform)
    test_loader = torch.utils.data.DataLoader(mnist_test_set, batch_size=512,
                                               shuffle=True, num_workers=2)

    model = Model(n_classes=10, in_channels=1, layers=4)
    exp = TrainLog(env, dataset_name, model, log_time=True)

    logging.info('saving to %s', exp.save_directory)
    logging.info('logging to %s', exp.log_directory)
    # Load loaders
    # train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
    #                                                     download=DOWNLOAD_CIFAR)

    # Build trainer
    iterations = 5000
    trainer = Trainer(model) \
        .build_criterion('NLLLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam', lr=0.001) \
        .save_every((1, 'epochs')) \
        .save_to_directory(str(exp.save_directory)) \
        .validate_every((1, 'epochs'))\
        .set_max_num_iterations(iterations) \
        .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                        log_images_every='never'),
                      log_directory=str(exp.log_directory))
    # Bind loaders
    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', test_loader)
    trainer.cuda()

    # Go!
    logging.info('start training')
    trainer.fit()
    trainer.set_max_num_iterations(trainer.iteration_count + iterations)
    trainer.build_optimizer('Adam', lr=0.0001)
    logging.info('continuing with a lower learning rate')
    trainer.fit()
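
If the process is restarted rather than kept alive, the same run can instead be resumed from the checkpoint directory; a minimal sketch following the load pattern used in Example #5:

    # Sketch: resume from the checkpoints written by save_every above,
    # then extend the iteration budget and continue training.
    trainer = Trainer(model).load(from_directory=str(exp.save_directory))
    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', test_loader)
    trainer.cuda()
    trainer.set_max_num_iterations(trainer.iteration_count + iterations)
    trainer.fit()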
Example #3
def my_train(load, folder):
    click.echo('starting training')
    os.makedirs(folder, exist_ok=True)

    # joint_transform = Compose(
    #     RandomRotate(),
    #     RandomTranspose(),
    #     RandomFlip()
    # )

    # setup logger
    os.makedirs('derived_data/log', exist_ok=True)
    Logger.instance().setup('derived_data/log')

    vae = Vae()

    # root_folder is assumed to be defined at module level; it is not a
    # parameter of this function
    ds = HemoDataset(root_folder=root_folder, image_transform=None, training=True)
    train_loader = torch.utils.data.DataLoader(ds, batch_size=1536, num_workers=8)

    # Build trainer
    trainer = Trainer(vae)
    trainer.save_to_directory(folder)

    if load:
        trainer.load()
    # trainer.cuda(devices=[0, 1])
    trainer.cuda()

    trainer.build_criterion(vae.loss_function())
    trainer.build_optimizer('Adam', lr=0.001)
    # trainer.validate_every((2, 'epochs'))
    trainer.save_every((1, 'epochs'))
    trainer.set_max_num_epochs(100)
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never',
                                           # log_images_every=(1, 'iteration'),
                                           log_directory=folder))

    # Bind loaders
    trainer.bind_loader('train', train_loader, num_inputs=1, num_targets=1)

    # bind callbacks
    trainer.register_callback(GarbageCollection(), trigger='end_of_training_iteration')
    trainer.register_callback(ShowMinimalConsoleInfo(), trigger='end_of_training_iteration')

    # trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
    trainer.fit()
    pushover_notification.send('embeddings generated')
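
GarbageCollection and ShowMinimalConsoleInfo come from the surrounding codebase. For reference, a minimal custom callback sketch in inferno's style, assuming the Callback base class dispatches to methods named after their trigger:

# Hypothetical callback: print progress every 100 training iterations.
from inferno.trainers.callbacks.base import Callback

class PrintIteration(Callback):
    def end_of_training_iteration(self, **_):
        if self.trainer.iteration_count % 100 == 0:
            print('iteration', self.trainer.iteration_count)

trainer.register_callback(PrintIteration())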
Example #4
def run(args):
    dataset = RorschachWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = rorschach_cgan_data_loader(args,
                                              dataset=dataset)  # get the data
    # todo
    model = patchCWGANModel(args,
                            discriminator=patchCDiscriminatorNetwork(args),
                            generator=CGeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight,
                               model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(
        CGeneratorTrainingCallback(args,
                                   parameters=model.generator.parameters(),
                                   criterion=WGANGeneratorLoss(),
                                   dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    # There is no public API to stop observing a state, so we reach into a
    # private attribute to drop the raw training inputs from the log.
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Example #5
def train_model(args):
    """
    Perform training then call prediction
    """
    model = all_cnn_module()
    model.apply(initializer)
    train_loader, validate_loader, test_loader = make_loaders(args)
    # Build trainer
    # _checkpoint_filename is a private Trainer attribute; it gives the default
    # checkpoint name so we can detect a previous run's checkpoint.
    savepath = os.path.join(args.save_directory,
                            Trainer()._checkpoint_filename)
    if os.path.exists(savepath):
        trainer = Trainer().load(from_directory=args.save_directory)
        if args.cuda:
            trainer.cuda()
    else:
        trainer = Trainer(model) \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .save_every((1, 'epochs')) \
            .validate_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)

        # These are the params from the paper
        trainer.build_optimizer('SGD',
                                lr=0.01,
                                momentum=0.9,
                                weight_decay=0.001)
        # Also works with Adam and default settings
        # trainer.build_optimizer('Adam')
        trainer.bind_loader('train', train_loader)
        trainer.bind_loader('validate', validate_loader)

        if args.cuda:
            trainer.cuda()

        # Go!
        trainer.fit()
        trainer.save()
    write_predictions(args, trainer.model, test_loader)
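
write_predictions is defined elsewhere in this project. Purely as an illustration, an inference pass over test_loader might look like this (the helper name and batch layout are assumptions):

# Hypothetical sketch of a prediction pass; not the project's write_predictions.
import torch

def predict_all(model, test_loader):
    model.eval()
    predictions = []
    with torch.no_grad():
        for batch in test_loader:
            inputs = batch[0] if isinstance(batch, (list, tuple)) else batch
            predictions.append(model(inputs).argmax(dim=1).cpu())
    return torch.cat(predictions)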
Example #6
def run(args):
    save_args(args)  # save command line to a file for reference
    train_loader = mnist_data_loader(args)  # get the data
    model = GANModel(args,
                     discriminator=DiscriminatorNetwork(args),
                     generator=GeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        WGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)
    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(GenerateDataCallback(args))
    trainer.register_callback(
        GeneratorTrainingCallback(args,
                                  parameters=model.generator.parameters(),
                                  criterion=WGANGeneratorLoss()))
    trainer.bind_loader('train', train_loader)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    # As in Example #4, drop the raw training inputs via the private
    # attribute, since there is no public unobserve API.
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()

    # Generate video from saved images
    if not args.no_ffmpeg:
        generate_video(args.save_directory)
Example #7
# data loaders
from inferno.io.box.cifar import get_cifar10_loaders
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)

logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                           log_images_every='never')

##################################################
# Build trainer
from inferno.trainers.basic import Trainer

trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)

trainer.build_logger(logger, log_directory=LOG_DIRECTORY)
trainer.set_log_directory(LOG_DIRECTORY)

##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)

##################################################
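This excerpt stops before training begins; following the other examples, the remaining steps would be:

##################################################
# Go! (sketch of the missing final steps; assumes a CUDA device,
# as in the other examples)
trainer.cuda()
trainer.fit()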
Example #8
        p0, p1 = (0.86, 0.14)

    print("class p ", p0, p1)

    bsd_val = Bsd500Sp(bsd_root=bsd_root,
                       pmap_root=pmap_root,
                       split='val',
                       joint_transformation=joint_transformation)

    model = ConvNet()  #.double()

    smoothness = 0.001
    trainer = Trainer(model)

    trainer.build_criterion(LossWrapper(p0=p0, p1=p1))
    trainer.build_optimizer('Adam')  #, lr=0.0001)
    trainer.validate_every((1, 'epochs'))
    #trainer.save_every((4, 'epochs'))
    trainer.save_to_directory(SAVE_DIRECTORY)
    trainer.set_max_num_epochs(200)
    trainer.register_callback(
        SaveAtBestValidationScore(smoothness=smoothness, verbose=True))
    trainer.register_callback(
        AutoLR(factor=0.5,
               patience='1 epochs',
               monitor_while='validating',
               monitor='validation_loss',
               monitor_momentum=smoothness,
               consider_improvement_with_respect_to='previous',
               verbose=True))
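
The excerpt configures the trainer but never binds loaders or starts training. A hypothetical continuation (bsd_train is an assumed counterpart to bsd_val above; batch size and worker count are guesses):

    # Sketch only: bind loaders and train. Only bsd_val appears in the excerpt.
    train_loader = torch.utils.data.DataLoader(bsd_train, batch_size=8,
                                               shuffle=True, num_workers=2)
    val_loader = torch.utils.data.DataLoader(bsd_val, batch_size=8,
                                             shuffle=False, num_workers=2)
    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', val_loader)
    trainer.cuda()
    trainer.fit()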
Example #9
trainer = Trainer(net)

trainer.bind_loader('train', trainloader)
trainer.bind_loader('validate', valloader)

trainer.save_to_directory('./checkpoints')
trainer.save_every((200, 'iterations'))
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)

trainer.validate_every((200, 'iterations'), for_num_iterations=50)
# trainer.build_metric()

trainer.build_criterion(nn.L1Loss)
trainer.build_optimizer(optim.Adam, lr=1e-4, weight_decay=0.0005)

trainer.set_max_num_iterations(20000)
if torch.cuda.is_available():
    trainer.cuda()

print('starting training')
trainer.fit()

print('finished training')
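
After training, the saved checkpoint can be reloaded for evaluation; a minimal sketch using the same load call as Example #5:

# Sketch: reload the last checkpoint written to ./checkpoints.
trainer = Trainer(net).load(from_directory='./checkpoints')
net = trainer.model
net.eval()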