def run(args):
    dataset = AlderleyWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = alderley_cgan_data_loader(args, dataset=dataset)  # get the data
    model = CGANModel(
        args,
        discriminators=[PixelDiscriminator(), patchCDiscriminatorNetwork(args)],
        generator=UnetUpsample())

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
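    # a single optimizer steps over the parameters of both discriminators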
    trainer.build_optimizer(
        'Adam',
        [parameter
         for discriminator in model.discriminators
         for parameter in discriminator.parameters()],
        lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(CGeneratorTrainingCallback(
        args,
        parameters=model.generator.parameters(),
        criterion=WGANGeneratorLoss(), dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
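    # A sketch of that configuration, mirroring Examples #4 and #5 below
    # (assumes the same TensorboardLogger wiring and args.log_image_frequency
    # apply to this script as well):
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency, 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')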

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
def task(env):
    dataset_name = 'MNIST'
    dataset_path = env.dataset(dataset_name)
    dataset = MNIST(str(dataset_path), train=True,  download=True, transform=mnist_to_tensor32)
    testset = MNIST(str(dataset_path), train=False, download=True, transform=mnist_to_tensor32)
    test_loader  = DataLoader(testset, batch_size=512, shuffle=True, num_workers=2)
    train_loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)

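    # wrap MNIST into (anchor, positive, negative) triplets for the triplet
    # loss; stack_triplets is presumably the matching collate_fn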
    triplets = dis.TripelDataset(dataset, dataset.train_labels, K=1)
    train_loader_triple = DataLoader(triplets, batch_size=16, collate_fn=dis.stack_triplets)

    model = Model(in_channels=1, n_classes=10, shortcut=True)
    criterion = dis.TripletLoss()
    exp = TrainLog(env, dataset_name, model.info, log_time=True)

    logging.info(' saving  to %s', exp.save_directory)
    logging.info(' logging to %s', exp.log_directory)
    # Load loaders
    # train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
    #                                                     download=DOWNLOAD_CIFAR)

    iterations = 0
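    # note: with max_num_iterations at 0, the triplet-loss fit() below is
    # effectively a no-op; raise this value to actually pre-train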
    trainer = Trainer(model)
    set_log_and_save(trainer, exp)
    trainer.build_criterion(criterion) \
            .build_optimizer('SGD', lr=0.001, weight_decay=0.0005) \
            .set_max_num_iterations(iterations)\
            .bind_loader('train', train_loader_triple)
    trainer.cuda()
    logging.info('start training')
    trainer.fit()

    print(model.forward(Variable(next(iter(train_loader_triple))[0]).cuda()))

    model.use_shortcut = False
    model.classify = True
    print(model.forward(Variable(next(iter(train_loader_triple))[0]).cuda()))

    trainer = Trainer(model)
    set_log_and_save(trainer, exp)
    trainer.build_criterion('CrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam', lr=0.001) \
        .set_max_num_iterations(5000) \
        .bind_loader('train', train_loader) \
        .bind_loader('test', test_loader)
    trainer.cuda()
    logging.info('start training')
    trainer.fit()
Example #3
def my_train(load, folder):
    click.echo('starting training')
    os.makedirs(folder, exist_ok=True)

    # joint_transform = Compose(
    #     RandomRotate(),
    #     RandomTranspose(),
    #     RandomFlip()
    # )

    # setup logger
    os.makedirs('derived_data/log', exist_ok=True)
    Logger.instance().setup('derived_data/log')

    vae = Vae()

    ds = HemoDataset(root_folder=root_folder, image_transform=None, training=True)
    train_loader = torch.utils.data.DataLoader(ds, batch_size=1536, num_workers=8)

    # Build trainer
    trainer = Trainer(vae)
    trainer.save_to_directory(folder)

    if load:
        trainer.load()
    # trainer.cuda(devices=[0, 1])
    trainer.cuda()

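    # the VAE supplies its own criterion (typically a reconstruction term
    # plus a KL-divergence term)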
    trainer.build_criterion(vae.loss_function())
    trainer.build_optimizer('Adam', lr=0.001)
    # trainer.validate_every((2, 'epochs'))
    trainer.save_every((1, 'epochs'))
    trainer.set_max_num_epochs(100)
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never',
                                           # log_images_every=(1, 'iteration'),
                                           log_directory=folder))

    # Bind loaders
    trainer.bind_loader('train', train_loader, num_inputs=1, num_targets=1)

    # bind callbacks
    trainer.register_callback(GarbageCollection(), trigger='end_of_training_iteration')
    trainer.register_callback(ShowMinimalConsoleInfo(), trigger='end_of_training_iteration')

    # trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
    trainer.fit()
    pushover_notification.send('embeddings generated')
Example #4
def run(args):
    dataset = RorschachWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = rorschach_cgan_data_loader(args,
                                              dataset=dataset)  # get the data
    # todo
    model = patchCWGANModel(args,
                            discriminator=patchCDiscriminatorNetwork(args),
                            generator=CGeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight,
                               model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(
        CGeneratorTrainingCallback(args,
                                   parameters=model.generator.parameters(),
                                   criterion=WGANGeneratorLoss(),
                                   dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
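    # drop the default 'training_inputs' state so raw training batches are
    # not logged as images (this reaches into the logger's private state list)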
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Example #5
def run(args):
    save_args(args)  # save command line to a file for reference
    train_loader = mnist_data_loader(args)  # get the data
    model = GANModel(args,
                     discriminator=DiscriminatorNetwork(args),
                     generator=GeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        WGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)
    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(GenerateDataCallback(args))
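    # the criterion above only trains the discriminator; the generator is
    # updated by this callback (WGAN-style alternating updates)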
    trainer.register_callback(
        GeneratorTrainingCallback(args,
                                  parameters=model.generator.parameters(),
                                  criterion=WGANGeneratorLoss()))
    trainer.bind_loader('train', train_loader)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()

    # Generate video from saved images
    if not args.no_ffmpeg:
        generate_video(args.save_directory)
Example #6
##################################################
# data loaders
from inferno.io.box.cifar import get_cifar10_loaders
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)

logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                           log_images_every='never')

##################################################
# Build trainer
from inferno.trainers.basic import Trainer

trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)

trainer.build_logger(logger, log_directory=LOG_DIRECTORY)
trainer.set_log_directory(LOG_DIRECTORY)

##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)
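
##################################################
# Run the training. The original example stops at the loader bindings; the
# closing steps below follow the pattern of the other examples in this file.
if torch.cuda.is_available():
    trainer.cuda()
trainer.fit()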
Example #7
# Build a residual unet where the last layer is not activated
sl_unet = MySideLossUNet(in_channels=5, out_channels=2)

model = nn.Sequential(
    ResBlock(dim=2, in_channels=1, out_channels=5),
    sl_unet
)
train_loader, test_loader, validate_loader = get_binary_blob_loaders(
    train_batch_size=3,
    length=512, # <= size of the images
    gaussian_noise_sigma=1.5 # <= how noisy the images are
)

# Build trainer
trainer = Trainer(model)
trainer.build_criterion(MySideLoss())
trainer.build_optimizer('Adam')
trainer.validate_every((10, 'epochs'))
#trainer.save_every((10, 'epochs'))
#trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(40)

# Bind loaders
trainer \
    .bind_loader('train', train_loader)\
    .bind_loader('validate', validate_loader)

if USE_CUDA:
    trainer.cuda()

# Go!
trainer.fit()
Example #8
    else:
        p0, p1 = (0.86, 0.14)

    print("class p ", p0, p1)

    bsd_val = Bsd500Sp(bsd_root=bsd_root,
                       pmap_root=pmap_root,
                       split='val',
                       joint_transformation=joint_transformation)

    model = ConvNet()  #.double()

    smoothness = 0.001
    trainer = Trainer(model)

    trainer.build_criterion(LossWrapper(p0=p0, p1=p1))
    trainer.build_optimizer('Adam')  #, lr=0.0001)
    trainer.validate_every((1, 'epochs'))
    #trainer.save_every((4, 'epochs'))
    trainer.save_to_directory(SAVE_DIRECTORY)
    trainer.set_max_num_epochs(200)
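    # checkpoint whenever the smoothed validation score improves, and halve
    # the learning rate once validation loss stalls for an epoch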
    trainer.register_callback(
        SaveAtBestValidationScore(smoothness=smoothness, verbose=True))
    trainer.register_callback(
        AutoLR(factor=0.5,
               patience='1 epochs',
               monitor_while='validating',
               monitor='validation_loss',
               monitor_momentum=smoothness,
               consider_improvement_with_respect_to='previous',
               verbose=True))
Example #9

trainer = Trainer(net)

trainer.bind_loader('train', trainloader)
trainer.bind_loader('validate', valloader)

trainer.save_to_directory('./checkpoints')
trainer.save_every((200, 'iterations'))
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)

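# validate on 50 held-out batches every 200 training iterations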
trainer.validate_every((200, 'iterations'), for_num_iterations=50)
#trainer.build_metric()


trainer.build_criterion(nn.L1Loss)
trainer.build_optimizer(optim.Adam, lr=1e-4, weight_decay=0.0005)

trainer.set_max_num_iterations(20000)


if torch.cuda.is_available():
    trainer.cuda()

print('starting training')
trainer.fit()

print('finished training')
Example #10

trainer = Trainer(net)

trainer.bind_loader('train', trainloader)
trainer.bind_loader('validate', valloader)

trainer.save_to_directory('./checkpoints')
trainer.save_every((200, 'iterations'))
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)


trainer.validate_every((200, 'iterations'), for_num_iterations=50)
# trainer.build_metric()


trainer.build_criterion(criterion)
trainer.build_optimizer(optim.Adam, lr=1e-4, weight_decay=0.0005)

trainer.set_max_num_iterations(20000)


if torch.cuda.is_available():
    trainer.cuda()

print('starting training')
trainer.fit()

print('finished training')

print('the end')