Code example #1
File: train.py  Project: mobie/platybrowser-datasets
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(),
                                              InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    offsets = data_config['volume_config']['segmentation']['affinity_config'][
        'offsets']
    metric = ArandErrorFromMulticut(average_slices=False,
                                    use_2d_ws=True,
                                    n_threads=8,
                                    weight_edges=True,
                                    offsets=offsets)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
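
Note: each set_up_training variant in this listing only builds and configures
the Trainer; binding data loaders, moving to GPU, and calling fit() happen
elsewhere. A minimal driver sketch follows, in which get_train_loader and
get_validation_loader are assumed helpers that do not appear in the examples:

# Driver sketch; get_train_loader / get_validation_loader are hypothetical.
trainer = set_up_training(project_directory, config, data_config,
                          load_pretrained_model=False)
trainer.set_max_num_iterations(100000)
trainer.bind_loader('train', get_train_loader(data_config))
trainer.bind_loader('validate', get_validation_loader(data_config))
trainer.cuda()
trainer.fit()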
Code example #2
def set_up_training(project_directory, config, data_config, criterion, balance,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    # TODO
    logger.info("Using criterion: %s" % criterion)

    # TODO this should go somewhere more prominent
    affinity_offsets = data_config['volume_config']['segmentation'][
        'affinity_offsets']

    # TODO implement affinities on gpu again ?!
    criterion = CRITERIA[criterion]
    loss = LossWrapper(
        criterion=criterion(),
        transforms=Compose(MaskTransitionToIgnoreLabel(affinity_offsets),
                           RemoveSegmentationFromTarget(), InvertTarget()),
        weight_function=BalanceAffinities(
            ignore_label=0, offsets=affinity_offsets) if balance else None)

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    metric = ArandErrorFromSegmentationPipeline(
        local_affinity_multicut_from_wsdt2d(n_threads=10, time_limit=120))
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Code example #3
def load_model(args):
    trainer = Trainer()
    trainer.load(from_directory=args.load_directory, best=False)
    trainer.set_max_num_epochs(args.epochs + trainer.epoch_count)
    model = trainer.model
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never'),
                         log_directory=args.save_directory)
    trainer.save_to_directory(args.save_directory)
    return (model, trainer)
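
The (model, trainer) pair returned by load_model can be used to resume
training directly. A short sketch, assuming a pre-built train_loader and a
cuda flag on args (both assumptions, not part of the example above):

# Resume sketch; train_loader and args.cuda are assumed.
model, trainer = load_model(args)
trainer.bind_loader('train', train_loader)
if args.cuda:
    trainer.cuda()
trainer.fit()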
Code example #4
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    affinity_offsets = data_config['volume_config']['segmentation'][
        'affinity_offsets']
    loss = LossWrapper(criterion=SorensenDiceLoss(),
                       transforms=Compose(
                           MaskTransitionToIgnoreLabel(affinity_offsets),
                           RemoveSegmentationFromTarget(), InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    # metric = ArandErrorFromSegmentationPipeline(local_affinity_multicut_from_wsdt2d(n_threads=10,
    #                                                                                 time_limit=120))

    # use damws for validation
    stride = [2, 10, 10]
    metric = ArandErrorFromSegmentationPipeline(
        DamWatershed(affinity_offsets, stride, randomize_bounds=False))
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Code example #5
def set_up_training(project_directory, config, data_config):

    # Get model
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion, transforms=InvertTarget())
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.75

    offsets = data_config['volume_config']['segmentation']['affinity_config'][
        'offsets']
    strides = [1, 10, 10]
    metric = ArandErrorFromMWS(average_slices=False,
                               offsets=offsets,
                               strides=strides,
                               randomize_strides=False)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness,
                                                     verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.99,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
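
Code examples #1 and #5 read the affinity offsets from a nested data_config
dict. The fragment below sketches the structure those lookups assume; the
concrete offset vectors are illustrative values only:

# Illustrative data_config fragment; only the keys read above are shown.
data_config = {
    'volume_config': {
        'segmentation': {
            'affinity_config': {
                # (z, y, x) neighborhood offsets; these values are examples
                'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
                            [0, -9, 0], [0, 0, -9]]
            }
        }
    }
}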
Code example #6
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model, max_iters):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    loss = LossWrapper(criterion=SorensenDiceLoss(),
                       transforms=Compose(MaskIgnoreLabel(),
                                          RemoveSegmentationFromTarget()))
    # TODO loss transforms:
    # - Invert Target ???

    # Build trainer and validation metric
    logger.info("Building trainer.")
    # smoothness = 0.95

    # TODO set up validation ?!
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .register_callback(ManualLR(decay_specs=[((k * 100, 'iterations'), 0.99)
                                                 for k in range(1, max_iters // 100)]))
    # .validate_every((100, 'iterations'), for_num_iterations=1)\
    # .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
    # .build_metric(metric)\
    # .register_callback(AutoLR(factor=0.98,
    #                           patience='100 iterations',
    #                           monitor_while='validating',
    #                           monitor_momentum=smoothness,
    #                           consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'))  # .observe_states(
    #     ['validation_input', 'validation_prediction', 'validation_target'],
    #     observe_while='validating'
    # )

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
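
The ManualLR callback above multiplies the learning rate by 0.99 every 100
iterations. The decay_specs list it receives can be sanity-checked without
inferno:

# Sanity check of the decay_specs built in the example (illustrative max_iters).
max_iters = 1000
decay_specs = [((k * 100, 'iterations'), 0.99)
               for k in range(1, max_iters // 100)]
print(decay_specs[:3])
# [((100, 'iterations'), 0.99), ((200, 'iterations'), 0.99), ((300, 'iterations'), 0.99)]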
Code example #7
def my_train(load, folder):
    click.echo('starting training')
    os.makedirs(folder, exist_ok=True)

    # joint_transform = Compose(
    #     RandomRotate(),
    #     RandomTranspose(),
    #     RandomFlip()
    # )

    # setup logger
    os.makedirs('derived_data/log', exist_ok=True)
    Logger.instance().setup('derived_data/log')

    vae = Vae()

    ds = HemoDataset(root_folder=root_folder, image_transform=None, training=True)
    train_loader = torch.utils.data.DataLoader(ds, batch_size=1536, num_workers=8)

    # Build trainer
    trainer = Trainer(vae)
    trainer.save_to_directory(folder)

    if load:
        trainer.load()
    # trainer.cuda(devices=[0, 1])
    trainer.cuda()

    trainer.build_criterion(vae.loss_function())
    trainer.build_optimizer('Adam', lr=0.001)
    # trainer.validate_every((2, 'epochs'))
    trainer.save_every((1, 'epochs'))
    trainer.set_max_num_epochs(100)
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never',
                                           # log_images_every=(1, 'iteration'),
                                           log_directory=folder))

    # Bind loaders
    trainer.bind_loader('train', train_loader, num_inputs=1, num_targets=1)

    # bind callbacks
    trainer.register_callback(GarbageCollection(), trigger='end_of_training_iteration')
    trainer.register_callback(ShowMinimalConsoleInfo(), trigger='end_of_training_iteration')

    # trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
    trainer.fit()
    pushover_notification.send('embeddings generated')
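
my_train reads like the body of a click command (click.echo, a load flag and
a folder argument). A wrapper of the following shape would expose it on the
command line; the option names and defaults are assumptions:

# Hypothetical click wrapper for my_train; options are illustrative.
import click

@click.command()
@click.option('--load/--no-load', default=False,
              help='Resume from the checkpoint saved in FOLDER.')
@click.option('--folder', type=click.Path(), default='derived_data/vae')
def main(load, folder):
    my_train(load, folder)

if __name__ == '__main__':
    main()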
Code example #8
File: train_affs.py  Project: mobie/lgn-em-datasets
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    loss = dice_loss()
    loss_val = dice_loss(is_val=True)
    metric = mws_metric()
    # metric = loss_val

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.9

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=5)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Code example #9
def run(args):
    dataset = RorschachWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = rorschach_cgan_data_loader(args,
                                              dataset=dataset)  # get the data
    # todo
    model = patchCWGANModel(args,
                            discriminator=patchCDiscriminatorNetwork(args),
                            generator=CGeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight,
                               model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(
        CGeneratorTrainingCallback(args,
                                   parameters=model.generator.parameters(),
                                   criterion=WGANGeneratorLoss(),
                                   dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Code example #10
def run(args):
    save_args(args)  # save command line to a file for reference
    train_loader = mnist_data_loader(args)  # get the data
    model = GANModel(args,
                     discriminator=DiscriminatorNetwork(args),
                     generator=GeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        WGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)
    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(GenerateDataCallback(args))
    trainer.register_callback(
        GeneratorTrainingCallback(args,
                                  parameters=model.generator.parameters(),
                                  criterion=WGANGeneratorLoss()))
    trainer.bind_loader('train', train_loader)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()

    # Generate video from saved images
    if not args.no_ffmpeg:
        generate_video(args.save_directory)
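
generate_video itself is not shown in this example. A minimal sketch of what
it might do, assuming the training callbacks wrote numbered PNG frames into
the save directory (the frame naming pattern is an assumption):

# Hypothetical generate_video sketch; the frame pattern is assumed.
import os
import subprocess

def generate_video(save_directory, fps=24):
    pattern = os.path.join(save_directory, 'frame_%05d.png')  # assumed naming
    output = os.path.join(save_directory, 'training.mp4')
    subprocess.check_call(['ffmpeg', '-y', '-framerate', str(fps),
                           '-i', pattern, '-pix_fmt', 'yuv420p', output])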
Code example #11
# Build logger
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger

logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                           log_images_every='never')

##################################################
# Build trainer
from inferno.trainers.basic import Trainer

trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)

trainer.build_logger(logger, log_directory=LOG_DIRECTORY)
trainer.set_log_directory(LOG_DIRECTORY)

##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)

##################################################

from inferno.trainers.callbacks.base import Callback


class SawtoothLearningrate(Callback):
    """oscillating learning rate"""
    def __init__(self, min_value, max_value, frequency):
        # Minimal completion of this truncated example: store the oscillation
        # bounds and period; the scheduling logic itself is not shown.
        super(SawtoothLearningrate, self).__init__()
        self.min_value = min_value
        self.max_value = max_value
        self.frequency = frequency
Code example #12
# data loaders
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)

##################################################
# Build trainer
trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)

##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)

##################################################
# activate cuda
if USE_CUDA:
    trainer.cuda()

##################################################
# fit
trainer.fit()
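
Once fit() has run, the checkpoints written via save_to_directory/save_every
can be reloaded the same way code example #3 does; best=True picks the best
checkpoint when validation was active:

# Reload sketch, mirroring the load call in code example #3.
from inferno.trainers.basic import Trainer

trainer = Trainer().load(from_directory=SAVE_DIRECTORY, best=True)
model = trainer.model
model.eval()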
Code example #13
File: train_inferno.py  Project: wolny/lsfm_utils
def set_up_training(project_directory, config):

    # Load the model to train from the configuration file ('./config/train_config.yml')
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    # Initialize the loss: we use the SorensenDiceLoss, which has the nice
    # property of being fairly robust to unbalanced targets
    criterion = SorensenDiceLoss()
    # Wrap the loss to apply additional transformations before the actual
    # loss is applied: during training we apply the mask to the target and
    # invert the target (necessary for the Sorensen-Dice loss).
    # During validation, the target additionally contains the groundtruth
    # segmentation (needed for the validation metric), so it is removed
    # before the loss is applied.
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(),
                                              InvertTarget()))

    # Build the validation metric: we validate by running connected components on
    # the affinities for several thresholds
    # metric = ArandErrorFromConnectedComponentsOnAffinities(thresholds=[.5, .6, .7, .8, .9],
    #                                                        invert_affinities=True)
    metric = ArandErrorFromConnectedComponents(thresholds=[.5, .6, .7, .8, .9],
                                               invert_input=True,
                                               average_input=True)

    logger.info("Building trainer.")
    smoothness = 0.95
    # Build the trainer object
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))
    # .register_callback(DumpHDF5Every(frequency='99 iterations',
    #                                  to_directory=os.path.join(project_directory, 'debug')))

    logger.info("Building logger.")
    # Build tensorboard logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer