Example #1
def train_model(args):
    model = model_fn()
    train_loader, validate_loader = mnist_data_loaders(args)

    # Build trainer
    trainer = Trainer(model) \
        .build_criterion('RegularizedCrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam') \
        .validate_every((1, 'epochs')) \
        .save_every((1, 'epochs')) \
        .save_to_directory(args.save_directory) \
        .set_max_num_epochs(args.epochs) \
        .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                        log_images_every='never'),
                      log_directory=args.save_directory)

    # Record regularization losses
    trainer.logger.observe_training_and_validation_states([
        'main_loss', 'total_regularization_loss', 'activity_regularization',
        'l1_weight_regularization'
    ])

    # Bind loaders
    trainer \
        .bind_loader('train', train_loader) \
        .bind_loader('validate', validate_loader)

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
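The mnist_data_loaders(args) helper used here (and again in Example #5) is not shown. A minimal sketch of what it could look like, assuming standard torchvision MNIST datasets and an args namespace with hypothetical data_directory and batch_size attributes:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def mnist_data_loaders(args):
    # Standard MNIST mean/std normalization
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_set = datasets.MNIST(args.data_directory, train=True,
                               download=True, transform=transform)
    validate_set = datasets.MNIST(args.data_directory, train=False,
                                  transform=transform)
    train_loader = DataLoader(train_set, batch_size=args.batch_size,
                              shuffle=True)
    validate_loader = DataLoader(validate_set, batch_size=args.batch_size)
    return train_loader, validate_loader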
Example #2
    def setUp(self):
        # Build model
        net = self._make_test_model()

        # Build trainer
        self.trainer = Trainer(net)\
            .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                            send_image_at_channel_indices='all',
                                            log_images_every=(20, 'iterations')),
                          log_directory=os.path.join(self.ROOT_DIR, 'logs'))\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .build_optimizer('Adam')\
            .validate_every((1, 'epochs'))\
            .save_every((2, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves'))\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)\
            .cuda().set_precision(self.PRECISION)

        # Load CIFAR10 data
        train_loader, test_loader = \
            get_cifar10_loaders(root_directory=os.path.join(self.ROOT_DIR, 'data'),
                                download=self.DOWNLOAD_CIFAR)

        # Bind loaders
        self.trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
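self._make_test_model() is defined elsewhere in the test class and not shown. A minimal stand-in, assuming CIFAR-10-shaped inputs (3x32x32) and 10 classes; the real test model may differ:

import torch.nn as nn

def _make_test_model(self):
    # Hypothetical small convnet for 3x32x32 inputs and 10 classes
    return nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3, padding=1), nn.ELU(),
        nn.MaxPool2d(2),  # -> 32 x 16 x 16
        nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.ELU(),
        nn.MaxPool2d(2),  # -> 64 x 8 x 8
        nn.Flatten(),
        nn.Linear(64 * 8 * 8, 10)
    )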
Example #3
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(),
                                              InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    offsets = data_config['volume_config']['segmentation']['affinity_config'][
        'offsets']
    metric = ArandErrorFromMulticut(average_slices=False,
                                    use_2d_ws=True,
                                    n_threads=8,
                                    weight_edges=True,
                                    offsets=offsets)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
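A sketch of how the returned trainer would typically be driven; train_loader, validation_loader, and max_train_iters are placeholders standing in for project-specific objects:

trainer = set_up_training(project_directory, config, data_config,
                          load_pretrained_model=False)
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validation_loader)
trainer.set_max_num_iterations(max_train_iters)
trainer.cuda()
trainer.fit()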
Example #4
def train_model(model, train_dataset, valid_dataset, args):
    kw = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              **kw)
    validate_loader = DataLoader(dataset=valid_dataset,
                                 batch_size=args.batch_size,
                                 **kw)
    trainer = Trainer(model) \
        .build_criterion(CrossEntropyLoss3D) \
        .build_metric(CategoricalError3D) \
        .build_optimizer('Adam', weight_decay=1e-6) \
        .save_every((1, 'epochs')) \
        .validate_every((100, 'iteration')) \
        .save_to_directory(args.save_directory) \
        .set_max_num_epochs(args.epochs) \
        .build_logger(
            TensorboardLogger(log_scalars_every=(1, 'iteration'), log_images_every='never'),
            log_directory='tb_log/'
        ) \
        .bind_loader('train', train_loader) \
        .bind_loader('validate', validate_loader)

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Example #5
def train_model(args):
    model = MNISTCNNModel()
    train_loader, validate_loader = mnist_data_loaders(args)

    # Build trainer
    trainer = Trainer(model) \
        .build_criterion('CrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam') \
        .validate_every((2, 'epochs')) \
        .save_every((5, 'epochs')) \
        .save_to_directory(args.save_directory) \
        .set_max_num_epochs(args.epochs) \
        .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                        log_images_every='never'),
                      log_directory=args.save_directory)

    # Bind loaders
    trainer \
        .bind_loader('train', train_loader) \
        .bind_loader('validate', validate_loader)

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Example #6
def set_up_training(project_directory, config, data_config, criterion, balance,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    # TODO
    logger.info("Using criterion: %s" % criterion)

    # TODO this should go somewhere more prominent
    affinity_offsets = data_config['volume_config']['segmentation'][
        'affinity_offsets']

    # TODO implement affinities on gpu again ?!
    criterion = CRITERIA[criterion]
    loss = LossWrapper(
        criterion=criterion(),
        transforms=Compose(MaskTransitionToIgnoreLabel(affinity_offsets),
                           RemoveSegmentationFromTarget(), InvertTarget()),
        weight_function=BalanceAffinities(
            ignore_label=0, offsets=affinity_offsets) if balance else None)

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    metric = ArandErrorFromSegmentationPipeline(
        local_affinity_multicut_from_wsdt2d(n_threads=10, time_limit=120))
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #7
def load_model(args):
    trainer = Trainer()
    trainer.load(from_directory=args.load_directory, best=False)
    trainer.set_max_num_epochs(args.epochs + trainer.epoch_count)
    model = trainer.model
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never'),
                         log_directory=args.save_directory)
    trainer.save_to_directory(args.save_directory)
    return (model, trainer)
Example #8
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    affinity_offsets = data_config['volume_config']['segmentation'][
        'affinity_offsets']
    loss = LossWrapper(criterion=SorensenDiceLoss(),
                       transforms=Compose(
                           MaskTransitionToIgnoreLabel(affinity_offsets),
                           RemoveSegmentationFromTarget(), InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    # use multicut pipeline for validation
    # metric = ArandErrorFromSegmentationPipeline(local_affinity_multicut_from_wsdt2d(n_threads=10,
    #                                                                                 time_limit=120))

    # use damws for validation
    stride = [2, 10, 10]
    metric = ArandErrorFromSegmentationPipeline(
        DamWatershed(affinity_offsets, stride, randomize_bounds=False))
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #9
def set_up_training(project_directory, config, data_config):

    # Get model
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion, transforms=InvertTarget())
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.75

    offsets = data_config['volume_config']['segmentation']['affinity_config'][
        'offsets']
    strides = [1, 10, 10]
    metric = ArandErrorFromMWS(average_slices=False,
                               offsets=offsets,
                               strides=strides,
                               randomize_strides=False)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness,
                                                     verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.99,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #10
def main(args):

    img_size = 224
    train_trans = transforms.Compose([
        transforms.RandomResizedCrop(img_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    test_trans = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_ds = IBMDataset(args.data_dir, transform=train_trans, name='train')
    train_loader = data.DataLoader(train_ds,
                                   batch_size=args.batch_size,
                                   shuffle=True)

    val_ds = IBMDataset(args.data_dir, transform=test_trans, name='val')
    print(len(val_ds), len(train_ds))
    val_loader = data.DataLoader(val_ds,
                                 batch_size=args.batch_size,
                                 shuffle=True)
    LOG_DIRECTORY = '../logs/'
    SAVE_DIRECTORY = '../models/resnet50/'
    
    model = ResNet50()
    trainer = Trainer(model) \
        .build_criterion('CrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam') \
        .validate_every((1, 'epochs')) \
        .save_every((1, 'epochs')) \
        .save_to_directory(SAVE_DIRECTORY) \
        .set_max_num_epochs(args.num_epochs) \
        .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                        log_images_every='never'),
                      log_directory=LOG_DIRECTORY)

    # Bind loaders
    trainer \
        .bind_loader('train', train_loader) \
        .bind_loader('validate', val_loader)

    if torch.cuda.is_available():
        trainer.cuda()

    trainer.fit()
Example #11
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model, max_iters):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    loss = LossWrapper(criterion=SorensenDiceLoss(),
                       transforms=Compose(MaskIgnoreLabel(),
                                          RemoveSegmentationFromTarget()))
    # TODO loss transforms:
    # - Invert Target ???

    # Build trainer and validation metric
    logger.info("Building trainer.")
    # smoothness = 0.95

    # TODO set up validation ?!
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .register_callback(ManualLR(decay_specs=[((k * 100, 'iterations'), 0.99)
                                                 for k in range(1, max_iters // 100)]))
    # .validate_every((100, 'iterations'), for_num_iterations=1)\
    # .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
    # .build_metric(metric)\
    # .register_callback(AutoLR(factor=0.98,
    #                           patience='100 iterations',
    #                           monitor_while='validating',
    #                           monitor_momentum=smoothness,
    #                           consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'))  # .observe_states(
    #     ['validation_input', 'validation_prediction', 'validation_target'],
    #     observe_while='validating'
    # )

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #12
File: inferno.py | Project: satyaog/speedrun
    def inferno_build_tensorboard(self):
        if self.get('trainer/tensorboard') is not None:
            if TensorboardLogger is None:
                print("warning can not use TensorboardLogger")
                return

            tb_args = self.get('trainer/tensorboard')
            tb_args['log_directory'] = f"{self.experiment_directory}/Logs"
            print("logging to ", tb_args['log_directory'])
            tb_logger = TensorboardLogger(**tb_args)

            # register Tensorboard logger
            self._trainer.build_logger(tb_logger)
            # and set _logger so it can be used by the TensorboardMixin
            self._logger = tb_logger
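The kwargs stored under the trainer/tensorboard config key are forwarded verbatim to TensorboardLogger. A hypothetical example of what self.get('trainer/tensorboard') might return, using frequencies that appear in the other examples:

# Hypothetical config values; inferno_build_tensorboard() adds
# 'log_directory' before calling TensorboardLogger(**tb_args).
tb_args = {
    'log_scalars_every': (1, 'iteration'),
    'log_images_every': (100, 'iterations'),
    'log_histograms_every': 'never',
}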
Example #13
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    loss = dice_loss()
    loss_val = dice_loss(is_val=True)
    metric = mws_metric()
    # metric = loss_val

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.9

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=5)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #14
def my_train(load, folder):
    click.echo('starting training')
    os.makedirs(folder, exist_ok=True)

    # joint_transform = Compose(
    #     RandomRotate(),
    #     RandomTranspose(),
    #     RandomFlip()
    # )

    # setup logger
    os.makedirs('derived_data/log', exist_ok=True)
    Logger.instance().setup('derived_data/log')

    vae = Vae()

    ds = HemoDataset(root_folder=root_folder, image_transform=None, training=True)
    train_loader = torch.utils.data.DataLoader(ds, batch_size=1536, num_workers=8)

    # Build trainer
    trainer = Trainer(vae)
    trainer.save_to_directory(folder)

    if load:
        trainer.load()
    # trainer.cuda(devices=[0, 1])
    trainer.cuda()

    trainer.build_criterion(vae.loss_function())
    trainer.build_optimizer('Adam', lr=0.001)
    # trainer.validate_every((2, 'epochs'))
    trainer.save_every((1, 'epochs'))
    trainer.set_max_num_epochs(100)
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never',
                                           # log_images_every=(1, 'iteration'),
                                           log_directory=folder))

    # Bind loaders
    trainer.bind_loader('train', train_loader, num_inputs=1, num_targets=1)

    # bind callbacks
    trainer.register_callback(GarbageCollection(), trigger='end_of_training_iteration')
    trainer.register_callback(ShowMinimalConsoleInfo(), trigger='end_of_training_iteration')

    # trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
    trainer.fit()
    pushover_notification.send('embeddings generated')
Example #15
def run(args):
    model = SpeechModel(args)
    trainer = Trainer()
    # Resume if a checkpoint already exists (the default checkpoint filename
    # is read from a private Trainer attribute)
    if os.path.exists(
            os.path.join(args.save_directory, trainer._checkpoint_filename)):
        trainer.load(from_directory=args.save_directory)
        model.load_state_dict(trainer.model.state_dict())
        if args.cuda:
            model = model.cuda()
    else:
        train_loader = make_loader('train', args, batch_size=args.batch_size)
        dev_loader = make_loader('dev', args, batch_size=args.batch_size)
        # Build trainer
        trainer = Trainer(model) \
            .build_criterion(CTCCriterion, size_average=True) \
            .build_optimizer('Adam', weight_decay=1e-7, lr=5e-5) \
            .validate_every((1, 'epochs')) \
            .save_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)

        # Bind loaders
        trainer.bind_loader('train', train_loader, num_inputs=3, num_targets=1)
        trainer.bind_loader('validate',
                            dev_loader,
                            num_inputs=3,
                            num_targets=1)

        if args.cuda:
            trainer.cuda()

        # Go!
        trainer.fit()
        trainer.save()

    test_loader = make_loader('test',
                              args=args,
                              shuffle=False,
                              batch_size=1,
                              test=True)
    run_logits(loader=test_loader, model=model, args=args)
Example #16
def train_model(args):
    """
    Perform training then call prediction
    """
    model = all_cnn_module()
    model.apply(initializer)
    train_loader, validate_loader, test_loader = make_loaders(args)
    # Build trainer
    savepath = os.path.join(args.save_directory,
                            Trainer()._checkpoint_filename)
    if os.path.exists(savepath):
        trainer = Trainer().load(from_directory=args.save_directory)
        if args.cuda:
            trainer.cuda()
    else:
        trainer = Trainer(model) \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .save_every((1, 'epochs')) \
            .validate_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)

        # These are the params from the paper
        trainer.build_optimizer('SGD',
                                lr=0.01,
                                momentum=0.9,
                                weight_decay=0.001)
        # Also works with Adam and default settings
        # trainer.build_optimizer('Adam')
        trainer.bind_loader('train', train_loader)
        trainer.bind_loader('validate', validate_loader)

        if args.cuda:
            trainer.cuda()

        # Go!
        trainer.fit()
        trainer.save()
    write_predictions(args, trainer.model, test_loader)
Example #17
File: train.py | Project: Beinabih/tiktorch
def main():
    # Load dataset
    train_loader = get_cremi_loaders(
        config=join(PROJECT_DIRECTORY, 'Configurations', CONFIG_FILENAME))
    # Build model
    dense_unet = DUNet(1, 1)
    # Build trainer
    trainer = Trainer(model=dense_unet) \
        .build_optimizer('Adam') \
        .build_criterion('SorensenDiceLoss') \
        .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                        send_image_at_channel_indices='all',
                                        log_images_every=(20, 'iterations')),
                      log_directory=join(PROJECT_DIRECTORY, 'Logs')) \
        .save_every((1000, 'iterations'), to_directory=join(PROJECT_DIRECTORY, 'Weights')) \
        .set_max_num_iterations(1000000) \
        .cuda()
    # Bind loader to trainer
    trainer.bind_loader('train', train_loader)
    # Go!
    trainer.fit()
Example #18
    def get_trainer(self, input_channels):
        # Build model
        net = self._make_test_model(input_channels)
        # Build trainer
        trainer = Trainer(net)\
            .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                            send_image_at_channel_indices='all',
                                            log_images_every=(20, 'iterations')),
                          log_directory=self.LOG_DIRECTORY)\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .build_optimizer('Adam')\
            .validate_every((1, 'epochs'))\
            .save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)\
            .set_precision(self.PRECISION)
        # Bind loaders
        train_loader, test_loader = self.get_random_dataloaders(
            input_channels=input_channels)
        trainer.bind_loader('train',
                            train_loader).bind_loader('validate', test_loader)
        return trainer
Example #19
def run(args):
    dataset = RorschachWrapper()

    save_args(args)  # save command line to a file for reference
    train_loader = rorschach_cgan_data_loader(args,
                                              dataset=dataset)  # get the data
    # todo
    model = patchCWGANModel(args,
                            discriminator=patchCDiscriminatorNetwork(args),
                            generator=CGeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        CWGANDiscriminatorLoss(penalty_weight=args.penalty_weight,
                               model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)

    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(CGenerateDataCallback(args, dataset=dataset))
    trainer.register_callback(
        CGeneratorTrainingCallback(args,
                                   parameters=model.generator.parameters(),
                                   criterion=WGANGeneratorLoss(),
                                   dataset=dataset))
    trainer.bind_loader('train', train_loader, num_inputs=2)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    # Drop 'training_inputs' from the observed training states so raw
    # inputs are not logged as images (uses a private logger attribute)
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()
Example #20
def main():
    # Load dataset
    train_loader = get_cremi_loaders(config=join(PROJECT_DIRECTORY, "Configurations", CONFIG_FILENAME))
    # Build model
    dense_unet = DUNet(1, 1)
    # Build trainer
    trainer = (
        Trainer(model=dense_unet)
        .build_optimizer("Adam")
        .build_criterion("SorensenDiceLoss")
        .build_logger(
            TensorboardLogger(
                send_image_at_batch_indices=0, send_image_at_channel_indices="all", log_images_every=(20, "iterations")
            ),
            log_directory=join(PROJECT_DIRECTORY, "Logs"),
        )
        .save_every((1000, "iterations"), to_directory=join(PROJECT_DIRECTORY, "Weights"))
        .set_max_num_iterations(1000000)
        .cuda()
    )
    # Bind loader to trainer
    trainer.bind_loader("train", train_loader)
    # Go!
    trainer.fit()
Example #21
def run(args):
    save_args(args)  # save command line to a file for reference
    train_loader = mnist_data_loader(args)  # get the data
    model = GANModel(args,
                     discriminator=DiscriminatorNetwork(args),
                     generator=GeneratorNetwork(args))

    # Build trainer
    trainer = Trainer(model)
    trainer.build_criterion(
        WGANDiscriminatorLoss(penalty_weight=args.penalty_weight, model=model))
    trainer.build_optimizer('Adam',
                            model.discriminator.parameters(),
                            lr=args.discriminator_lr)
    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.register_callback(GenerateDataCallback(args))
    trainer.register_callback(
        GeneratorTrainingCallback(args,
                                  parameters=model.generator.parameters(),
                                  criterion=WGANGeneratorLoss()))
    trainer.bind_loader('train', train_loader)
    # Custom logging configuration so it knows to log our images
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(args.log_image_frequency,
                                                 'iteration'))
    trainer.build_logger(logger, log_directory=args.save_directory)
    logger.observe_state('generated_images')
    logger.observe_state('real_images')
    # Drop 'training_inputs' from the observed training states so raw
    # inputs are not logged as images (uses a private logger attribute)
    logger._trainer_states_being_observed_while_training.remove(
        'training_inputs')

    if args.cuda:
        trainer.cuda()

    # Go!
    trainer.fit()

    # Generate video from saved images
    if not args.no_ffmpeg:
        generate_video(args.save_directory)
Example #22
def set_up_training(project_directory, config):

    # Load the model to train from the configuration file ('./config/train_config.yml')
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    # Initialize the loss: we use the SorensenDiceLoss, which has the nice property
    # of being fairly robust for un-balanced targets
    criterion = SorensenDiceLoss()
    # Wrap the loss to apply additional transformations before the actual
    # loss is applied. Here, we apply the mask to the target
    # and invert the target (necessary for sorensen dice) during training.
    # In addition, we need to remove the segmentation from the target during
    # validation (the segmentation is only part of the target during validation).
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(),
                                              InvertTarget()))

    # Build the validation metric: we validate by running connected components on
    # the affinities for several thresholds
    # metric = ArandErrorFromConnectedComponentsOnAffinities(thresholds=[.5, .6, .7, .8, .9],
    #                                                        invert_affinities=True)
    metric = ArandErrorFromConnectedComponents(thresholds=[.5, .6, .7, .8, .9],
                                               invert_input=True,
                                               average_input=True)

    logger.info("Building trainer.")
    smoothness = 0.95
    # Build the trainer object
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))
    # .register_callback(DumpHDF5Every(frequency='99 iterations',
    #                                  to_directory=os.path.join(project_directory, 'debug')))

    logger.info("Building logger.")
    # Build tensorboard logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #23
    current_model_dir = os.path.join(model_dir, args.id)
    log_info('Model will be saved to %s' % (curent_model_dir))
    log_info(' + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    weight_dir = os.path.join(current_model_dir, 'weights')
    log_info('Weights will be saved to %s' % (weight_dir))
    if not os.path.exists(weight_dir):
        os.mkdir(weight_dir)
    logs_dir = os.path.join(current_model_dir, 'logs')
    if not os.path.exists(logs_dir):
        os.mkdir(logs_dir)
    log_info('Logs will be saved to %s' % (logs_dir))

    # Build trainer
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=(np.inf, 'epochs'))

    # Disable histogram logging by replacing the logger method with a no-op
    def log_histogram(self, tag, values, bins=1000):
        pass

    logger.log_histogram = log_histogram

    trainer = Trainer(model)\
        .build_criterion('CrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam') \
        .validate_every((2, 'epochs')) \
        .save_every((5, 'epochs')) \
        .save_to_directory(weight_dir) \
        .set_max_num_epochs(10000) \
        .build_logger(logger, log_directory=logs_dir)
Example #24
##################################################
# data loaders
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)

##################################################
# Build trainer
trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)

##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)

##################################################
# activate cuda
if USE_CUDA:
    trainer.cuda()

##################################################
# fit
trainer.fit()
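Once checkpoints exist in SAVE_DIRECTORY, the run can be resumed with the same load API shown in Example #7 (a sketch):

# Resume from the latest checkpoint rather than rebuilding from scratch
trainer = Trainer().load(from_directory=SAVE_DIRECTORY, best=False)
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)
trainer.fit()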
Example #25
                                        shuffle=True, num_workers=2)


net = torch.nn.Sequential(
    ConvReLU2D(in_channels=1, out_channels=3, kernel_size=3),
    UNet(in_channels=3, out_channels=N_DIRECTIONS, dim=2, final_activation='ReLU')
    )

trainer = Trainer(net)

trainer.bind_loader('train', trainloader)
trainer.bind_loader('validate', valloader)

trainer.save_to_directory('./checkpoints')
trainer.save_every((200, 'iterations'))
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)



trainer.validate_every((200, 'iterations'), for_num_iterations=50)
#trainer.build_metric()


trainer.build_criterion(nn.L1Loss)
trainer.build_optimizer(optim.Adam, lr=1e-4, weight_decay=0.0005)

trainer.set_max_num_iterations(20000)


if torch.cuda.is_available():
    trainer.cuda()
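The excerpt ends before training is launched; presumably it continues with:

# Train for the configured 20000 iterations
trainer.fit()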
Example #26
def train_model(args):
    model = fm.model_fn(args)
    print('Done initializing model')
    # labels_all_dict = np.load('labels_all_dict.npy')
    labels_all_dict = np.load('labels_all_dict_10s_subset_500.npy')
    if args.no_val:
        file_names_train = np.load(
            'file_names_train_notest_hop_subset_500.npy')
        # file_names_train = np.load('file_names_train_notest.npy')
        train_dataloader = fd.create_dataloader(file_names_train,
                                                args.data_directory,
                                                labels_all_dict.item(), args)
    else:
        file_names_train = np.load('file_names_train.npy')
        file_names_test = np.load('file_names_test.npy')
        train_dataloader = fd.create_dataloader(file_names_train,
                                                args.data_directory,
                                                labels_all_dict.item(), args)
        dev_dataloader = fd.create_dataloader(file_names_test,
                                              args.data_directory,
                                              labels_all_dict.item(), args)

    # train_dataloader = fd.create_dataloader(train_feats, train_labels, args)
    # dev_dataloader = fd.create_dataloader(dev_feats, dev_labels, args)

    test_dataloader_flag = False
    if test_dataloader_flag:
        it = 0
        for x, y, z in train_dataloader:

            print('x.shape: {}'.format(x.shape))
            print('y.shape: {}'.format(y.shape))
            print('z.shape: {}'.format(z.shape))
            print('x: {}'.format(x))
            print('y: {}'.format(y))
            print('z: {}'.format(z))

            it = it + 1
            print(it)

    print('Done Creating Data Loaders')

    if args.load_model:
        (model, trainer) = load_model(args)
        trainer \
            .bind_loader('train', train_dataloader, num_inputs=2, num_targets=1)
        if not args.no_val:
            trainer.bind_loader('validate',
                                dev_dataloader,
                                num_inputs=2,
                                num_targets=1)
    else:
        # Build trainer
        loss_fn = cust_CrossEntropyLoss()
        trainer = Trainer(model) \
            .build_criterion(loss_fn) \
            .build_metric('CategoricalError') \
            .build_optimizer(torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)) \
            .save_every((1, 'epochs')) \
            .save_to_directory(args.save_directory) \
            .set_max_num_epochs(args.epochs) \
            .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                            log_images_every='never'),
                          log_directory=args.save_directory)
        if not args.no_val:
            trainer \
                .save_at_best_validation_score(True) \
                .validate_every((1, 'epochs'))
        trainer \
            .bind_loader('train', train_dataloader, num_inputs=2, num_targets=1)
        if not args.no_val:
            trainer.bind_loader('validate',
                                dev_dataloader,
                                num_inputs=2,
                                num_targets=1)

            # .build_optimizer('Adam') \
            # .save_at_best_validation_score(True) \
            # .build_metric(LevenshteinMetric) \
            # .evaluate_metric_every('300 iterations') \

    if args.cuda:
        print('Switch trainer to CUDA')
        trainer.cuda()

    trainer.fit()
    print('Done Fitting Trainer')
    return model
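cust_CrossEntropyLoss is project-specific and not shown. A plausible minimal stand-in, assuming it simply wraps nn.CrossEntropyLoss:

import torch.nn as nn

class cust_CrossEntropyLoss(nn.Module):
    # Hypothetical stand-in; the real implementation is not in this excerpt.
    def __init__(self):
        super().__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, prediction, target):
        return self.loss(prediction, target)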
Example #27
model = nn.Sequential(
    ConvELU2D(in_channels=3, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2), Flatten(),
    nn.Linear(in_features=(256 * 4 * 4), out_features=10),
    # NOTE: 'CrossEntropyLoss' (built below) already applies log-softmax,
    # so this trailing Softmax is redundant and usually omitted
    nn.Softmax(dim=1))

##################################################
# data loaders
from inferno.io.box.cifar import get_cifar10_loaders
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)

logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                           log_images_every='never')

##################################################
# Build trainer
from inferno.trainers.basic import Trainer

trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)

trainer.build_logger(logger, log_directory=LOG_DIRECTORY)
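The excerpt stops after building the logger and trainer; the remaining steps would mirror Example #24 (a sketch):

# Bind the CIFAR-10 loaders and launch training, as in Example #24
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)
if USE_CUDA:
    trainer.cuda()
trainer.fit()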
Example #28
    if not os.path.exists(logs_dir):
        os.mkdir(logs_dir)
    log_info('Logs will be saved to %s' % (logs_dir))

    # Build trainer
    trainer = Trainer(model) \
        .build_criterion('CrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam', lr=args.lr, betas=(0.9, 0.999), eps=1e-08) \
        .validate_every((1, 'epochs')) \
        .save_every((5, 'epochs')) \
        .save_to_directory(model_dir) \
        .set_max_num_epochs(10000) \
        .register_callback(GradChecker()) \
        .register_callback(AutoLR(0.96, (1, 'epochs'), monitor_momentum=0.9,
                           monitor_while='validating',
                           consider_improvement_with_respect_to='best'))\
        .build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                        log_images_every=(1, 'epoch')),
                      log_directory=logs_dir)

    # Bind loaders
    trainer \
        .bind_loader('train', train_dl) \
        .bind_loader('validate', test_dl)

    if torch.cuda.is_available():
        trainer.cuda()

    trainer.fit()
Example #29
    log_info('Model will be saved to %s' % (current_model_dir))
    log_info(' + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    weight_dir = os.path.join(current_model_dir, 'weights')
    log_info('Weights will be saved to %s' % (weight_dir))
    if not os.path.exists(weight_dir):
        os.mkdir(weight_dir)
    logs_dir = os.path.join(current_model_dir, 'logs')
    if not os.path.exists(logs_dir):
        os.mkdir(logs_dir)
    log_info('Logs will be saved to %s' % (logs_dir))

    # Build trainer
    logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                               log_images_every=None,
                               log_histograms_every=None)

    def log_hist(self, tag, values=1, step=1, bins=1000):
        """No-op stand-in that disables histogram logging."""
        pass

    logger.log_histogram = log_hist

    trainer = Trainer(model)\
        .build_criterion('CrossEntropyLoss') \
        .build_metric('CategoricalError') \
        .build_optimizer('Adam') \
        .validate_every((400, 'iterations')) \
        .save_every((400, 'iterations')) \
        .save_to_directory(weight_dir) \