Example #1
def set_up_training(project_directory, config, data_config,
                    load_pretrained_model):
    # Get model
    if load_pretrained_model:
        model = Trainer().load(from_directory=project_directory,
                               filename='Weights/checkpoint.pytorch').model
    else:
        model_name = config.get('model_name')
        model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(),
                                              InvertTarget()))

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.95

    offsets = data_config['volume_config']['segmentation']['affinity_config'][
        'offsets']
    metric = ArandErrorFromMulticut(average_slices=False,
                                    use_2d_ws=True,
                                    n_threads=8,
                                    weight_edges=True,
                                    offsets=offsets)

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))\
        .register_callback(GarbageCollection())

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
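The trainer returned above still needs data loaders and a call to fit before anything is trained. A minimal driver sketch, assuming inferno's Trainer API (bind_loader, set_max_num_iterations, cuda, fit); get_train_loader, get_validation_loader and the 'max_train_iters' config key are hypothetical stand-ins:

import torch

def run_training(project_directory, config, data_config, load_pretrained_model):
    # Hypothetical loader helpers -- the real ones depend on the project setup.
    train_loader = get_train_loader(data_config)
    validation_loader = get_validation_loader(data_config)

    trainer = set_up_training(project_directory, config, data_config,
                              load_pretrained_model)
    trainer.set_max_num_iterations(config.get('max_train_iters', 100000))

    # Bind the loaders and run training (on the GPU if one is available).
    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', validation_loader)
    if torch.cuda.is_available():
        trainer.cuda()
    trainer.fit()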
Example #2
    def inferno_build_criterion(self):
        print("Building criterion")
        loss_config = self.get('trainer/criterion/losses')

        criterion = SorensenDiceLoss()
        loss_train = LossWrapper(criterion=criterion,
                                 transforms=Compose(ApplyAndRemoveMask(), InvertTarget()))
        loss_val = LossWrapper(criterion=criterion,
                               transforms=Compose(RemoveSegmentationFromTarget(),
                                                  ApplyAndRemoveMask(), InvertTarget()))
        self._trainer.build_criterion(loss_train)
        self._trainer.build_validation_criterion(loss_val)
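This snippet is a method of a larger training-setup class: it reads a loss section from the config ('trainer/criterion/losses'), but in this excerpt the losses are fixed to Sorensen-Dice. A rough sketch of the kind of harness the method assumes; the class, its get() helper and the _trainer attribute are hypothetical, inferred only from the snippet itself:

from inferno.trainers.basic import Trainer

class TrainingSetup:
    def __init__(self, model, config):
        self.config = config
        self._trainer = Trainer(model)

    def get(self, key, default=None):
        # Resolve nested keys like 'trainer/criterion/losses' in the config dict.
        value = self.config
        for part in key.split('/'):
            if not isinstance(value, dict) or part not in value:
                return default
            value = value[part]
        return value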
Example #3
def dice_loss(is_val=False):
    print("Build Dice loss")
    if is_val:
        trafos = [
            RemoveSegmentationFromTarget(),
            ApplyAndRemoveMask(),
            InvertTarget()
        ]
    else:
        trafos = [ApplyAndRemoveMask(), InvertTarget()]
    trafos = Compose(*trafos)
    return LossWrapper(criterion=SorensenDiceLoss(), transforms=trafos)
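A short usage sketch for this factory, mirroring how the larger examples above wire the two losses into the inferno Trainer (model is assumed to be defined):

trainer = Trainer(model)
# Training loss: mask + inverted target; the validation variant additionally
# strips the segmentation from the target before the loss is computed.
trainer.build_criterion(dice_loss())
trainer.build_validation_criterion(dice_loss(is_val=True))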
Example #4
def check_loader(n_batches=1, with_trafo=False, remove_mask=False):

    loader = get_cremi_loader('./configs/validation_config.yml')
    trafo1 = SemanticTargetTrafo([1, 2, 3], torch.float32, ignore_label=-1)
    trafo2 = ApplyAndRemoveMask()

    for ii, (x, y) in enumerate(loader):

        pred_shape = (x.shape[0], 3) + x.shape[2:]
        pred = torch.rand(*pred_shape)
        if with_trafo:
            pred, y = trafo1(pred, y)
            if remove_mask:
                pred, y = trafo2(pred, y)

        x = x.numpy().squeeze()
        y = y.numpy().squeeze()
        pred = pred.numpy().squeeze()

        with napari.gui_qt():
            v = napari.Viewer()
            v.add_image(x)
            v.add_image(pred)
            v.add_labels(y)

        # stop once n_batches batches have been shown
        if ii + 1 >= n_batches:
            break
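For instance, to inspect two validation batches with the semantic target transform and the mask applied (the config path is the one hard-coded above):

if __name__ == '__main__':
    check_loader(n_batches=2, with_trafo=True, remove_mask=True)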
Example #5
def dice_loss():
    trafos = [
        SemanticTargetTrafo(class_ids=[1, 2, 3],
                            dtype=torch.float32,
                            ignore_label=-1),
        ApplyAndRemoveMask()
    ]
    trafos = Compose(*trafos)
    return LossWrapper(criterion=SorensenDiceLoss(), transforms=trafos)
Example #6
def set_up_training(project_directory, config, data_config):

    # Get model
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    criterion = SorensenDiceLoss()
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    metric = loss_train

    # Build trainer and validation metric
    logger.info("Building trainer.")
    smoothness = 0.9

    trainer = Trainer(model)\
        .save_every((1000, 'iterations'),
                    to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness,
                                                     verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.99,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))

    logger.info("Building logger.")
    # Build logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations'),
        log_histograms_every='never').observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
Example #7
def set_up_training(project_directory, config):

    # Load the model to train from the configuration file ('./config/train_config.yml')
    model_name = config.get('model_name')
    model = getattr(models, model_name)(**config.get('model_kwargs'))

    # Initialize the loss: we use the SorensenDiceLoss, which has the nice property
    # of being fairly robust for unbalanced targets.
    criterion = SorensenDiceLoss()
    # Wrap the loss to apply additional transformations before the actual
    # loss is computed. During training, we apply the mask to the target
    # and invert the target (necessary for the Sorensen-Dice loss).
    # During validation, we additionally remove the segmentation from the
    # target (the segmentation is only part of the target during validation,
    # where it is needed for the validation metric below).
    loss_train = LossWrapper(criterion=criterion,
                             transforms=Compose(ApplyAndRemoveMask(),
                                                InvertTarget()))
    loss_val = LossWrapper(criterion=criterion,
                           transforms=Compose(RemoveSegmentationFromTarget(),
                                              ApplyAndRemoveMask(),
                                              InvertTarget()))

    # Build the validation metric: we validate by running connected components on
    # the affinities for several thresholds
    # metric = ArandErrorFromConnectedComponentsOnAffinities(thresholds=[.5, .6, .7, .8, .9],
    #                                                        invert_affinities=True)
    metric = ArandErrorFromConnectedComponents(thresholds=[.5, .6, .7, .8, .9],
                                               invert_input=True,
                                               average_input=True)

    logger.info("Building trainer.")
    smoothness = 0.95
    # Build the trainer object
    trainer = Trainer(model)\
        .save_every((1000, 'iterations'), to_directory=os.path.join(project_directory, 'Weights'))\
        .build_criterion(loss_train)\
        .build_validation_criterion(loss_val)\
        .build_optimizer(**config.get('training_optimizer_kwargs'))\
        .evaluate_metric_every('never')\
        .validate_every((100, 'iterations'), for_num_iterations=1)\
        .register_callback(SaveAtBestValidationScore(smoothness=smoothness, verbose=True))\
        .build_metric(metric)\
        .register_callback(AutoLR(factor=0.98,
                                  patience='100 iterations',
                                  monitor_while='validating',
                                  monitor_momentum=smoothness,
                                  consider_improvement_with_respect_to='previous'))
    # .register_callback(DumpHDF5Every(frequency='99 iterations',
    #                                  to_directory=os.path.join(project_directory, 'debug')))

    logger.info("Building logger.")
    # Build tensorboard logger
    tensorboard = TensorboardLogger(
        log_scalars_every=(1, 'iteration'),
        log_images_every=(100, 'iterations')).observe_states(
            ['validation_input', 'validation_prediction', 'validation_target'],
            observe_while='validating')

    trainer.build_logger(tensorboard,
                         log_directory=os.path.join(project_directory, 'Logs'))
    return trainer
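Checkpoints written by save_every can later be used to resume or evaluate this setup; example #1 above shows the same load call. A minimal sketch, assuming the 'Weights/checkpoint.pytorch' layout used throughout these examples:

def resume_training(project_directory):
    # Restores the full trainer state (model, optimizer, iteration count)
    # from the last checkpoint written by save_every.
    trainer = Trainer().load(from_directory=project_directory,
                             filename='Weights/checkpoint.pytorch')
    return trainer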
Example #8
def dice_loss():
    print("Build Dice loss")
    trafos = [ApplyAndRemoveMask(), InvertTarget()]
    trafos = Compose(*trafos)
    return LossWrapper(criterion=SorensenDiceLoss(),
                       transforms=trafos)
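For orientation, a rough illustration of what the two transforms are assumed to do before SorensenDiceLoss sees the tensors: ApplyAndRemoveMask is assumed to treat extra target channels as a binary mask, multiply it into prediction and target and then drop it, while InvertTarget is assumed to flip the target (1 - target). The function below is purely illustrative, not the neurofire implementation, and the channel layout is an assumption:

def illustrate_loss_transforms(prediction, target_with_mask):
    # Assumption: the target packs [actual target, mask] along the channel axis,
    # so it has twice as many channels as the prediction.
    n_channels = prediction.shape[1]
    target = target_with_mask[:, :n_channels]
    mask = target_with_mask[:, n_channels:]

    # 'ApplyAndRemoveMask' (assumed): zero out ignored voxels and drop the mask.
    prediction = prediction * mask
    target = target * mask

    # 'InvertTarget' (assumed): the Dice loss is computed on 1 - target.
    target = 1. - target
    return prediction, target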