def unet(config, train_mode):
    """Build the single-class U-Net pipeline:
    preprocessing -> U-Net -> mask postprocessing -> multiclass labeling.

    Parameters
    ----------
    config : attribute-style config providing ``unet`` (network kwargs),
        ``env.cache_dirpath`` and the settings read by the helper builders.
    train_mode : bool
        Forwarded to the preprocessing builder as ``is_train``.

    Returns
    -------
    Step
        The 'output' step exposing labeled images under the ``y_pred`` key.
    """
    # The original if/else assigned identical values in both branches,
    # so train vs. inference only influences the preprocessing loader.
    save_output = False
    load_saved_output = False

    loader = preprocessing(config, model_type='single', is_train=train_mode)
    unet = Step(name='unet',
                transformer=PyTorchUNet(**config.unet),
                input_steps=[loader],
                cache_dirpath=config.env.cache_dirpath,
                save_output=save_output,
                load_saved_output=load_saved_output)

    mask_postprocessed = mask_postprocessing(unet,
                                             config,
                                             save_output=save_output)
    detached = multiclass_object_labeler(mask_postprocessed,
                                         config,
                                         save_output=save_output)
    # Dummy transformer: the step only renames the labeler's output
    # to the conventional 'y_pred' key via the adapter.
    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[detached],
                  adapter={
                      'y_pred': ([(detached.name, 'labeled_images')]),
                  },
                  cache_dirpath=config.env.cache_dirpath,
                  save_output=save_output,
                  load_saved_output=False)
    return output
# Esempio n. 2 (example separator; vote count: 0)
def unet(config, train_mode):
    """Build a U-Net pipeline: preprocessing -> U-Net -> postprocessing -> nuclei labeling.

    Parameters
    ----------
    config : attribute-style config providing ``unet`` (network kwargs),
        ``env.cache_dirpath`` and the settings used by the helper builders.
    train_mode : bool
        Selects the training vs. inference preprocessing sub-pipeline.

    Returns
    -------
    Step
        The 'output' step exposing labels under the ``y_pred`` key.
    """
    # The caching flags were identical in both branches of the original
    # if/else; only the preprocessing sub-pipeline depends on train_mode.
    save_output = True
    load_saved_output = False
    if train_mode:
        preprocessing = preprocessing_train(config)
    else:
        preprocessing = preprocessing_inference(config)

    unet = Step(name='unet',
                transformer=PyTorchUNet(**config.unet),
                input_steps=[preprocessing],
                cache_dirpath=config.env.cache_dirpath,
                save_output=save_output,
                load_saved_output=load_saved_output)

    mask_postprocessed = mask_postprocessing(unet,
                                             config,
                                             save_output=save_output)

    detached = nuclei_labeler(mask_postprocessed,
                              config,
                              save_output=save_output)

    # Dummy transformer: the step only renames the labeler's 'labels'
    # output to the conventional 'y_pred' key via the adapter.
    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[detached],
                  adapter={
                      'y_pred': ([(detached.name, 'labels')]),
                  },
                  cache_dirpath=config.env.cache_dirpath)
    return output
# Esempio n. 3 (example separator; vote count: 0)
def unet(config, train_mode):
    """Build a U-Net pipeline driven by a preprocessing generator.

    The network implementation is chosen from ``config.execution.stream_mode``
    (streaming vs. in-memory variant); the final 'output' step exposes
    scored images under the ``y_pred`` key.

    Parameters
    ----------
    config : attribute-style config with ``unet``, ``execution.stream_mode``
        and ``env.cache_dirpath``.
    train_mode : bool
        Forwarded to the preprocessing generator as ``is_train``.
    """
    save_output = False
    load_saved_output = False

    data_loader = preprocessing_generator(config, is_train=train_mode)

    # Pick the streaming or plain network — exactly one is instantiated.
    if config.execution.stream_mode:
        network = PyTorchUNetStream(**config.unet)
    else:
        network = PyTorchUNet(**config.unet)

    unet = Step(name='unet',
                transformer=network,
                input_data=['callback_input'],
                input_steps=[data_loader],
                cache_dirpath=config.env.cache_dirpath,
                save_output=save_output,
                load_saved_output=load_saved_output)

    mask_postprocessed = mask_postprocessing(data_loader,
                                             unet,
                                             config,
                                             save_output=save_output)

    # Rename 'images_with_scores' to the conventional 'y_pred' key.
    return Step(name='output',
                transformer=Dummy(),
                input_steps=[mask_postprocessed],
                adapter={
                    'y_pred':
                    ([(mask_postprocessed.name, 'images_with_scores')]),
                },
                cache_dirpath=config.env.cache_dirpath,
                save_output=save_output,
                load_saved_output=False)
# Esempio n. 4 (example separator; vote count: 0)
def unet_inference(config):
    """Build the inference pipeline:
    XY split -> loader -> U-Net -> mask resize -> thresholding -> output.

    Parameters
    ----------
    config : attribute-style config providing ``xy_splitter``, ``loader``,
        ``unet_network``, ``thresholder`` and ``env.cache_dirpath``.

    Returns
    -------
    Step
        The 'output' step exposing binarized masks under ``y_pred``.
    """
    cache_dir = config.env.cache_dirpath

    splitter = XYSplit(**config.xy_splitter)
    xy_inference = Step(name='xy_inference',
                        transformer=splitter,
                        input_data=['input'],
                        adapter={'meta': ([('input', 'meta')]),
                                 'train_mode': ([('input', 'train_mode')])
                                 },
                        cache_dirpath=cache_dir)

    # NOTE(review): the same step is listed twice in input_steps, mirroring
    # the train-mode pipeline shape — presumably required by the Step API;
    # confirm before simplifying.
    loader_transformer = MetadataImageSegmentationLoader(**config.loader)
    loader_inference = Step(name='loader',
                            transformer=loader_transformer,
                            input_data=['input'],
                            input_steps=[xy_inference, xy_inference],
                            adapter={'X': ([('xy_inference', 'X')], squeeze_inputs),
                                     'y': ([('xy_inference', 'y')], squeeze_inputs),
                                     'train_mode': ([('input', 'train_mode')]),
                                     },
                            cache_dirpath=cache_dir)

    unet_network = Step(name='unet_network',
                        transformer=PyTorchUNet(**config.unet_network),
                        input_steps=[loader_inference],
                        cache_dirpath=cache_dir)

    # Predicted masks are resized back to each image's original size.
    mask_resize = Step(name='mask_resize',
                       transformer=Resizer(),
                       input_data=['input'],
                       input_steps=[unet_network],
                       adapter={'images': ([('unet_network', 'predicted_masks')]),
                                'target_sizes': ([('input', 'target_sizes')]),
                                },
                       cache_dirpath=cache_dir)

    thresholding = Step(name='thresholding',
                        transformer=Thresholder(**config.thresholder),
                        input_steps=[mask_resize],
                        adapter={'images': ([('mask_resize', 'resized_images')]),
                                 },
                        cache_dirpath=cache_dir)

    # Rename the thresholder's output to the conventional 'y_pred' key.
    return Step(name='output',
                transformer=Dummy(),
                input_steps=[thresholding],
                adapter={'y_pred': ([('thresholding', 'binarized_images')]),
                         },
                cache_dirpath=cache_dir)
# Esempio n. 5 (example separator; vote count: 0)
def unet(config, train_mode):
    """Build the single-class U-Net pipeline with optional mask dilation:
    preprocessing -> U-Net -> postprocessing [-> dilation] -> labeling.

    Parameters
    ----------
    config : attribute-style config providing ``unet``,
        ``execution.stream_mode``, ``postprocessor`` (including
        ``dilate_selem_size``) and ``env.cache_dirpath``.
    train_mode : bool
        Forwarded to the preprocessing builder as ``is_train``.

    Returns
    -------
    Step
        The 'output' step exposing labeled images under ``y_pred``.
    """
    # The original if/else assigned identical values in both branches,
    # so train vs. inference only influences the preprocessing loader.
    save_output = False
    load_saved_output = False

    loader = preprocessing(config, model_type='single', is_train=train_mode)
    unet = Step(name='unet',
                transformer=PyTorchUNetStream(**config.unet) if
                config.execution.stream_mode else PyTorchUNet(**config.unet),
                input_steps=[loader],
                cache_dirpath=config.env.cache_dirpath,
                save_output=save_output,
                load_saved_output=load_saved_output)

    mask_postprocessed = mask_postprocessing(unet,
                                             config,
                                             save_output=save_output)
    # Optional morphological dilation of the categorized masks; only
    # inserted when a positive structuring-element size is configured.
    if config.postprocessor["dilate_selem_size"] > 0:
        mask_postprocessed = Step(
            name='mask_dilation',
            transformer=MaskDilatorStream(**config.postprocessor)
            if config.execution.stream_mode else MaskDilator(
                **config.postprocessor),
            input_steps=[mask_postprocessed],
            adapter={
                'images': ([(mask_postprocessed.name, 'categorized_images')]),
            },
            cache_dirpath=config.env.cache_dirpath,
            save_output=save_output,
            load_saved_output=False)
    detached = multiclass_object_labeler(mask_postprocessed,
                                         config,
                                         save_output=save_output)
    # Dummy transformer: the step only renames the labeler's output
    # to the conventional 'y_pred' key via the adapter.
    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[detached],
                  adapter={
                      'y_pred': ([(detached.name, 'labeled_images')]),
                  },
                  cache_dirpath=config.env.cache_dirpath,
                  save_output=save_output,
                  load_saved_output=False)
    return output
# Esempio n. 6 (example separator; vote count: 0)
def unet(config, train_mode):
    """Build a U-Net pipeline: preprocessing -> U-Net -> mask postprocessing.

    Parameters
    ----------
    config : attribute-style config providing ``unet``,
        ``execution.stream_mode`` and ``env.cache_dirpath``.
    train_mode : bool
        Forwarded to the preprocessing builder as ``is_train``.

    Returns
    -------
    Step
        The 'output' step exposing postprocessed images under ``y_pred``
        and their scores under ``y_scores``.
    """
    # The original if/else assigned identical values in both branches,
    # so train vs. inference only influences the preprocessing loader.
    save_output = False
    load_saved_output = False

    loader = preprocessing(config, model_type='single', is_train=train_mode)
    unet = Step(name='unet',
                transformer=PyTorchUNetStream(**config.unet) if config.execution.stream_mode else PyTorchUNet(
                    **config.unet),
                input_steps=[loader],
                cache_dirpath=config.env.cache_dirpath,
                save_output=save_output, load_saved_output=load_saved_output)

    mask_postprocessed = mask_postprocessing(loader, unet, config, save_output=save_output)

    # Dummy transformer: the step only renames the postprocessing outputs
    # to the conventional 'y_pred'/'y_scores' keys via the adapter.
    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[mask_postprocessed],
                  adapter={'y_pred': ([(mask_postprocessed.name, 'images')]),
                           'y_scores': ([(mask_postprocessed.name, 'scores')])
                           },
                  cache_dirpath=config.env.cache_dirpath,
                  save_output=save_output,
                  load_saved_output=False)
    return output
# Esempio n. 7 (example separator; vote count: 0)
def unet_padded_tta(config):
    """Build the padded test-time-augmentation (TTA) inference pipeline:
    TTA preprocessing -> U-Net -> TTA aggregation -> crop -> rename ->
    mask postprocessing -> output.

    Parameters
    ----------
    config : attribute-style config providing ``unet``,
        ``execution.stream_mode``, ``postprocessor.prediction_crop`` and
        ``env.cache_dirpath``.

    Raises
    ------
    NotImplementedError
        If ``config.execution.stream_mode`` is enabled — TTA is only
        implemented for the non-streaming network.

    Returns
    -------
    Step
        The 'output' step exposing scored images under ``y_pred``.
    """
    save_output = False

    if config.execution.stream_mode:
        raise NotImplementedError('TTA not available in the stream mode')

    cache_dir = config.env.cache_dirpath
    loader, tta_generator = preprocessing_generator_padded_tta(config)

    unet = Step(name='unet',
                transformer=PyTorchUNet(**config.unet),
                input_steps=[loader],
                cache_dirpath=cache_dir,
                save_output=save_output)

    # Merge per-augmentation predictions back into one prediction
    # per image id, using the generator's recorded TTA parameters.
    aggregator = loaders.TestTimeAugmentationAggregator()
    tta_aggregator = Step(name='tta_aggregator',
                          transformer=aggregator,
                          input_steps=[unet, tta_generator],
                          adapter={
                              'images':
                              ([(unet.name, 'multichannel_map_prediction')]),
                              'tta_params':
                              ([(tta_generator.name, 'tta_params')]),
                              'img_ids': ([(tta_generator.name, 'img_ids')]),
                          },
                          cache_dirpath=cache_dir,
                          save_output=save_output)

    # Remove the padding that was added before augmentation.
    cropper = post.PredictionCrop(**config.postprocessor.prediction_crop)
    prediction_crop = Step(name='prediction_crop',
                           transformer=cropper,
                           input_steps=[tta_aggregator],
                           adapter={
                               'images': ([(tta_aggregator.name,
                                            'aggregated_prediction')]),
                           },
                           cache_dirpath=cache_dir,
                           save_output=save_output)

    # Rename the cropped output so downstream postprocessing sees the
    # same 'multichannel_map_prediction' key the plain pipeline produces.
    prediction_renamed = Step(name='prediction_renamed',
                              transformer=Dummy(),
                              input_steps=[prediction_crop],
                              adapter={
                                  'multichannel_map_prediction':
                                  ([(prediction_crop.name, 'cropped_images')]),
                              },
                              cache_dirpath=cache_dir,
                              save_output=save_output)

    mask_postprocessed = mask_postprocessing(loader,
                                             prediction_renamed,
                                             config,
                                             save_output=save_output)

    return Step(name='output',
                transformer=Dummy(),
                input_steps=[mask_postprocessed],
                adapter={
                    'y_pred':
                    ([(mask_postprocessed.name, 'images_with_scores')]),
                },
                cache_dirpath=cache_dir,
                save_output=save_output)