Code example #1
0
def unet_multitask(config, train_mode):
    """Build the multitask U-Net pipeline (mask + contour heads).

    Parameters
    ----------
    config : attribute-style config providing ``unet``, ``env.cache_dirpath``
        and whatever the preprocessing builders consume.
    train_mode : bool
        Selects the preprocessing sub-graph; every downstream step is
        identical in both modes.

    Returns
    -------
    Step
        The terminal ``'output'`` step exposing ``y_pred`` (labeled images).
    """
    # Both modes persist step outputs and never reload cached results, so
    # only the preprocessing sub-graph actually depends on train_mode.
    # (The original duplicated these assignments in both branches.)
    save_output = True
    load_saved_output = False
    if train_mode:
        preprocessing = preprocessing_multitask_train(config)
    else:
        preprocessing = preprocessing_multitask_inference(config)

    unet_multitask = Step(name='unet_multitask',
                          transformer=PyTorchUNetMultitask(**config.unet),
                          input_steps=[preprocessing],
                          cache_dirpath=config.env.cache_dirpath,
                          save_output=save_output,
                          load_saved_output=load_saved_output)

    # Resize each prediction head back to the original image sizes.
    mask_resize = Step(name='mask_resize',
                       transformer=Resizer(),
                       input_data=['input'],
                       input_steps=[unet_multitask],
                       adapter={
                           'images':
                           ([(unet_multitask.name, 'mask_prediction')]),
                           'target_sizes': ([('input', 'target_sizes')]),
                       },
                       cache_dirpath=config.env.cache_dirpath,
                       save_output=save_output)

    contour_resize = Step(name='contour_resize',
                          transformer=Resizer(),
                          input_data=['input'],
                          input_steps=[unet_multitask],
                          adapter={
                              'images':
                              ([(unet_multitask.name, 'contour_prediction')]),
                              'target_sizes': ([('input', 'target_sizes')]),
                          },
                          cache_dirpath=config.env.cache_dirpath,
                          save_output=save_output)

    # Combine resized masks and contours into labeled instance images.
    detached = Step(name='detached',
                    transformer=Postprocessor(),
                    input_steps=[mask_resize, contour_resize],
                    adapter={
                        'images': ([(mask_resize.name, 'resized_images')]),
                        'contours':
                        ([(contour_resize.name, 'resized_images')]),
                    },
                    cache_dirpath=config.env.cache_dirpath,
                    save_output=save_output)

    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[detached],
                  adapter={
                      'y_pred': ([(detached.name, 'labeled_images')]),
                  },
                  cache_dirpath=config.env.cache_dirpath)
    return output
Code example #2
0
def seq_conv_train(config):
    """Assemble the training pipeline for the sequential conv-net.

    Splits metadata into train/validation X-y pairs, feeds both through the
    segmentation loader, runs the network, resizes the predicted masks to
    the original image sizes and binarizes them.

    Returns the terminal ``'output'`` step exposing ``y_pred``.
    """
    cache = config.env.cache_dirpath

    xy_train = Step(name='xy_train',
                    transformer=XYSplit(**config.xy_splitter),
                    input_data=['input'],
                    adapter={
                        'meta': ([('input', 'meta')]),
                        'train_mode': ([('input', 'train_mode')]),
                    },
                    cache_dirpath=cache)

    xy_inference = Step(name='xy_inference',
                        transformer=XYSplit(**config.xy_splitter),
                        input_data=['input'],
                        adapter={
                            'meta': ([('input', 'meta_valid')]),
                            'train_mode': ([('input', 'train_mode')]),
                        },
                        cache_dirpath=cache)

    # Loader sees both splits: train pair for fitting, valid pair for eval.
    loader_train = Step(name='loader',
                        transformer=MetadataImageSegmentationLoader(**config.loader),
                        input_data=['input'],
                        input_steps=[xy_train, xy_inference],
                        adapter={
                            'X': ([(xy_train.name, 'X')], squeeze_inputs),
                            'y': ([(xy_train.name, 'y')], squeeze_inputs),
                            'train_mode': ([('input', 'train_mode')]),
                            'X_valid': ([(xy_inference.name, 'X')], squeeze_inputs),
                            'y_valid': ([(xy_inference.name, 'y')], squeeze_inputs),
                        },
                        cache_dirpath=cache)

    sequential_convnet = Step(name='sequential_convnet',
                              transformer=SequentialConvNet(**config.sequential_convnet),
                              input_steps=[loader_train],
                              cache_dirpath=cache)

    mask_resize = Step(name='mask_resize',
                       transformer=Resizer(),
                       input_data=['input'],
                       input_steps=[sequential_convnet],
                       adapter={
                           'images': ([(sequential_convnet.name, 'predicted_masks')]),
                           'target_sizes': ([('input', 'target_sizes')]),
                       },
                       cache_dirpath=cache)

    thresholding = Step(name='thresholding',
                        transformer=Thresholder(**config.thresholder),
                        input_steps=[mask_resize],
                        adapter={
                            'images': ([(mask_resize.name, 'resized_images')]),
                        },
                        cache_dirpath=cache)

    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[thresholding],
                  adapter={
                      'y_pred': ([(thresholding.name, 'binarized_images')]),
                  },
                  cache_dirpath=cache)
    return output
Code example #3
0
def unet_inference(config):
    """Build the U-Net inference pipeline.

    Splits metadata, loads images, runs the U-Net, resizes the predicted
    masks back to the original image sizes and binarizes them.

    Returns the terminal ``'output'`` step exposing ``y_pred``.
    """
    xy_inference = Step(name='xy_inference',
                        transformer=XYSplit(**config.xy_splitter),
                        input_data=['input'],
                        adapter={'meta': ([('input', 'meta')]),
                                 'train_mode': ([('input', 'train_mode')])
                                 },
                        cache_dirpath=config.env.cache_dirpath)

    loader_inference = Step(name='loader',
                            transformer=MetadataImageSegmentationLoader(**config.loader),
                            input_data=['input'],
                            # The original listed xy_inference twice (a
                            # copy-paste from the train variant that wires two
                            # distinct splits); one occurrence wires the same
                            # dependency without redundant bookkeeping.
                            input_steps=[xy_inference],
                            adapter={'X': ([('xy_inference', 'X')], squeeze_inputs),
                                     'y': ([('xy_inference', 'y')], squeeze_inputs),
                                     'train_mode': ([('input', 'train_mode')]),
                                     },
                            cache_dirpath=config.env.cache_dirpath)

    unet_network = Step(name='unet_network',
                        transformer=PyTorchUNet(**config.unet_network),
                        input_steps=[loader_inference],
                        cache_dirpath=config.env.cache_dirpath)

    mask_resize = Step(name='mask_resize',
                       transformer=Resizer(),
                       input_data=['input'],
                       input_steps=[unet_network],
                       adapter={'images': ([('unet_network', 'predicted_masks')]),
                                'target_sizes': ([('input', 'target_sizes')]),
                                },
                       cache_dirpath=config.env.cache_dirpath)

    thresholding = Step(name='thresholding',
                        transformer=Thresholder(**config.thresholder),
                        input_steps=[mask_resize],
                        adapter={'images': ([('mask_resize', 'resized_images')]),
                                 },
                        cache_dirpath=config.env.cache_dirpath)

    output = Step(name='output',
                  transformer=Dummy(),
                  input_steps=[thresholding],
                  adapter={'y_pred': ([('thresholding', 'binarized_images')]),
                           },
                  cache_dirpath=config.env.cache_dirpath)
    return output
Code example #4
0
def mask_postprocessing(model, config, save_output=False):
    """Resize the model's multichannel map prediction to the original image
    sizes, then map each pixel to its category index.

    Returns the ``'category_mapper'`` step producing ``categorized_images``.
    """
    cache = config.env.cache_dirpath

    mask_resize = Step(name='mask_resize',
                       transformer=Resizer(),
                       input_data=['input'],
                       input_steps=[model],
                       adapter={
                           'images': ([(model.name, 'multichannel_map_prediction')]),
                           'target_sizes': ([('input', 'target_sizes')]),
                       },
                       cache_dirpath=cache,
                       save_output=save_output)

    category_mapper = Step(name='category_mapper',
                           transformer=CategoryMapper(),
                           input_steps=[mask_resize],
                           adapter={
                               'images': ([(mask_resize.name, 'resized_images')]),
                           },
                           cache_dirpath=cache,
                           save_output=save_output)
    return category_mapper
Code example #5
0
def center_postprocessing(model, config, save_output=True):
    """Resize the model's center prediction to the original image sizes and
    binarize it with the configured thresholder.

    Returns the ``'center_thresholding'`` step producing binarized images.
    """
    cache = config.env.cache_dirpath

    center_resize = Step(name='center_resize',
                         transformer=Resizer(),
                         input_data=['input'],
                         input_steps=[model],
                         adapter={
                             'images': ([(model.name, 'center_prediction')]),
                             'target_sizes': ([('input', 'target_sizes')]),
                         },
                         cache_dirpath=cache,
                         save_output=save_output)

    center_thresholding = Step(name='center_thresholding',
                               transformer=Thresholder(**config.thresholder),
                               input_steps=[center_resize],
                               adapter={
                                   'images': ([(center_resize.name, 'resized_images')]),
                               },
                               cache_dirpath=cache,
                               save_output=save_output)
    return center_thresholding
Code example #6
0
def mask_postprocessing(loader, model, config, save_output=False):
    """Full mask post-processing chain: optional dense-CRF refinement,
    resize, category mapping, erosion, instance labeling, dilation and
    score building.

    Returns the ``'score_builder'`` step.
    """
    cache = config.env.cache_dirpath
    stream_mode = config.execution.stream_mode

    if config.postprocessor.crf.apply_crf:
        # CRF refinement consumes both the model prediction and the raw
        # image generator from the loader.
        crf_transformer = (DenseCRFStream(**config.postprocessor.crf)
                           if stream_mode
                           else DenseCRF(**config.postprocessor.crf))
        dense_crf = Step(name='dense_crf',
                         transformer=crf_transformer,
                         input_steps=[loader, model],
                         adapter={
                             'images': ([(model.name, 'multichannel_map_prediction')]),
                             'raw_images_generator': ([(loader.name, 'datagen')]),
                         },
                         cache_dirpath=cache,
                         save_output=save_output)
        resize_inputs = [dense_crf]
        resize_images = ([(dense_crf.name, 'crf_images')])
    else:
        resize_inputs = [model]
        resize_images = ([(model.name, 'multichannel_map_prediction')])

    mask_resize = Step(name='mask_resize',
                       transformer=ResizerStream() if stream_mode else Resizer(),
                       input_data=['input'],
                       input_steps=resize_inputs,
                       adapter={
                           'images': resize_images,
                           'target_sizes': ([('input', 'target_sizes')]),
                       },
                       cache_dirpath=cache,
                       save_output=save_output)

    category_mapper = Step(name='category_mapper',
                           transformer=CategoryMapperStream() if stream_mode else CategoryMapper(),
                           input_steps=[mask_resize],
                           adapter={
                               'images': ([(mask_resize.name, 'resized_images')]),
                           },
                           cache_dirpath=cache,
                           save_output=save_output)

    # NOTE(review): erosion/dilation pass load_saved_output=False and do not
    # forward save_output, unlike the other steps here — looks deliberate
    # (always recompute morphology), but worth confirming.
    mask_erosion = Step(name='mask_erosion',
                        transformer=MaskEroderStream(**config.postprocessor) if stream_mode
                        else MaskEroder(**config.postprocessor),
                        input_steps=[category_mapper],
                        adapter={
                            'images': ([(category_mapper.name, 'categorized_images')]),
                        },
                        cache_dirpath=cache,
                        load_saved_output=False)

    detached = multiclass_object_labeler(mask_erosion, config, save_output=save_output)

    mask_dilation = Step(name='mask_dilation',
                         transformer=LabeledMaskDilatorStream(**config.postprocessor) if stream_mode
                         else LabeledMaskDilator(**config.postprocessor),
                         input_steps=[detached],
                         adapter={
                             'images': ([(detached.name, 'labeled_images')]),
                         },
                         cache_dirpath=cache,
                         load_saved_output=False)

    score_builder = Step(name='score_builder',
                         transformer=ScoreBuilder(),
                         input_steps=[mask_dilation, mask_resize],
                         adapter={
                             'images': ([(mask_dilation.name, 'dilated_images')]),
                             'probabilities': ([(mask_resize.name, 'resized_images')]),
                         },
                         cache_dirpath=cache,
                         save_output=save_output)

    return score_builder