def main(config_file):
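    """Run the ISIC aleatoric-uncertainty test loop defined by the given (or default) YAML config."""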

    if config_file is None:
        config_file = os.path.join(dirs.CONFIG_DIR, 'test_isic_aleatoric.yaml')

    context = ctx.TorchTestContext('cuda')
    context.load_from_config(config_file)

    build_test = data.BuildData(build_dataset=isic.BuildIsicDataset())

    if not hasattr(context.config.others, 'is_log_sigma'):
        raise ValueError('"is_log_sigma" entry missing in configuration file')
    is_log_sigma = context.config.others.is_log_sigma

    test_steps = [AleatoricPredictStep(is_log_sigma), PrepareSubjectStep()]
    subject_steps = [EvalSubjectStep()]

    subject_assembler = assembler.Subject2dAssembler()
    test = loop.Test(test_steps, subject_steps, subject_assembler)

    hook = hooks.ReducedComposeTestLoopHook([
        hooks.ConsoleTestLogHook(),
        hooks.WriteTestMetricsCsvHook('metrics.csv'),
        WriteHook()
    ])
    test(context, build_test, hook=hook)
def main(config_file):
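    """Run the ISIC auxiliary-segmentation test loop; requires "others.prediction_dir" in the config."""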

    if config_file is None:
        config_file = os.path.join(dirs.CONFIG_DIR,
                                   'test_isic_auxiliary_segm.yaml')

    context = ctx.TorchTestContext('cuda')
    context.load_from_config(config_file)

    if not hasattr(context.config.others, 'prediction_dir'):
        raise ValueError('"others.prediction_dir" is required in the config')
    prediction_dir = context.config.others.prediction_dir

    build_test = data.BuildData(build_dataset=isic.BuildIsicDataset(),
                                prediction_dir=prediction_dir)

    test_steps = [SegmentationPredictStep()]
    subject_steps = [EvalSubjectStep()]

    subject_assembler = assembler.Subject2dAssembler()
    test = loop.Test(test_steps,
                     subject_steps,
                     subject_assembler,
                     entries=('probabilities', 'labels', 'orig_prediction'))

    hook = hooks.ReducedComposeTestLoopHook([
        hooks.ConsoleTestLogHook(),
        hooks.WriteTestMetricsCsvHook('metrics.csv'),
        WriteHook()
    ])
    test(context, build_test, hook=hook)
def main(config_file):
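    """Run the BraTS auxiliary-segmentation test loop defined by the given (or default) YAML config."""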

    if config_file is None:
        config_file = os.path.join(dirs.CONFIG_DIR,
                                   'test_brats_auxiliary_segm.yaml')

    context = ctx.TorchTestContext('cuda')
    context.load_from_config(config_file)

    build_test = data.BuildData(build_dataset=data.BuildParametrizableDataset())

    test_steps = [SegmentationPredictStep()]
    subject_steps = [step.ExtractSubjectInfoStep(), EvalSubjectStep()]

    subject_assembler = assembler.SubjectAssembler()
    test = loop.Test(test_steps,
                     subject_steps,
                     subject_assembler,
                     entries=('probabilities', 'orig_prediction'))

    hook = hooks.ReducedComposeTestLoopHook([
        hooks.ConsoleTestLogHook(),
        hooks.WriteTestMetricsCsvHook('metrics.csv'),
        WriteHook()
    ])
    test(context, build_test, hook=hook)
def main(config_file):
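    """Run the ISIC ensemble test loop: loads every model listed under "others.model_dir", freezes them,
    and combines their predictions before evaluation."""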

    if config_file is None:
        config_file = os.path.join(dirs.CONFIG_DIR, 'test_isic_ensemble.yaml')

    context = ctx.TorchTestContext('cuda')
    context.load_from_config(config_file)

    build_test = data.BuildData(build_dataset=isic.BuildIsicDataset())

    if not hasattr(context.config.others, 'model_dir') or not hasattr(
            context.config.others, 'test_at'):
        raise ValueError(
            'missing "model_dir" or "test_at" entry in the configuration (others)'
        )

    model_dirs = context.config.others.model_dir
    if isinstance(model_dirs, str):
        model_dirs = [model_dirs]

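    # load each ensemble member from its checkpoint and freeze it for inference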
    test_models = []
    for i, model_dir in enumerate(model_dirs):
        logging.info('load additional model [{}/{}] {}'.format(
            i + 1, len(model_dirs), os.path.basename(model_dir)))
        mf = mgt.ModelFiles.from_model_dir(model_dir)
        checkpoint_path = mgt.model_service.find_checkpoint_file(
            mf.weight_checkpoint_dir, context.config.others.test_at)

        model = mgt.model_service.load_model_from_parameters(
            mf.model_path(), with_optimizer=False)
        mgt.model_service.load_checkpoint(checkpoint_path, model)
        test_model = model.to(context.device)

        test_model.eval()
        for params in test_model.parameters():
            params.requires_grad = False
        test_models.append(test_model)

    test_steps = [
        EnsemblePredictionStep(test_models),
        customstep.MultiPredictionSummary(),
        PrepareSubjectStep()
    ]
    subject_steps = [EvalSubjectStep()]

    subject_assembler = assembler.Subject2dAssembler()
    test = loop.Test(test_steps,
                     subject_steps,
                     subject_assembler,
                     entries=None)

    hook = hooks.ReducedComposeTestLoopHook([
        hooks.ConsoleTestLogHook(),
        hooks.WriteTestMetricsCsvHook('metrics.csv'),
        WriteHook()
    ])
    test(context, build_test, hook=hook)
def main(config_file, config_id):
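    """Run a BraTS test loop; config_id selects among the baseline, center, MC, and cross-validation configs."""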

    if config_file is None:
        if config_id == 'baseline':
            config_file = os.path.join(dirs.CONFIG_DIR,
                                       'test_brats_baseline.yaml')
        elif config_id == 'baseline_mc':
            config_file = os.path.join(dirs.CONFIG_DIR,
                                       'test_brats_baseline_mc.yaml')
        elif config_id == 'center':
            config_file = os.path.join(dirs.CONFIG_DIR,
                                       'test_brats_center.yaml')
        elif config_id == 'center_mc':
            config_file = os.path.join(dirs.CONFIG_DIR,
                                       'test_brats_center_mc.yaml')
        elif config_id in ('cv0', 'cv1', 'cv2', 'cv3', 'cv4'):
            config_file = os.path.join(
                dirs.CONFIG_DIR, 'baseline_cv',
                'test_brats_baseline_cv{}.yaml'.format(config_id[-1]))
        else:
            config_file = os.path.join(dirs.CONFIG_DIR,
                                       'test_brats_baseline.yaml')

    context = ctx.TorchTestContext('cuda')
    context.load_from_config(config_file)

    build_test = data.BuildData(build_dataset=data.BuildParametrizableDataset())

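    # multiple MC predictions when "others.mc" is set, otherwise a single segmentation prediction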
    if hasattr(context.config.others, 'mc'):
        test_steps = [
            customstep.McPredictStep(context.config.others.mc),
            customstep.MultiPredictionSummary()
        ]
    else:
        test_steps = [step.SegmentationPredictStep(do_probs=True)]
    subject_steps = [step.ExtractSubjectInfoStep(), EvalSubjectStep()]

    subject_assembler = assembler.SubjectAssembler()
    test = loop.Test(test_steps,
                     subject_steps,
                     subject_assembler,
                     entries=('probabilities', ))

    hook = hooks.ReducedComposeTestLoopHook([
        hooks.ConsoleTestLogHook(),
        hooks.WriteTestMetricsCsvHook('metrics.csv'),
        WriteHook()
    ])
    test(context, build_test, hook=hook)
def main(config_file):
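    """Run the BraTS auxiliary-features test loop; loads the model given by "others.model_dir" and
    "others.test_at" and uses it as the prediction step."""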

    if config_file is None:
        config_file = os.path.join(dirs.CONFIG_DIR,
                                   'test_brats_auxiliary_feat.yaml')

    context = ctx.TorchTestContext('cuda')
    context.load_from_config(config_file)

    build_test = data.BuildData(build_dataset=data.BuildParametrizableDataset())

    if not (hasattr(context.config.others, 'model_dir')
            and hasattr(context.config.others, 'test_at')):
        raise ValueError(
            'missing "model_dir" or "test_at" entry in the configuration (others)')

    # load the auxiliary segmentation model, expose its features, and freeze it for inference
    mf = mgt.ModelFiles.from_model_dir(context.config.others.model_dir)
    checkpoint_path = mgt.model_service.find_checkpoint_file(
        mf.weight_checkpoint_dir, context.config.others.test_at)

    model = mgt.model_service.load_model_from_parameters(
        mf.model_path(), with_optimizer=False)
    model.provide_features = True
    mgt.model_service.load_checkpoint(checkpoint_path, model)
    test_model = model.to(context.device)

    test_model.eval()
    for params in test_model.parameters():
        params.requires_grad = False

    test_steps = [SegmentationPredictStep(test_model)]
    subject_steps = [step.ExtractSubjectInfoStep(), EvalSubjectStep()]

    subject_assembler = assembler.SubjectAssembler()
    test = loop.Test(test_steps,
                     subject_steps,
                     subject_assembler,
                     entries=('probabilities', 'segm_probabilities'))

    hook = hooks.ReducedComposeTestLoopHook([
        hooks.ConsoleTestLogHook(),
        hooks.WriteTestMetricsCsvHook('metrics.csv'),
        WriteHook()
    ])
    test(context, build_test, hook=hook)
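
# Hypothetical CLI entry point (not part of the original snippets): a minimal sketch,
# assuming the test script is invoked with an optional --config-file argument.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Run the test loop defined by a YAML config.')
    parser.add_argument('--config-file', type=str, default=None,
                        help='path to a YAML test configuration; falls back to the default in dirs.CONFIG_DIR')
    args = parser.parse_args()
    main(args.config_file)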