Example #1
# inference
def predict(image):
    return inference_step(image, architecture=model, activation=torch.sigmoid)


# metrics
individual_metrics = {
    'dice': lambda gt, pred: dice_score(gt.astype(bool), pred >= .5)
}
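# aggregate the per-case metrics over the validation set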
val_metrics = convert_to_aggregated(individual_metrics)


# run experiment
logger = TBLogger(EXPERIMENT_PATH / FOLD / 'logs')
save_json(CONFIG, EXPERIMENT_PATH / 'config.json')
train(train_step, batch_iter, n_epochs=CONFIG['n_epochs'], logger=logger,
      validate=lambda: compute_metrics(predict, dataset.load_image, dataset.load_gt, val_ids, val_metrics),
      architecture=model, optimizer=optimizer, criterion=criterion, lr=CONFIG['lr'])
save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')

# run inference on the test set
commands.predict(
    ids=test_ids,
    output_path=EXPERIMENT_PATH / FOLD / 'predictions',
    load_x=dataset.load_image,
    predict_fn=predict
)
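# compute per-case metrics for the saved predictions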
commands.evaluate_individual_metrics(
    load_y_true=dataset.load_gt,
    metrics=individual_metrics,
    predictions_path=EXPERIMENT_PATH / FOLD / 'predictions',
    results_path=EXPERIMENT_PATH / FOLD / 'metrics'
)
Example #2
val_metrics = convert_to_aggregated(individual_metrics)


# run experiment
logger = TBLogger(EXPERIMENT_PATH / FOLD / 'logs')
# populate executes the given function only if the target file does not exist yet
commands.populate(EXPERIMENT_PATH / 'config.json', save_json, CONFIG, EXPERIMENT_PATH / 'config.json')
commands.populate(EXPERIMENT_PATH / FOLD / 'model.pth', lambda: [
    train(train_step, batch_iter, n_epochs=CONFIG['n_epochs'], logger=logger,
          validate=lambda: compute_metrics(predict, train_dataset.load_image,
                                           lambda i: (train_dataset.load_gt(i), train_dataset.load_spacing(i)),
                                           val_ids, val_metrics),
          architecture=model, optimizer=optimizer, criterion=criterion, lr=CONFIG['lr']),
    save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
])

load_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
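# evaluate the restored model at each target slice spacing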
for target_slice_spacing in CONFIG['target_slice_spacing']:
    test_dataset = ChangeSliceSpacing(dataset, new_slice_spacing=target_slice_spacing)
    commands.predict(
        ids=test_ids,
        output_path=EXPERIMENT_PATH / FOLD / f"predictions_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
        load_x=test_dataset.load_image,
        predict_fn=predict
    )
    commands.evaluate_individual_metrics(
        load_y_true=lambda i: (test_dataset.load_gt(i), test_dataset.load_spacing(i)),
        metrics=individual_metrics,
        predictions_path=EXPERIMENT_PATH / FOLD / f"predictions_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
        results_path=EXPERIMENT_PATH / FOLD / f"metrics_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}"
    )
Example #3
# train once: populate skips this block if model.pth already exists
commands.populate(
    EXPERIMENT_PATH / FOLD / 'model.pth', lambda: [
        train(crf_train_step,
              batch_iter,
              n_epochs=CONFIG['n_epochs'],
              logger=logger,
              validate=lambda: compute_metrics(
                  predict=predict,
                  load_x=lambda i: (dataset.load_image(i), dataset.load_spacing(i)),
                  load_y=lambda i: (dataset.load_gt(i), dataset.load_spacing(i)),
                  ids=val_ids,
                  metrics=val_metrics),
              architecture=model,
              optimizer=optimizer,
              criterion=criterion,
              lr=CONFIG['lr'],
              with_crf=with_crf),
        save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
    ])

load_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
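# run inference on the test set with the restored model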
commands.predict(ids=test_ids,
                 output_path=EXPERIMENT_PATH / FOLD / 'predictions',
                 load_x=lambda i: (dataset.load_image(i), dataset.load_spacing(i)),
                 predict_fn=predict)
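# compute per-case metrics for the saved predictions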
commands.evaluate_individual_metrics(
    load_y_true=lambda i: (dataset.load_gt(i), dataset.load_spacing(i)),
    metrics=individual_metrics,
    predictions_path=EXPERIMENT_PATH / FOLD / 'predictions',
    results_path=EXPERIMENT_PATH / FOLD / 'metrics')