Example 1
# run experiment
logger = TBLogger(EXPERIMENT_PATH / FOLD / 'logs')
commands.populate(EXPERIMENT_PATH / 'config.json', save_json, CONFIG,
                  EXPERIMENT_PATH / 'config.json')
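# NB: `commands.populate(path, func, *args)` calls `func(*args)` only if
# `path` does not exist yet, so re-running the experiment resumes from the
# cached artifacts instead of recomputing them.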
commands.populate(
    EXPERIMENT_PATH / FOLD / 'model.pth',
    lambda: [
        train(
            crf_train_step,
            batch_iter,
            n_epochs=CONFIG['n_epochs'],
            logger=logger,
            validate=lambda: compute_metrics(
                predict=predict,
                load_x=lambda i: (dataset.load_image(i), dataset.load_spacing(i)),
                load_y=lambda i: (dataset.load_gt(i), dataset.load_spacing(i)),
                ids=val_ids,
                metrics=val_metrics,
            ),
            architecture=model,
            optimizer=optimizer,
            criterion=criterion,
            lr=CONFIG['lr'],
            with_crf=with_crf,
        ),
        save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth'),
    ])

load_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
commands.predict(
    ids=test_ids,
    output_path=EXPERIMENT_PATH / FOLD / 'predictions',
    # the snippet is truncated here in the original; a plausible completion
    # reusing the loader from `validate` above:
    load_x=lambda i: (dataset.load_image(i), dataset.load_spacing(i)),
    predict_fn=predict,
)
Example 2
# metrics
individual_metrics = {
    **{f'dice_{c}': multiclass_to_bool(drop_spacing(dice_score), c) for c in range(1, dataset.n_classes)},
    **{f'surface_dice_{c}': multiclass_to_bool(surface_dice, c) for c in range(1, dataset.n_classes)}
}
val_metrics = convert_to_aggregated(individual_metrics)
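
# `multiclass_to_bool` and `drop_spacing` are not shown in this snippet. A
# hypothetical sketch consistent with how they are used above, assuming each
# metric receives the ground truth as a (mask, spacing) pair plus a predicted
# mask, and that `surface_dice` accepts that pair directly:
def multiclass_to_bool(metric, c):
    # reduce the multiclass problem to "class c vs. the rest"
    def wrapper(y_true, y_pred):
        mask, spacing = y_true
        return metric((mask == c, spacing), y_pred == c)
    return wrapper

def drop_spacing(metric):
    # adapt a metric such as `dice_score` that ignores the voxel spacing
    def wrapper(y_true, y_pred):
        mask, _ = y_true
        return metric(mask, y_pred)
    return wrapper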


# run experiment
logger = TBLogger(EXPERIMENT_PATH / FOLD / 'logs')
commands.populate(EXPERIMENT_PATH / 'config.json', save_json, CONFIG, EXPERIMENT_PATH / 'config.json')
commands.populate(EXPERIMENT_PATH / FOLD / 'model.pth', lambda: [
    train(train_step, batch_iter, n_epochs=CONFIG['n_epochs'], logger=logger,
          validate=lambda: compute_metrics(
              predict, train_dataset.load_image,
              lambda i: (train_dataset.load_gt(i), train_dataset.load_spacing(i)),
              val_ids, val_metrics),
          architecture=model, optimizer=optimizer, criterion=criterion, lr=CONFIG['lr']),
    save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
])

load_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
for target_slice_spacing in CONFIG['target_slice_spacing']:
    test_dataset = ChangeSliceSpacing(dataset, new_slice_spacing=target_slice_spacing)
    commands.predict(
        ids=test_ids,
        output_path=EXPERIMENT_PATH / FOLD / f"predictions_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
        load_x=test_dataset.load_image,
        predict_fn=predict
    )
    commands.evaluate_individual_metrics(
        # truncated in the original; a plausible completion given the loaders
        # above (the `metrics_...` output directory name is hypothetical):
        load_y_true=lambda i: (test_dataset.load_gt(i), test_dataset.load_spacing(i)),
        metrics=individual_metrics,
        predictions_path=EXPERIMENT_PATH / FOLD / f"predictions_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
        results_path=EXPERIMENT_PATH / FOLD / f"metrics_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
    )
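
# `ChangeSliceSpacing` comes from the experiment's own codebase and isn't
# shown here. A hypothetical sketch of the idea, resampling along the (last)
# slice axis; the real wrapper may simulate thicker slices differently:
from scipy.ndimage import zoom

class ChangeSliceSpacing:
    def __init__(self, dataset, new_slice_spacing):
        self.dataset = dataset
        self.new_slice_spacing = new_slice_spacing

    def _scale(self, i):
        return self.dataset.load_spacing(i)[-1] / self.new_slice_spacing

    def load_image(self, i):
        # linear interpolation between slices
        return zoom(self.dataset.load_image(i), (1, 1, self._scale(i)), order=1)

    def load_gt(self, i):
        # nearest neighbour keeps the mask's integer labels intact
        return zoom(self.dataset.load_gt(i), (1, 1, self._scale(i)), order=0)

    def load_spacing(self, i):
        return (*self.dataset.load_spacing(i)[:-1], self.new_slice_spacing)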
Example 3
# see the deep-pipe tutorials for more details about the batch iterators we use

batch_iter = Infinite(
    # get a random pair of paths
    sample(paths),
    # load the image-contour pair
    unpack_args(load_pair),
    # get a random slice
    unpack_args(get_random_slice),
    # simple augmentation
    unpack_args(random_flip),
    batch_size=batch_size,
    batches_per_epoch=samples_per_train // (batch_size * n_epochs),
    combiner=combiner)
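
# `load_pair`, `get_random_slice` and `random_flip` are defined elsewhere in
# the original script. Hypothetical versions, just to make the pipeline's data
# flow concrete (each step receives what the previous one yields):
import numpy as np

def load_pair(image_path, contour_path):
    # `sample(paths)` yields a random (image_path, contour_path) tuple
    return np.load(image_path), np.load(contour_path)

def get_random_slice(image, contour):
    # take the same random slice from both volumes
    idx = np.random.randint(image.shape[-1])
    return image[..., idx], contour[..., idx]

def random_flip(image, contour):
    # flip both arrays with probability 0.5; `.copy()` keeps strides
    # positive so the arrays can later be converted to torch tensors
    if np.random.rand() < 0.5:
        image, contour = np.flip(image, -1).copy(), np.flip(contour, -1).copy()
    return image, contour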

model = to_device(Network(), args.device)
optimizer = Adam(model.parameters(), lr=lr)

# Here we use a general training function with a custom `train_step`.
# See the tutorial for more details: https://deep-pipe.readthedocs.io/en/latest/tutorials/training.html

train(
    train_step,
    batch_iter,
    n_epochs=n_epochs,
    logger=ConsoleLogger(),
    # additional arguments to `train_step`
    model=model,
    optimizer=optimizer)
save_model_state(model, args.output)
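
# `train_step` itself is not shown above. A minimal sketch of one that fits
# the call: the batch is passed positionally, while `model` and `optimizer`
# arrive as the extra keyword arguments forwarded by `train`:
import torch
import torch.nn.functional as F

def train_step(images, contours, *, model, optimizer):
    model.train()
    device = next(model.parameters()).device
    images = torch.as_tensor(images, dtype=torch.float32, device=device)
    contours = torch.as_tensor(contours, dtype=torch.float32, device=device)

    loss = F.binary_cross_entropy_with_logits(model(images), contours)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # return a scalar so the training loop can log it
    return loss.item()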