def load_model_state_cv3_wise(architecture, baseline_exp_path):
    """Load baseline weights matching the current cross-validation block.

    The experiment index is parsed from the name of the current working
    directory (expected to end in ``_<n>``) and mapped onto the baseline
    experiment grid as ``n // 15 * 3 + n % 3``.

    :param architecture: model whose state is populated in-place
    :param baseline_exp_path: root directory holding ``experiment_<k>`` dirs
    """
    exp_name = os.path.abspath('.').split('/')[-1]
    exp_idx = int(exp_name.split('_')[-1])

    # NOTE(review): 15 experiments per fold, 3 cv blocks — confirm grid layout
    fold_idx = exp_idx // 15
    block_idx = exp_idx % 3

    pretrained = os.path.join(baseline_exp_path,
                              f'experiment_{fold_idx * 3 + block_idx}',
                              'model.pth')
    load_model_state(architecture, path=pretrained)
def load_model_state_fold_wise(architecture,
                               baseline_exp_path,
                               n_folds=6,
                               modify_state_fn=None,
                               n_first_exclude=0):
    """Load baseline weights for the fold matching the current experiment dir.

    The validation index is parsed from the name of the current working
    directory (expected to end in ``_<n>``), shifted by ``n_first_exclude``,
    and mapped to a baseline experiment via ``idx // (n_folds - 1)``.

    :param architecture: model whose state is populated in-place
    :param baseline_exp_path: root directory holding ``experiment_<k>`` dirs
    :param n_folds: total number of folds in the baseline setup
    :param modify_state_fn: optional hook applied to the state dict on load
    :param n_first_exclude: offset added to the parsed validation index
    """
    exp_name = os.path.abspath('.').split('/')[-1]
    val_idx = n_first_exclude + int(exp_name.split('_')[-1])

    pretrained = os.path.join(baseline_exp_path,
                              f'experiment_{val_idx // (n_folds - 1)}',
                              'model.pth')
    load_model_state(architecture,
                     path=pretrained,
                     modify_state_fn=modify_state_fn)
# Example #3
def denoise_on_test(model,
                    data,
                    shapes,
                    length,
                    path,
                    result_path,
                    names=None):
    '''
    :param model: PyTorch Neural Network model
    :param data: Loaded numpy data
    :param shapes: Shapes to convert back cyclic data
    :param length: The length to convert back cyclic data
    :param path: Model's weights path
    :param result_path: Experiment path to save denoised input
    :param names: If None each entry save with name 'i.npy', in other way name for each entry can be provided
    :return: None
    '''
    # Restore the best checkpoint before running inference.
    model = load_model_state(model, path)

    predictions = []
    for entry in data:
        batch = to_var(entry, is_on_cuda(model))[None]
        predictions.append(to_np(model(batch)))
    output = postprocessing(predictions, shapes, length)

    result_path = Path(result_path)

    if names is None:
        # Fall back to index-based file names.
        for idx, denoised in enumerate(output):
            np.save(result_path / (str(idx) + '.npy'), np.squeeze(denoised).T)
    else:
        for idx, entry_name in enumerate(names):
            target = result_path / entry_name
            # NOTE(review): this creates a directory named after the entry,
            # while np.save appends '.npy' and writes a sibling file — confirm
            # the empty directory is intended.
            target.mkdir(exist_ok=True)
            np.save(target, np.squeeze(output[idx]).T)
# Example #4
def evaluate_on_test(model, data, labels, path, result_path):
    '''
    :param model: PyTorch Neural Network model
    :param data: Loaded numpy data
    :param labels: Loaded categorical target
    :param path: Path to model weights
    :param result_path: Experiment path to save accuracy on test
    :return: Accuracy score on given data
    '''
    # Restore the best checkpoint before scoring.
    best_model = load_model_state(model, path)

    accuracy = evaluate(best_model, tqdm(data), labels)

    # Persist the score alongside the experiment artifacts, then return it.
    dump_json(accuracy, Path(result_path) / 'test_accuracy.json')
    return accuracy
# Example #5
# Aggregate the per-case metric functions into scalar validation metrics.
val_metrics = convert_to_aggregated(individual_metrics)


# run experiment
logger = TBLogger(EXPERIMENT_PATH / FOLD / 'logs')
# Persist the experiment config (populate presumably skips the step when the
# target file already exists — TODO confirm commands.populate semantics).
commands.populate(EXPERIMENT_PATH / 'config.json', save_json, CONFIG, EXPERIMENT_PATH / 'config.json')
# Train the model with per-epoch validation metrics, then checkpoint it;
# guarded by the existence of 'model.pth'.
commands.populate(EXPERIMENT_PATH / FOLD / 'model.pth', lambda : [
    train(train_step, batch_iter, n_epochs=CONFIG['n_epochs'], logger=logger,
          validate=lambda : compute_metrics(predict, train_dataset.load_image,
                                            lambda i: (train_dataset.load_gt(i), train_dataset.load_spacing(i)),
                                            val_ids, val_metrics),
          architecture=model, optimizer=optimizer, criterion=criterion, lr=CONFIG['lr']),
    save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
])

# Reload the saved checkpoint, then evaluate at every target slice spacing.
load_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')
for target_slice_spacing in CONFIG['target_slice_spacing']:
    # Resample the test set to the target spacing before predicting.
    test_dataset = ChangeSliceSpacing(dataset, new_slice_spacing=target_slice_spacing)
    commands.predict(
        ids=test_ids,
        output_path=EXPERIMENT_PATH / FOLD / f"predictions_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
        load_x=test_dataset.load_image,
        predict_fn=predict
    )
    # Score the predictions against ground truth resampled the same way.
    commands.evaluate_individual_metrics(
        load_y_true=lambda i: (test_dataset.load_gt(i), test_dataset.load_spacing(i)),
        metrics=individual_metrics,
        predictions_path=EXPERIMENT_PATH / FOLD / f"predictions_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}",
        results_path=EXPERIMENT_PATH / FOLD / f"metrics_{CONFIG['source_slice_spacing']}_to_{target_slice_spacing}"
    )
    x = to_var(x, args.device)
    prediction = model(x[..., 0])
    prediction[:, 1] = torch.sigmoid(prediction[:, 1])
    return to_np(prediction)[..., None]


# Resolve CLI paths; a directory input switches to batch mode over all images.
input_path = Path(args.input)
output_path = Path(args.output)

if input_path.is_dir():
    output_path.mkdir(parents=True, exist_ok=True)
    assert output_path.is_dir()

    # One output json per discovered image, keyed by enumeration index.
    files = [[output_path / f'{i}.json', image] for i, image in gather_nifty(input_path)]
else:
    # Single-file mode: the output path is used verbatim.
    files = [[output_path, input_path]]

bar = tqdm(files)
# Load pretrained weights into a fresh network and move it to the device.
model = to_device(load_model_state(Network(), args.model), args.device)

for output_file, input_image in bar:
    bar.set_description(input_image.name)

    contours = predict(nibabel.load(str(input_image)))
    # filter out `undefined` points
    # (enumerate yields (index, value): here y is the row index, x the
    # predicted coordinate; NaN marks an undefined point)
    contours = [
        [[y, x] for y, x in enumerate(contour) if not np.isnan(x)] for contour in contours
    ]

    save_json(contours, output_file)