def training():

    c = get_config()

    dataset_name = 'Task04_Hippocampus'
    # dataset_name = 'Task02_Heart'
    # download_dataset(dest_path=c.data_root_dir, dataset=dataset_name, id=c.google_drive_id)
    # c.do_load_checkpoint = True
    # c.checkpoint_dir = c.base_dir + '/20190801-_unet_experiment' + '/checkpoint/checkpoint_current'
    # c.checkpoint_file = "checkpoint_last.pth.tar"

    if not exists(os.path.join(c.data_root_dir, dataset_name, 'preprocessed')):
        print('Preprocessing data. [STARTED]')
        preprocess_data(root_dir=os.path.join(c.data_root_dir, dataset_name))
        create_splits(output_dir=c.split_dir, image_dir=c.data_dir)
        print('Preprocessing data. [DONE]')
    else:
        print('The data has already been preprocessed and will not be preprocessed again. '
              'Delete the "preprocessed" folder to force preprocessing.')


    exp = UNetExperiment(config=c, name='unet_experiment', n_epochs=c.n_epochs,
                         seed=42, append_rnd_to_name=c.append_rnd_string)
    # visdomlogger_kwargs={"auto_start": c.start_visdom}

    exp.run()
    exp.run_test(setup=False)


def testing():

    c = get_config()

    c.do_load_checkpoint = True
    # c.checkpoint_dir = c.base_dir + '/20190424-020641_unet_experiment' + '/checkpoint/checkpoint_current'  # dice_cost train
    # c.checkpoint_dir = c.base_dir + '/20190424-234657_unet_experiment' + '/checkpoint/checkpoint_last'  # SGD
    c.checkpoint_dir = c.base_dir + '/20190906-085449_unet_experiment' + '/checkpoint/checkpoint_current'
    # c.checkpoint_file = "checkpoint_last.pth.tar"
    # c.cross_vali_index = valiIndex


    cross_vali_result_all_dir = os.path.join(
        c.base_dir,
        c.dataset_name + '_' + str(c.batch_size) + c.cross_vali_result_all_dir
        + datetime.datetime.now().strftime("_%Y%m%d-%H%M%S"))
    if not os.path.exists(cross_vali_result_all_dir):
        os.makedirs(cross_vali_result_all_dir)
        print('Created ' + cross_vali_result_all_dir + ' ...')
        c.base_dir = cross_vali_result_all_dir
        c.cross_vali_result_all_dir = os.path.join(cross_vali_result_all_dir, "results")
        os.makedirs(c.cross_vali_result_all_dir)


    exp = UNetExperiment(config=c, name='unet_test', n_epochs=c.n_epochs,
                         seed=42, globs=globals())
    exp.run_test(setup=True)
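

# Minimal entry point for the two snippets above (a sketch; assumes this file
# defines training() and testing() as shown, plus the usual imports:
# os, datetime, `from os.path import exists`, and the project's get_config,
# preprocess_data, create_splits, and UNetExperiment):
if __name__ == '__main__':
    training()
    # testing()  # run instead of training() to evaluate a saved checkpoint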


# Example 3
def main():  # placeholder name; the original function header was lost in extraction
    c = get_config()

    #download_dataset(dest_path=c.data_root_dir, dataset=c.dataset_name, id=c.google_drive_id)

    if not exists(os.path.join(c.data_root_dir, c.dataset_name, 'preprocessed')):
        print('Preprocessing data. [STARTED]')
        #preprocess_data(root_dir=os.path.join(c.data_root_dir, c.dataset_name), y_shape=c.patch_size, z_shape=c.patch_size)
        #create_splits(output_dir=c.split_dir, image_dir=c.data_dir)
        print('Preprocessing data. [DONE]')
    else:
        print('The data has already been preprocessed and will not be preprocessed again. '
              'Delete the "preprocessed" folder to force preprocessing.')

    exp = UNetExperiment(
        config=c,
        name=c.name,
        n_epochs=c.n_epochs,
        seed=42,
        append_rnd_to_name=c.append_rnd_string,
        globs=globals(),
        # visdomlogger_kwargs={"auto_start": c.start_visdom},
        loggers={"visdom": ("visdom", {
            "auto_start": c.start_visdom
        })})

    exp.run()
    exp.run_test(setup=False)


# Example 4
def main():  # placeholder name; the original function header was lost in extraction
    # __solution
    # Split the dataset 60:20:20 into train, validation, and test sets.
    # Note: len(keys) (not max(keys)) is the number of samples; slicing with
    # max(keys) would drop the last key from the test split.
    split = {
        'train': keys[:int(len(keys) * 0.6)],
        'val': keys[int(len(keys) * 0.6):int(len(keys) * 0.8)],
        'test': keys[int(len(keys) * 0.8):]
    }
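
    # Sanity check (illustrative): contiguous slices over len(keys) mean the
    # three subsets cover every key exactly once.
    assert len(split['train']) + len(split['val']) + len(split['test']) == len(keys)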

    # Set up and run experiment

    # TASK: Class UNetExperiment has missing pieces. Go to the file and fill them in
    # __solution in UNetExperiment
    exp = UNetExperiment(c, split, data)

    # You could free up memory by deleting the dataset
    # as it has been copied into loaders
    # del dataset

    # run training
    exp.run()

    # prep and run testing

    # TASK: Test method is not complete. Go to the method and complete it
    results_json = exp.run_test()

    results_json["config"] = vars(c)

    with open(os.path.join(exp.out_dir, "results.json"), 'w') as out_file:
        json.dump(results_json, out_file, indent=2, separators=(',', ': '))
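
    # Illustrative follow-up (a sketch, not part of the original snippet):
    # reload the saved scores together with the config snapshot stored above.
    with open(os.path.join(exp.out_dir, "results.json")) as in_file:
        saved = json.load(in_file)
    print(saved["config"])  # the vars(c) dict written alongside the results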


# Example 5
def run_on_dataset():
    # Assumed imports for this snippet: `from glob import glob`, `import torch`,
    # `import pandas as pd`, plus get_config/UNetExperiment as in the examples above.
    c = get_config()  # bug fix: 'c' is used throughout below but was never defined

    # models = os.listdir('./models/')
    models = glob('./output_experiment/*/checkpoint/checkpoint_last.pth.tar')

    metric_list = [
        "Dice", "Hausdorff Distance", "Hausdorff Distance 95",
        "Avg. Symmetric Surface Distance", "Avg. Surface Distance"
    ]

    summary = pd.DataFrame(columns=['modelname'] + metric_list)
    summary_mean = pd.DataFrame(columns=['modelname'] + metric_list)

    for model_name in models:
        # Load data
        print('starting with ' + model_name)
        exp = UNetExperiment(config=c,
                             name=c.name,
                             n_epochs=c.n_epochs,
                             seed=42,
                             append_rnd_to_name=c.append_rnd_string,
                             globs=globals())
        exp.setup()
        exp.test_data_loader.do_reshuffle = False

        # Load checkpoint
        checkpoint = torch.load(model_name)
        exp.model.load_state_dict(checkpoint['model'])

        # exp.model.eval() # done in UNetExperiment

        exp.run_test(setup=False)

        # TODO: select further metrics of interest and add them to the dataframes

        json_scores = exp.scores

        result_dict = json_scores["results"]  # {"all": results, "mean": results_mean}

        im_scores = result_dict['all']

        model_res = {'modelname': model_name}

        print('adding results to summary..')
        for nr, im in enumerate(im_scores):
            for cl in range(c.num_classes):
                for m in metric_list:
                    model_res['class_' + str(cl) + '_imagenr_' + str(nr) + '_' + m] = im[str(cl)][m]

        # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
        summary = pd.concat([summary, pd.DataFrame([model_res])], ignore_index=True)

        im_mean_scores = result_dict['mean']
        model_res_mean = {'modelname': model_name}

        for cl in range(c.num_classes):
            for m in metric_list:
                model_res_mean['class_' + str(cl) + '_' + m] = im_mean_scores[str(cl)][m]

        summary_mean = pd.concat([summary_mean, pd.DataFrame([model_res_mean])], ignore_index=True)

        # Write the (growing) summaries after every model so partial results survive a crash.
        summary.to_csv('./output_experiment/summary_ims_' + c.dataset_name + '.csv')
        summary_mean.to_csv('./output_experiment/summary_mean_' + c.dataset_name + '.csv')
        print('..summary updated')
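

# A small follow-up sketch (hypothetical helper, not part of the original
# snippet): rank the evaluated checkpoints by their mean Dice for one class,
# reading back the summary_mean CSV written by run_on_dataset(). Column names
# mirror the 'class_<cl>_<metric>' keys built for model_res_mean above.
def rank_models_by_dice(csv_path, cl=1):
    df = pd.read_csv(csv_path, index_col=0)
    col = 'class_' + str(cl) + '_Dice'
    return df[['modelname', col]].sort_values(col, ascending=False)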