def testing():

    c = get_config()

    c.do_load_checkpoint = True
    #c.checkpoint_dir = c.base_dir + '/20190424-020641_unet_experiment' + '/checkpoint/checkpoint_current' # dice_cost train
    # c.checkpoint_dir = c.base_dir + '/20190424-234657_unet_experiment' + '/checkpoint/checkpoint_last' # SDG
    c.checkpoint_dir = c.base_dir + '/20190906-085449_unet_experiment' + '/checkpoint/checkpoint_current'
    # c.checkpoint_file = "checkpoint_last.pth.tar"
    # c.cross_vali_index = valiIndex


    cross_vali_result_all_dir = os.path.join(
        c.base_dir,
        c.dataset_name + '_' + str(c.batch_size) + c.cross_vali_result_all_dir
        + datetime.datetime.now().strftime("_%Y%m%d-%H%M%S"))
    if not os.path.exists(cross_vali_result_all_dir):
        os.makedirs(cross_vali_result_all_dir)
        print('Created ' + cross_vali_result_all_dir + '...')
        c.base_dir = cross_vali_result_all_dir
        c.cross_vali_result_all_dir = os.path.join(cross_vali_result_all_dir, "results")
        os.makedirs(c.cross_vali_result_all_dir)


    exp = UNetExperiment(config=c, name='unet_test', n_epochs=c.n_epochs,
                         seed=42, globs=globals())
    exp.run_test(setup=True)


def training():

    c = get_config()

    dataset_name = 'Task04_Hippocampus'
    # dataset_name = 'Task02_Heart'
    # download_dataset(dest_path=c.data_root_dir, dataset=dataset_name, id=c.google_drive_id)
    # c.do_load_checkpoint = True
    # c.checkpoint_dir = c.base_dir + '/20190801-_unet_experiment' + '/checkpoint/checkpoint_current'
    # c.checkpoint_file = "checkpoint_last.pth.tar"

    if not exists(os.path.join(c.data_root_dir, dataset_name, 'preprocessed')):
        print('Preprocessing data. [STARTED]')
        preprocess_data(root_dir=os.path.join(c.data_root_dir, dataset_name))
        create_splits(output_dir=c.split_dir, image_dir=c.data_dir)
        print('Preprocessing data. [DONE]')
    else:
        print('The data has already been preprocessed and will not be preprocessed again. Delete the "preprocessed" folder to force reprocessing.')


    exp = UNetExperiment(config=c, name='unet_experiment', n_epochs=c.n_epochs,
                         seed=42, append_rnd_to_name=c.append_rnd_string)  # visdomlogger_kwargs={"auto_start": c.start_visdom}

    exp.run()
    exp.run_test(setup=False)
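

# A minimal dispatch sketch (not part of the original snippet): run training()
# or testing() depending on a hypothetical command-line argument.
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == "test":
        testing()
    else:
        training()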
Example #3
    c = get_config()

    #download_dataset(dest_path=c.data_root_dir, dataset=c.dataset_name, id=c.google_drive_id)

    if not exists(os.path.join(c.data_root_dir, c.dataset_name, 'preprocessed')):
        print('Preprocessing data. [STARTED]')
        #preprocess_data(root_dir=os.path.join(c.data_root_dir, c.dataset_name), y_shape=c.patch_size, z_shape=c.patch_size)
        #create_splits(output_dir=c.split_dir, image_dir=c.data_dir)
        print('Preprocessing data. [DONE]')
    else:
        print('The data has already been preprocessed and will not be preprocessed again. '
              'Delete the "preprocessed" folder to force reprocessing.')

    exp = UNetExperiment(
        config=c,
        name=c.name,
        n_epochs=c.n_epochs,
        seed=42,
        append_rnd_to_name=c.append_rnd_string,
        globs=globals(),
        # visdomlogger_kwargs={"auto_start": c.start_visdom},
        loggers={"visdom": ("visdom", {
            "auto_start": c.start_visdom
        })})

    exp.run()
    exp.run_test(setup=False)
Example #4
class LabelTensorToColor(object):
    def __call__(self, label):
        label = label.squeeze()
        colored_label = torch.zeros(3, label.size(0), label.size(1)).byte()
        for i, color in enumerate(class_color):
            mask = label.eq(i)
            for j in range(3):
                colored_label[j].masked_fill_(mask, color[j])

        return colored_label
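
# class_color is referenced above but not defined in this snippet; a minimal
# hypothetical palette (one RGB triple per class, here for a 3-class problem)
# could look like this:
class_color = [
    (0, 0, 0),      # class 0: background -> black
    (255, 0, 0),    # class 1 -> red
    (0, 255, 0),    # class 2 -> green
]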

color_class_converter = LabelTensorToColor()

# Load data
c = get_config()
exp = UNetExperiment(config=c, name=c.name, n_epochs=c.n_epochs,
                     seed=42, append_rnd_to_name=c.append_rnd_string, globs=globals())
exp.setup()
exp.test_data_loader.do_reshuffle = True

# Load checkpoint
checkpoint = torch.load('./output_experiment/20190604-154315_fine_tune_spleen_for_heart/checkpoint/checkpoint_last.pth.tar')
exp.model.load_state_dict(checkpoint['model'])

exp.model.eval()

batch_counter = 0
with torch.no_grad():
    for data_batch in exp.test_data_loader:
        # Get data_batches
        mr_data = data_batch['data'][0].float().to(exp.device)
        mr_target = data_batch['seg'][0].float().to(exp.device)
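
        # The snippet ends here; a hedged sketch of how the loop might continue,
        # assuming exp.model returns per-class logits of shape (N, C, H, W) and
        # that class_color / color_class_converter are defined as above.
        pred = exp.model(mr_data)                        # forward pass
        pred_labels = torch.argmax(pred, dim=1)          # (N, H, W) class indices
        colored = color_class_converter(pred_labels[0].cpu())  # 3 x H x W byte image
        batch_counter += 1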
Example #5
    # Create three keys in the dictionary: "train", "val" and "test". In each key, store
    # the array with indices of training volumes to be used for training, validation
    # and testing respectively.
    # <YOUR CODE GOES HERE>
    #__solution
    # Split the dataset 60:20:20 into train, val and test.
    # Use len(keys) (not max(keys)) so the last volume is not dropped from the split.
    n = len(keys)
    split = {
        'train': keys[:int(n * 0.6)],
        'val': keys[int(n * 0.6):int(n * 0.8)],
        'test': keys[int(n * 0.8):]
    }

    # Set up and run experiment

    # TASK: Class UNetExperiment has missing pieces. Go to the file and fill them in
    #__solution in UNetExperiment
    exp = UNetExperiment(c, split, data)

    # You could free up memory by deleting the dataset
    # as it has been copied into loaders
    # del dataset

    # run training
    exp.run()

    # prep and run testing

    # TASK: Test method is not complete. Go to the method and complete it
    results_json = exp.run_test()

    results_json["config"] = vars(c)
Example #6

    shapesize = 64
    if len(sys.argv) > 3:
        shapesize = int(sys.argv[3])

    # Get the header in order to preserve voxel dimensions to store the segmented image later on
    print('Preprocessing data.')
    data, header = preprocess_single_file(filename,
                                          y_shape=shapesize,
                                          z_shape=shapesize)

    print('Setting up model and starting segmentation.')
    exp = UNetExperiment(config=c,
                         name=c.name,
                         n_epochs=c.n_epochs,
                         seed=42,
                         append_rnd_to_name=c.append_rnd_string,
                         globs=globals())

    result = exp.segment_single_image(data)

    print('Postprocessing data.')
    result = postprocess_single_image(result)

    pathname, fname = os.path.split(filename)
    destination_filename = os.path.join(pathname, "segmented_" + fname)
    print('Saving file to disk: {}'.format(destination_filename))
    save_single_image(result, header, destination_filename)
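
    # Optional sanity check (not part of the original snippet): reload the saved
    # result, assuming Medical Decathlon NIfTI files and that nibabel is available.
    import nibabel as nib
    segmented = nib.load(destination_filename)
    print('Segmentation shape: {}'.format(segmented.shape))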
Example #7
if __name__ == "__main__":
    c = get_config()

    dataset_name = 'Task04_Hippocampus'
    # dataset_name = 'Task01_BrainTumour'
    download_dataset(dest_path=c.data_root_dir,
                     dataset=dataset_name,
                     id=c.google_drive_id)

    if not exists(os.path.join(c.data_root_dir, dataset_name, 'preprocessed')):
        print('Preprocessing data. [STARTED]')
        preprocess_data(root_dir=os.path.join(c.data_root_dir, dataset_name))
        create_splits(output_dir=c.split_dir, image_dir=c.data_dir)
        print('Preprocessing data. [DONE]')
    else:
        print('The data has already been preprocessed and will not be preprocessed again. '
              'Delete the "preprocessed" folder to force reprocessing.')

    exp = UNetExperiment(config=c,
                         name='unet_experiment',
                         n_epochs=c.n_epochs,
                         seed=42,
                         append_rnd_to_name=c.append_rnd_string,
                         visdomlogger_kwargs={"auto_start": c.start_visdom})

    exp.run()
    exp.run_test(setup=False)
Example #8
    # a k-fold training and combining the results.
    # Let's split our dataset 60:20:20.
    train, validate, test = np.split(
        all_idx, [int(.6 * len(all_idx)),
                  int(.8 * len(all_idx))])

    split = dict()

    # create three keys in the dictionary: "train", "val" and "test". In each key, store
    # the array with indices of training volumes to be used for training, validation
    # and testing respectively.
    split = {"train": train, "val": validate, "test": test}

    # Set up and run experiment

    exp = UNetExperiment(c, split, data)

    # You could free up memory by deleting the dataset
    # as it has been copied into loaders
    # del dataset

    # summary
    exp.print_summary()

    # run training
    #exp.run()
    exp.load_model_parameters(path='..\\out\\model.pth')

    # prep and run testing

    results_json = exp.run_test()
Example #9

import matplotlib
matplotlib.use('Agg')

from configs.Config_unet import get_config
from experiments.UNetExperiment import UNetExperiment

if __name__ == "__main__":
    c = get_config()

    exp = UNetExperiment(
        config=c,
        name=c.name,
        n_epochs=c.n_epochs,
        seed=42,
        append_rnd_to_name=c.append_rnd_string,
        globs=globals(),
        # visdomlogger_kwargs={"auto_start": c.start_visdom},
        loggers={
            "visdom": ("visdom", {
                "auto_start": c.start_visdom
            }),
            # "tb": ("tensorboard"),
            # "slack": ("slack", {"token": "XXXXXXXX",
            #                     "user_email": "x"})
        })

    exp.run()
    exp.run_test(setup=False)
Example #10
def run_on_dataset():

    #models = os.listdir('./models/')
    models = glob('./output_experiment/*/checkpoint/checkpoint_last.pth.tar')
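
    # Module-level setup assumed but not shown in this snippet:
    # c = get_config(), plus imports such as glob, pandas as pd and torch.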

    metric_list = [
        "Dice", "Hausdorff Distance", "Hausdorff Distance 95",
        "Avg. Symmetric Surface Distance", "Avg. Surface Distance"
    ]

    summary = pd.DataFrame(columns=['modelname'] + metric_list)
    summary_mean = pd.DataFrame(columns=['modelname'] + metric_list)

    for model_name in models:
        # Load data
        print('starting with ' + model_name)
        exp = UNetExperiment(config=c,
                             name=c.name,
                             n_epochs=c.n_epochs,
                             seed=42,
                             append_rnd_to_name=c.append_rnd_string,
                             globs=globals())
        exp.setup()
        exp.test_data_loader.do_reshuffle = False

        # Load checkpoint
        checkpoint = torch.load(model_name)
        exp.model.load_state_dict(checkpoint['model'])

        # exp.model.eval() # done in UNetExperiment

        exp.run_test(setup=False)

        #TODO get metrics
        # select interesting ones, add to pandas dataframe

        json_scores = exp.scores

        result_dict = json_scores["results"]  # {"all": results, "mean": results_mean}

        im_scores = result_dict['all']

        model_res = {'modelname': model_name}

        print('adding results to summary..')
        for nr, im in enumerate(im_scores):
            for cl in range(c.num_classes):
                for m in metric_list:
                    key = 'class_{}_imagenr_{}_{}'.format(cl, nr, m)
                    model_res[key] = im[str(cl)][m]

        summary = summary.append(model_res, ignore_index=True)

        im_mean_scores = result_dict['mean']
        model_res_mean = {'modelname': model_name}

        for cl in range(c.num_classes):
            for m in metric_list:
                model_res_mean['class_{}_{}'.format(cl, m)] = im_mean_scores[str(cl)][m]

        summary_mean = summary_mean.append(model_res_mean, ignore_index=True)

        summary.to_csv('./output_experiment/summary_ims_' + c.dataset_name + '.csv')
        summary_mean.to_csv('./output_experiment/summary_mean_' + c.dataset_name + '.csv')
        print('..summary updated')
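
        # Note (not part of the original): DataFrame.append was removed in pandas 2.0.
        # With current pandas the two updates above could be written roughly as:
        #   summary = pd.concat([summary, pd.DataFrame([model_res])], ignore_index=True)
        #   summary_mean = pd.concat([summary_mean, pd.DataFrame([model_res_mean])], ignore_index=True)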