Code example #1
# Imports assumed by this excerpt; config, helper and the Dice metrics are
# project-specific modules (the exact module path for the metrics below is
# an assumption).
import pickle
import time

import numpy as np
from keras.models import load_model

import config
import helper
from metrics import dice_coef, dice_coef_loss  # hypothetical module path
def predict_and_save(patch_size, num_epochs, batch_size, lr, dropout, patient, num_patients_train, num_patients_val,
                     data_dirs, dataset):
    print('________________________________________________________________________________')
    print('patch size', patch_size)
    print('batch size', batch_size)
    print('learning rate', lr)
    print('dropout', dropout)
    print('patient:', patient)

    # create the name of current run
    run_name = config.get_run_name(patch_size, num_epochs, batch_size, lr, dropout, num_patients_train,
                                   num_patients_val)
    print(run_name)

    # -----------------------------------------------------------
    # LOADING MODEL, RESULTS AND WHOLE BRAIN MATRICES
    # -----------------------------------------------------------
    model_filepath = config.get_model_filepath(run_name)
    print(model_filepath)
    model = load_model(model_filepath,
                       custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})

    train_metadata_filepath = config.get_train_metadata_filepath(run_name)
    with open(train_metadata_filepath, 'rb') as handle:
        train_metadata = pickle.load(handle)
    print('train setting: ', train_metadata['params'])

    print('> Loading image...')
    img_mat = helper.load_nifti_mat_from_file(
        data_dirs[dataset] + patient + '_img.nii')  # values between 0 and 255
    print('> Loading mask...')
    mask_mat = helper.load_nifti_mat_from_file(
        data_dirs[dataset] + patient + '_mask.nii')  # values 0 and 1

    # -----------------------------------------------------------
    # PREDICTION
    # -----------------------------------------------------------
    # the segmentation is going to be saved in this probability matrix
    prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
    x_dim, y_dim, z_dim = prob_mat.shape

    # get the x, y and z coordinates where there is brain
    x, y, z = np.where(mask_mat)
    print('x shape:', x.shape)
    print('y shape:', y.shape)
    print('z shape:', z.shape)

    # get the z slices with brain
    z_slices = np.unique(z)

    # start cutting out and predicting the patches
    starttime_total = time.time()
    # proceed slice by slice
    for i in z_slices:
        print('Slice:', i)
        starttime_slice = time.time()
        slice_vox_inds = np.where(z == i)
        # find all x and y coordinates with brain in given slice
        x_in_slice = x[slice_vox_inds]
        y_in_slice = y[slice_vox_inds]
        # find min and max x and y coordinates
        slice_x_min = min(x_in_slice)
        slice_x_max = max(x_in_slice)
        slice_y_min = min(y_in_slice)
        slice_y_max = max(y_in_slice)

        # calculate number of predicted patches in x and y direction in given slice
        num_of_x_patches = int(np.ceil((slice_x_max - slice_x_min) / patch_size))
        num_of_y_patches = int(np.ceil((slice_y_max - slice_y_min) / patch_size))
        print('num x patches', num_of_x_patches)
        print('num y patches', num_of_y_patches)

        # predict patch by patch in given slice
        for j in range(num_of_x_patches):
            for k in range(num_of_y_patches):
                # find the starting and ending x and y coordinates of given patch
                patch_start_x = slice_x_min + patch_size * j
                patch_end_x = slice_x_min + patch_size * (j + 1)
                patch_start_y = slice_y_min + patch_size * k
                patch_end_y = slice_y_min + patch_size * (k + 1)
                # if the dimensions of the probability matrix are exceeded shift back the last patch
                if patch_end_x > x_dim:
                    patch_end_x = slice_x_max
                    patch_start_x = slice_x_max - patch_size
                if patch_end_y > y_dim:
                    patch_end_y = slice_y_max
                    patch_start_y = slice_y_max - patch_size

                # get the patch with the found coordinates from the image matrix
                img_patch = img_mat[patch_start_x: patch_end_x, patch_start_y: patch_end_y, i]

                # normalize the patch with mean and standard deviation calculated over training set
                img_patch = img_patch.astype(np.float64)
                img_patch -= train_metadata['params']['mean']
                img_patch /= train_metadata['params']['std']

                # predict the patch with the model and save to probability matrix
                prob_mat[patch_start_x: patch_end_x, patch_start_y: patch_end_y, i] = np.reshape(
                    model.predict(
                        np.reshape(img_patch,
                                   (1, patch_size, patch_size, 1)), batch_size=1, verbose=0),
                    (patch_size, patch_size))

        # how long does the prediction take for one slice
        duration_slice = time.time() - starttime_slice
        print('prediction in slice took:', duration_slice // 3600, 'hours',
              (duration_slice // 60) % 60, 'minutes',
              duration_slice % 60, 'seconds')

    # how long does the prediction take for a patient
    duration_total = time.time() - starttime_total
    print('prediction in total took:', duration_total // 3600, 'hours',
          (duration_total // 60) % 60, 'minutes',
          duration_total % 60, 'seconds')

    # -----------------------------------------------------------
    # SAVE AS NIFTI
    # -----------------------------------------------------------
    helper.create_and_save_nifti(prob_mat, config.get_probs_filepath(run_name, patient, dataset))
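
A minimal invocation sketch: all parameter values and the patient id below are hypothetical, and the dataset key must match the project's config.MODEL_DATA_DIRS:

# Hypothetical call: segment one patient volume with a trained model.
data_dirs = config.MODEL_DATA_DIRS  # e.g. {'test': '/data/test/'}
predict_and_save(patch_size=96, num_epochs=10, batch_size=64, lr=1e-4,
                 dropout=0.1, patient='pat001', num_patients_train=10,
                 num_patients_val=2, data_dirs=data_dirs, dataset='test')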
Code example #2
        assert num_patients == len(
            patients['working']
        ), "Check config.PATIENT_FILES. The lists with names do not have the same length."

    for i in range(num_patients):
        print('DATA SET: ', dataset)
        if len(patients['working_augmented']) != 0:
            print(patients['original'][i], patients['working'][i],
                  patients['working_augmented'][i])
        else:
            print(patients['original'][i], patients['working'][i])

        # load image, mask and label stacks as matrices
        print('Loading image...')
        img_mat = helper.load_nifti_mat_from_file(original_data_dir +
                                                  patients['original'][i] +
                                                  '/' + img_filename)
        print('Loading mask...')
        mask_mat = helper.load_nifti_mat_from_file(original_data_dir +
                                                   patients['original'][i] +
                                                   '/' + mask_filename)
        print('Loading label...')
        label_mat = helper.load_nifti_mat_from_file(original_data_dir +
                                                    patients['original'][i] +
                                                    '/' + label_filename)
        if plot:  # visualize some slices
            helper.plot_some_images([img_mat, mask_mat, label_mat], 1, 80, 100,
                                    patients['original'][i],
                                    nr_rotations_for_plotting, 60)

        # check the dimensions
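
The excerpt breaks off at the dimension check; a plausible continuation (an assumption, not code from the source) is a simple shape assertion over the three volumes just loaded:

        # Assumed continuation: all three volumes must share the same shape
        # before any patch extraction can proceed.
        assert img_mat.shape == mask_mat.shape == label_mat.shape, \
            'Image, mask and label dimensions do not match.'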
Code example #3

num_patients_val = len(
    patients['working_augmented'])  # number of patients in validation category
################################################

data_dirs = config.MODEL_DATA_DIRS
run_name = config.get_run_name(patch_size, num_epochs, batch_size,
                               learning_rate, dropout, num_patients_train,
                               num_patients_val)
print(run_name)
print('Patient:', patient)
print()

# -----------------------------------------------------------
# LOADING RESULTS
# -----------------------------------------------------------
print('> Loading image ...')
img_mat = helper.load_nifti_mat_from_file(data_dirs[dataset] + patient +
                                          '_img.nii')
print('> Loading label...')
label_mat = helper.load_nifti_mat_from_file(data_dirs[dataset] + patient +
                                            '_label.nii')
print('> Loading probability map...')
prob_mat = helper.load_nifti_mat_from_file(
    config.get_probs_filepath(run_name, patient, dataset))
pred_class = (prob_mat > threshold).astype(
    np.uint8)  # convert from boolean to int

# Recode the prediction so that subtracting it from the label yields a
# four-valued error map: 2 = true positive, 1 = false positive,
# 0 = false negative, -1 = true negative.
converted_pred_class = pred_class.copy().astype(int)
converted_pred_class[converted_pred_class == 1] = -1
converted_pred_class[converted_pred_class == 0] = 1
error_map = label_mat - converted_pred_class

print()
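
With this encoding, the per-category voxel counts can be tallied directly from error_map; a small sketch using only the arrays defined above:

# Count voxels per error category; np.unique returns sorted values.
values, counts = np.unique(error_map, return_counts=True)
names = {2: 'TP', 1: 'FP', 0: 'FN', -1: 'TN'}
for v, c in zip(values, counts):
    print(names.get(int(v), v), ':', c)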
Code example #4
# Imports assumed by this excerpt; config, helper and avg_class_acc are
# project-specific (a sketch of avg_class_acc follows the example).
import csv
import pickle
import time

import numpy as np
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

import config
import helper
def measure_performance_and_save_to_csv(patch_size, num_epochs, batch_size, lr,
                                        dropout, threshold, num_patients_train,
                                        num_patients_val,
                                        patients_segmentation, data_dirs,
                                        dataset, result_file):
    print(
        '________________________________________________________________________________'
    )
    print('patch size', patch_size)
    print('batch size', batch_size)
    print('learning rate', lr)
    print('dropout', dropout)
    print('threshold', threshold)

    start_row = time.time()

    # create the name of current run
    run_name = config.get_run_name(patch_size, num_epochs, batch_size, lr,
                                   dropout, num_patients_train,
                                   num_patients_val)
    print(run_name)

    # -----------------------------------------------------------
    # TRAINING RESULTS
    # -----------------------------------------------------------
    train_metadata_filepath = config.get_train_metadata_filepath(run_name)
    with open(train_metadata_filepath, 'rb') as handle:
        train_metadata = pickle.load(handle)

    print('Train params:')
    print(train_metadata['params'])
    print('Train performance:')
    tr_perf = train_metadata['performance']
    print(tr_perf)

    row = [
        patch_size, num_epochs, batch_size, lr, dropout,
        tr_perf['train_true_positives'], tr_perf['train_false_negatives'],
        tr_perf['train_false_positives'], tr_perf['train_true_negatives'],
        tr_perf['train_auc'], tr_perf['train_acc'], tr_perf['train_avg_acc'],
        tr_perf['train_dice'], tr_perf['val_true_positives'],
        tr_perf['val_false_negatives'], tr_perf['val_false_positives'],
        tr_perf['val_true_negatives'], tr_perf['val_auc'], tr_perf['val_acc'],
        tr_perf['val_avg_acc'], tr_perf['val_dice']
    ]

    # -----------------------------------------------------------
    # VALIDATION / TEST RESULTS
    # -----------------------------------------------------------
    tp_list = []
    fn_list = []
    fp_list = []
    tn_list = []
    auc_list = []
    acc_list = []
    avg_acc_list = []
    dice_list = []

    for patient in patients_segmentation:
        print(patient)
        print('> Loading label...')
        label_mat = helper.load_nifti_mat_from_file(
            data_dirs[dataset] + patient + '_label.nii')  # values 0 or 1
        print('> Loading probability map...')
        prob_mat = helper.load_nifti_mat_from_file(
            config.get_probs_filepath(run_name, patient,
                                      dataset))  # values between 0 and 1
        pred_class = (prob_mat > threshold).astype(
            np.uint8)  # convert from boolean to int, values 0 or 1

        print()
        print('Computing performance measures...')
        label_mat_f = np.asarray(label_mat).flatten()
        prob_mat_f = np.asarray(prob_mat).flatten()
        pred_classes_f = np.asarray(pred_class).flatten()
        val_auc = roc_auc_score(label_mat_f, prob_mat_f)
        val_acc = accuracy_score(label_mat_f, pred_classes_f)
        val_avg_acc, val_tn, val_fp, val_fn, val_tp = avg_class_acc(
            label_mat_f, pred_classes_f)
        val_dice = f1_score(label_mat_f, pred_classes_f)

        tp_list.append(val_tp)
        fn_list.append(val_fn)
        fp_list.append(val_fp)
        tn_list.append(val_tn)
        auc_list.append(val_auc)
        acc_list.append(val_acc)
        avg_acc_list.append(val_avg_acc)
        dice_list.append(val_dice)

    row = row + [
        np.mean(tp_list),
        np.mean(fn_list),
        np.mean(fp_list),
        np.mean(tn_list),
        np.mean(auc_list),
        np.mean(acc_list),
        np.mean(avg_acc_list),
        np.mean(dice_list)
    ]
    print('Complete row:', row)

    print('Writing to csv...')
    with open(result_file, 'a', newline='') as f:  # newline='' avoids blank rows on Windows
        writer = csv.writer(f)
        writer.writerow(row)

    duration_row = int(time.time() - start_row)
    print('performance assessment took:', duration_row // 3600, 'hours',
          (duration_row // 60) % 60, 'minutes', duration_row % 60, 'seconds')
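
The helper avg_class_acc used above is not part of this excerpt. A minimal sketch consistent with its call signature (returning the mean of the two per-class accuracies plus the confusion-matrix counts, in the order unpacked above) might look like this:

from sklearn.metrics import confusion_matrix

def avg_class_acc(y_true, y_pred):
    # Confusion-matrix counts for binary labels (0 = background, 1 = vessel).
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    # Mean of the per-class recalls, i.e. balanced accuracy for two classes.
    sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    specificity = tn / (tn + fp) if (tn + fp) > 0 else 0.0
    avg_acc = (sensitivity + specificity) / 2
    return avg_acc, tn, fp, fn, tp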
Code example #5
    else:
        assert len(patients['original']) == len(
            patients['working']
        ), "Check config.PATIENT_FILES. The lists with names do not have the same length."

    all_patients = patients['working'] + patients['working_augmented']
    print('All patients:', all_patients)

    for patient in all_patients:
        print('DATA SET: ', dataset)
        print('PATIENT: ', patient)

        # load image and label stacks as matrices
        print('> Loading image...')
        img_mat = helper.load_nifti_mat_from_file(
            data_dirs[dataset] + patient +
            '_img.nii')  # values between 0 and 255
        print('> Loading mask...')
        mask_mat = helper.load_nifti_mat_from_file(
            data_dirs[dataset] + patient + '_mask.nii')  # values 0 or 1
        print('> Loading label...')
        label_mat = helper.load_nifti_mat_from_file(
            data_dirs[dataset] + patient + '_label.nii')  # values 0 or 1

        current_nr_extracted_patches = 0  # counts already extracted patches
        img_patches = {}  # dictionary to save image patches
        label_patches = {}  # dictionary to save label patches
        # make lists in dictionaries for each extracted patch size
        for size in patch_sizes:
            img_patches[str(size)] = []
            label_patches[str(size)] = []
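
The excerpt ends just after the per-size lists are initialised. A continuation sketch (an assumption about the surrounding script, not the original code) that samples one random brain voxel and appends one axial patch per requested size:

        # Continuation sketch (assumption): cut one patch per size around a
        # randomly chosen brain voxel and store it in the dictionaries above.
        brain_x, brain_y, brain_z = np.where(mask_mat)
        rand = np.random.randint(len(brain_x))
        cx, cy, cz = brain_x[rand], brain_y[rand], brain_z[rand]
        for size in patch_sizes:
            half = size // 2
            img_patch = img_mat[cx - half: cx + half, cy - half: cy + half, cz]
            label_patch = label_mat[cx - half: cx + half, cy - half: cy + half, cz]
            if img_patch.shape == (size, size):  # skip patches clipped at the border
                img_patches[str(size)].append(img_patch)
                label_patches[str(size)].append(label_patch)
                current_nr_extracted_patches += 1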