Example #1
import numpy as np


def tile_model_reconstruction_comparison(data, reconstruction_data, time_step, z_slice, threshold):
    """Tile input, reconstruction, |difference| and thresholded |difference| for one
    z-slice and time step, and collect per-channel RMSEs (assumes an rmse helper)."""
    rows = 4  # input, reconstruction, difference, thresholded difference
    cols = 3  # one column per channel (channels 1 to 3 of the last axis)
    tiling = np.zeros((rows * data.shape[1], cols * data.shape[2]), dtype=data.dtype)

    rmses = []

    for j in range(cols):
        # Column j shows channel j + 1 of the last axis (channel 0 is skipped)
        img = data[z_slice, :, :, time_step, j + 1]
        reconstruction = reconstruction_data[z_slice, :, :, time_step, j + 1]
        difference = np.abs(img - reconstruction)


        # Input
        i = 0
        tiling[
            i*data.shape[1]:(i+1)*data.shape[1],
            j*data.shape[2]:(j+1)*data.shape[2]] = img

        # Reconstruction
        i = 1
        tiling[
            i*data.shape[1]:(i+1)*data.shape[1],
            j*data.shape[2]:(j+1)*data.shape[2]] = reconstruction

        # Difference
        i = 2
        tiling[
            i*data.shape[1]:(i+1)*data.shape[1],
            j*data.shape[2]:(j+1)*data.shape[2]] = difference

        # Thresholded difference
        difference[difference <= threshold] = 0.
        i = 3
        tiling[
            i*data.shape[1]:(i+1)*data.shape[1],
            j*data.shape[2]:(j+1)*data.shape[2]] = difference

        error = rmse(img, reconstruction)
        rmses.append(error)

    difference = np.abs(data[z_slice,:,:,time_step,:] - reconstruction_data[z_slice,:,:,time_step,:])
    difference[difference <= threshold] = 0.

    return tiling, difference, rmses
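
Both examples call an rmse helper that is not defined in these snippets. A minimal sketch of what such a helper might look like (an assumption, not the original implementation):

import numpy as np

def rmse(a, b):
    # Root-mean-square error over all elements of two equally shaped arrays
    a = np.asarray(a)
    b = np.asarray(b)
    return np.sqrt(np.mean((a - b) ** 2))
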
Example #2
    # For the selected subject, take that subject's full volume: only one subject is
    # evaluated here, and its slices occupy indices 0:64.
    subject_sliced = images_vl_sl[0:64, :, :, :, :]

    # Initialize the while loop variables
    start_idx = 0
    end_idx = config["batch_size"]
    subject_rmse = []

    while end_idx <= config["spatial_size_z"]:
        # Selected slices for batch and run through the model
        feed_dict = {model.image_matrix: subject_sliced[start_idx:end_idx]}
        out_mu = model.sess.run(model.decoder_output, feed_dict)

        # Compute rmse of these slices and append it to the subject error
        error = rmse(subject_sliced[start_idx:end_idx], out_mu)
        subject_rmse.append(error)

        # Visualization
        if config["visualization_mode"] == 'all':
            out_path = os.path.join(
                project_code_root,
                'Results/Evaluation/' + model_name + '/validation_segmentedFlowMRI/'
                + 'Subject_' + str(subject_index) + '_' + str(start_idx) + '_' + str(end_idx) + '.png')
            plot_batch_3d_complete(subject_sliced[start_idx:end_idx], out_mu,
                                   every_x_time_step=1, out_path=out_path)

        # Save the reconstruction to the hdf5 file.
        # If all subjects are written to the same hdf5 file, change the hdf5 path and
        # reintroduce the subject_index offset below; since only one subject is
        # processed here, the offset is omitted for now.
        print("out_mu shape: ", out_mu.shape)
        dataset['reconstruction'][start_idx:end_idx, :, :, :, :] = out_mu
        # dataset['reconstruction'][start_idx:end_idx, :, :, :, 0:3] = out_mu
        # dataset['reconstruction'][start_idx:end_idx, :, :, :, 4] = segmentation_prob?

        # Advance the sliding window to the next batch of slices
        start_idx += config["batch_size"]
        end_idx += config["batch_size"]

A variant of the same batch loop, for a model that is additionally conditioned on the slice position within the volume (batch_slice_info); in this fragment, i indexes the subject:

                # Map each slice index to one of 8 position classes; np.digitize starts
                # counting at 1 for the first bin, so subtract 1 to get classes from 0.
                bins = np.linspace(
                    0, 64,
                    9)  # 64 slices, 8 classes + 1 since outer limit excluded
                batch_slice_info = np.digitize(range(start_idx, end_idx),
                                               bins) - 1
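                # e.g. slices 0-7 map to class 0, slices 8-15 to class 1, ...,
                # slices 56-63 to class 7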

                feed_dict = {
                    model.image_matrix: subject_sliced[start_idx:end_idx],
                    model.batch_slice_info: batch_slice_info
                }
                out_mu = model.sess.run(model.decoder_output, feed_dict)

                # Compute rmse of these slices and append it to the subject error
                error = rmse(subject_sliced[start_idx:end_idx], out_mu)
                subject_rmse.append(error)

                # Visualization
                if config["visualization_mode"] == 'all':
                    out_path = os.path.join(
                        project_code_root, 'Results/Evaluation/' + model_name +
                        '/' + which_dataset + '/' + 'Subject_' + str(i) + '_' +
                        str(start_idx) + '_' + str(end_idx) + '.png')
                    plot_batch_3d_complete(subject_sliced[start_idx:end_idx],
                                           out_mu,
                                           every_x_time_step=1,
                                           out_path=out_path)

                # Save it to the hdf5 file
                dataset['reconstruction'][start_idx:end_idx, :, :, :, :] = out_mu
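
Both fragments of Example #2 assume an already opened HDF5 file that exposes a 'reconstruction' dataset. A minimal sketch of how such a file might be prepared with h5py; the path and the (z, x, y, t, channels) shape are hypothetical and would normally be derived from subject_sliced.shape:

import h5py
import numpy as np

recon_path = 'Results/Evaluation/reconstruction.hdf5'   # hypothetical output path
volume_shape = (64, 112, 112, 24, 4)                    # hypothetical (z, x, y, t, channels)

dataset = h5py.File(recon_path, 'w')
dataset.create_dataset('reconstruction', shape=volume_shape, dtype=np.float32)

# ... run the batch loop above to fill dataset['reconstruction'] ...

dataset.close()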