def get_actiwave_ecg_data(subject, hdf5_file):
    """
	Read actiwave ECG data from HDF5 file

	Parameters
	---------
	subject : string
		subject ID		
	hdf5_file : os.path()
		location of the HDF5 file where the data is stored. For example ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE

	Returns
	---------
	actiwave_ecg : np.array((n_samples, 1))
		actiwave ECG data
	actiwave_ecg_time : np.array((n_samples, 1))
		time array in np.datetime
	"""

    # read actiwave ecg data
    actiwave_ecg = read_dataset_from_group(group_name=subject,
                                           dataset='actiwave_ecg',
                                           hdf5_file=hdf5_file)
    # read actiwave ecg time
    actiwave_ecg_time = np.asarray(read_dataset_from_group(
        group_name=subject, dataset='actiwave_ecg_time', hdf5_file=hdf5_file),
                                   dtype='datetime64[ns]')

    return actiwave_ecg, actiwave_ecg_time
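
# a minimal usage sketch; the subject ID below is hypothetical and
# ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE is a module-level constant defined elsewhere:
#
#   ecg, ecg_time = get_actiwave_ecg_data(subject = '90013013', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
#   logging.info('read {} ECG samples from {} to {}'.format(len(ecg), ecg_time[0], ecg_time[-1]))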
def _read_epoch_and_true_nw_data(subject, i = 1, total = 1, return_epoch = True, return_true = True):
	"""
	Read 60s epoch data and true non-wear time for a single subject

	Parameters
	---------
	subject : string
		subject ID
	i : int (optional)
		index of this subject in the processing loop, only used for logging
	total : int (optional)
		total number of subjects in the processing loop, only used for logging
	return_epoch : Boolean (optional)
		if True, read and return the 'epoch60' dataset, otherwise return None for it
	return_true : Boolean (optional)
		if True, read and return the true non-wear time, otherwise return None for it
	"""

	logging.info('Loading subject {} into memory {}/{}'.format(subject, i, total))

	"""
		EPOCH DATA
	"""
	if return_epoch:
		# read subject epoch data
		subject_epoch_data = read_dataset_from_group(group_name = subject, dataset = 'epoch60', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)

		# lower precision
		subject_epoch_data = subject_epoch_data.astype('float16')
	else:
		# read raw data and downscale to 1s
		# subject_data, *_ = get_actigraph_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
		# subject_data = subject_data[::100]
		# subject_epoch_data = subject_data

		# set data to none, no epoch data is returned. 
		subject_epoch_data = None

	"""
		TRUE NON WEAR TIME
	"""
	if return_true:
		# read true non wear time and invert the labels: 0 -> 1 and 1 -> 0
		subject_true_nw = 1 - read_dataset_from_group(group_name = subject, dataset = 'actigraph_true_non_wear', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE).astype('uint8').reshape(-1,1)
		# downsample from 100 Hz to 1 s resolution
		subject_true_nw = subject_true_nw[::100]
	else:
		subject_true_nw = None

	return subject, subject_epoch_data, subject_true_nw
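
# a toy version of the label handling above, assuming 'actigraph_true_non_wear' is stored
# at 100 Hz with 1 = wear and 0 = non-wear: (1 - x) makes non-wear the positive class and
# [::100] keeps one sample per second
def _demo_true_nw_downsampling():
	import numpy as np
	raw = np.ones(300, dtype = 'uint8')       # 3 seconds of 100 Hz wear data
	raw[100:200] = 0                          # second two flagged as non-wear
	subject_true_nw = 1 - raw.reshape(-1, 1)  # invert so that 1 = non-wear
	return subject_true_nw[::100]             # array([[0], [1], [0]])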
def get_actigraph_acc_data(subject,
                           hdf5_file,
                           autocalibrate=False,
                           acc_dataset='actigraph_acc',
                           time_dataset='actigraph_time'):
    """
	Read actigraph acceleration data from HDF5 file. If autocalibrate is set to True, perform
	autocalibration. Also create the correct time array

	Parameters
	---------
	subject : string
		subject ID
	hdf5_file : os.path()
		location of the HDF5 file where the data is stored
	autocalibrate: Boolean (optional)
		set to True if autocalibration needs to be performed

	Returns
	---------
	actigraph_acc : np.array((n_samples, axes = 3))
		actigraph acceleration data YXZ
	actigraph_meta_data : dic
		dictionary with meta data
	actigraph_time : np.array((n_samples, 1))
		time array in np.datetime
	"""

    try:

        # read actigraph acceleration data
        actigraph_acc = read_dataset_from_group(group_name=subject,
                                                dataset=acc_dataset,
                                                hdf5_file=hdf5_file)
        # read actigraph meta-data
        actigraph_meta_data = read_metadata_from_group_dataset(
            group_name=subject, dataset=acc_dataset, hdf5_file=hdf5_file)
        # convert the values of the dictionary from bytes to string
        actigraph_meta_data = dictionary_values_bytes_to_string(
            actigraph_meta_data)

        if autocalibrate:
            # parse out the weights
            actigraph_weights = parse_calibration_weights(actigraph_meta_data)
            # autocalibrate actigraph acceleration data
            actigraph_acc = calibrate_accelerometer_data(
                actigraph_acc, actigraph_weights)
        # read actigraph acceleration time
        actigraph_time = np.asarray(read_dataset_from_group(
            group_name=subject, dataset=time_dataset, hdf5_file=hdf5_file),
                                    dtype='datetime64[ms]')

        return actigraph_acc, actigraph_meta_data, actigraph_time

    except Exception as e:
        logging.error('[{}] : {}'.format(sys._getframe().f_code.co_name, e))
        sys.exit(1)
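
# example call (sketch): read the raw 100 Hz acceleration signal, its meta-data, and the
# matching time axis, optionally autocalibrated; the subject ID below is hypothetical:
#
#   acc, meta, time = get_actigraph_acc_data(subject = '90013013',
#                                            hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE,
#                                            autocalibrate = True)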
def get_actiwave_acc_data(subject, hdf5_file, autocalibrate=False):
    """
	Read actiwave acceleration data from HDF5 file. If autocalibrate is set to True, perform autocalibration. Also create the correct time array

	Parameters
	---------
	subject : string
		subject ID
	hdf5_file : os.path()
		location of the HDF5 file where the data is stored. For example ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE
	autocalibrate: Boolean (optional)
		set to True if autocalibration needs to be performed

	Returns
	---------
	actiwave_acc : np.array((n_samples, axes = 3))
		actiwave acceleration data YXZ
	actiwave_meta_data : dic
		dictionary with meta data
	actiwave_time : np.array((n_samples, 1))
		time array in np.datetime
	"""

    # read actiwave acceleration data
    actiwave_acc = read_dataset_from_group(group_name=subject,
                                           dataset='actiwave_acc',
                                           hdf5_file=hdf5_file)

    # read actiwave meta-data
    actiwave_meta_data = read_metadata_from_group_dataset(
        group_name=subject, dataset='actiwave_acc', hdf5_file=hdf5_file)

    # convert the values of the dictionary from bytes to string
    actiwave_meta_data = dictionary_values_bytes_to_string(actiwave_meta_data)

    if autocalibrate:
        logging.debug('Perform autocalibration of acceleration signal')
        # parse the calibration weights out of the meta-data read above
        actiwave_weights = parse_calibration_weights(actiwave_meta_data)
        # autocalibrate actiwave acceleration data
        actiwave_acc = calibrate_accelerometer_data(actiwave_acc,
                                                    actiwave_weights)

    # read actiwave acceleration time
    actiwave_time = np.asarray(read_dataset_from_group(group_name=subject,
                                                       dataset='actiwave_time',
                                                       hdf5_file=hdf5_file),
                               dtype='datetime64[ns]')

    return actiwave_acc, actiwave_meta_data, actiwave_time
def get_actigraph_epoch_data(subject, epoch_dataset, hdf5_file):
    """
	Return epoch data for subject

	Parameters
	---------
	subject : string
		subject ID
	epoch_dataset : string
		name of dataset where epoch data is stored
	hdf5_file : os.path()
		location of the HDF5 file where the data is stored. For example ACTIGRAPH_HDF5_FILE

	Returns
	--------
	epoch_data : np.array((n_samples, 4))
		epoch data for the subject ID: 3 axes with XYZ counts plus 1 column with steps
	epoch_meta_data : dict()
		dictionary with meta data
	epoch_time_data: np.array((n_samples, 1))
		datetime array for the epoch data	
	"""

    try:

        # read epoch data
        epoch_data = read_dataset_from_group(group_name=subject,
                                             dataset=epoch_dataset,
                                             hdf5_file=hdf5_file)

        # check if subject has epoch data
        if epoch_data is None:
            logging.warning(
                'Subject {} has no epoch data, skipping...'.format(subject))
            return None, None, None

        # read epoch meta data
        epoch_meta_data = read_metadata_from_group_dataset(
            group_name=subject, dataset=epoch_dataset, hdf5_file=hdf5_file)
        # convert the values of the dictionary from bytes to string
        epoch_meta_data = dictionary_values_bytes_to_string(epoch_meta_data)

        # create time array of the epoch data
        epoch_time_data = create_epoch_time_array(
            start_date=epoch_meta_data['Start Date'],
            start_date_format=epoch_meta_data['Date Format'],
            start_time=epoch_meta_data['Start Time'],
            epoch_data_length=len(epoch_data),
            epoch_sec=10)

        # make epoch time data on a similar scale as raw time data (thus on ms scale and not on s)
        epoch_time_data = np.asarray(epoch_time_data, dtype='datetime64[ms]')

        return epoch_data, epoch_meta_data, epoch_time_data

    except Exception as e:

        logging.error('[{}] : {}'.format(sys._getframe().f_code.co_name, e))
        sys.exit(1)
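
# create_epoch_time_array is a project helper defined elsewhere; a minimal equivalent,
# assuming a parseable ISO start timestamp and a fixed epoch length in seconds, might be:
def _demo_epoch_time_array(start='2017-06-01T08:00:00', n_epochs=5, epoch_sec=10):
    import numpy as np
    t0 = np.datetime64(start, 'ms')
    step = np.timedelta64(epoch_sec * 1000, 'ms')
    # one timestamp per epoch: t0, t0 + 10s, t0 + 20s, ...
    return t0 + np.arange(n_epochs) * step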
def process_plot_mri_images(paths, params):
    """
	Plot MRI images from HDF5 file
	"""

    # dynamically create hdf5 file
    hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

    # read datasets from HDF5 file
    D = get_datasets_from_group(group_name=params['group_no_bg'],
                                hdf5_file=hdf5_file)

    # read data from each dataset and plot mri data
    for i, d in enumerate(D):

        logging.info(f'Processing dataset : {d} {i}/{len(D)}')

        # read data from group
        data = read_dataset_from_group(group_name=params['group_no_bg'],
                                       dataset=d,
                                       hdf5_file=hdf5_file)

        # image plot folder
        image_plot_folder = os.path.join(paths['plot_folder'],
                                         params['group_no_bg'],
                                         d.split()[-1], d)

        # create folder to store image to
        create_directory(image_plot_folder)

        # a single image for each slice along dimension 0 (img_idx avoids shadowing the dataset index i)
        for img_idx in range(data.shape[0]):

            # create figure and axes
            fig, ax = plt.subplots(1, 1, figsize=(10, 10))

            # plot mri image
            ax.imshow(data[img_idx], cmap='gray', vmax=1000)

            # remove all white space and axes
            plt.gca().set_axis_off()
            plt.subplots_adjust(top=1,
                                bottom=0,
                                right=1,
                                left=0,
                                hspace=0,
                                wspace=0)
            plt.margins(0, 0)
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())

            # save the figure
            fig.savefig(os.path.join(image_plot_folder, f'{img_idx}.png'), dpi=300)

            # close the plot environment
            plt.close()
def perform_inference_segmentation(paths, params):

	# hdf5 file that contains the original images
	hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

	# path to trained CNN model
	model_file = os.path.join(paths['model_folder'], params['cnn_model'], 'model.h5')

	# get all patient names from original MRI group
	patients = get_datasets_from_group(group_name = params['group_no_bg'], hdf5_file = hdf5_file)
	
	# loop over each patient, read data, perform inference
	for i, patient in enumerate(patients):

		logging.info(f'Processing patient: {patient} {i}/{len(patients)}')

		# read images
		images = read_dataset_from_group(dataset = patient, group_name = params['group_no_bg'], hdf5_file = hdf5_file)

		# rescale 12bit images to 0-1
		images = images * params['rescale_factor']

		# create empty array to save reconstructed images
		segmented_images = np.empty_like(images, dtype = 'uint8')

		# use parallel processing to speed up processing time
		executor = Parallel(n_jobs = cpu_count(), backend = 'multiprocessing')

		# create tasks so we can execute them in parallel
		tasks = (delayed(classify_img_feature)(img = images[img_slice], 
												slice_idx = img_slice, 
												feature_size = params['feature_size'], 
												step_size = params['step_size'],
												model_file = model_file,
												verbose = True) for img_slice in range(images.shape[0]))
		
		# execute tasks and process the return values
		for segmented_image, slice_idx in executor(tasks):

			# add each segmented image slice to the overall array that holds all the slices
			segmented_images[slice_idx] = segmented_image

		# save segmented images to HDF5 file
		save_data_to_group_hdf5(group = params['group_segmented_classification_mri'],
								data = segmented_images,
								data_name = patient,
								hdf5_file = hdf5_file,
								overwrite = True)
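
# the Parallel/delayed pattern above, in isolation: build a generator of delayed calls and
# let the executor fan them out over the available cores. A self-contained toy sketch, where
# _demo_square_slice stands in for classify_img_feature:
def _demo_square_slice(x, slice_idx):
	return x ** 2, slice_idx

def _demo_parallel_pattern():
	from multiprocessing import cpu_count
	import numpy as np
	from joblib import Parallel, delayed

	data = np.arange(4)
	out = np.empty_like(data)
	executor = Parallel(n_jobs = cpu_count())
	tasks = (delayed(_demo_square_slice)(x = data[i], slice_idx = i) for i in range(data.shape[0]))
	# results can arrive in any order; slice_idx puts each one back in place
	for result, slice_idx in executor(tasks):
		out[slice_idx] = result
	return out  # array([0, 1, 4, 9])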
def process_plot_non_wear_algorithms(subject, plot_folder, epoch_dataset = 'epoch10', use_optimized_parameters = True, optimized_data_folder = os.path.join('files', 'grid-search', 'final')):
	"""
	Plot acceleration data from actigraph and actiwave including the 4 non wear methods and the true non wear time

	- plot actigraph XYZ
	- plot actiwave YXZ
	- plot actiwave heart rate
	- plot hecht, choi, troiano, v. Hees
	- plot true non wear time

	Parameters
	---------
	subject : string
		subject ID
	plot_folder : os.path
		folder location to save plots to
	epoch_dataset : string (optional)
		name of hdf5 dataset that contains 10s epoch data
	use_optimized_parameters : Boolean (optional)
		if True, recalculate each non-wear vector with the top parameters from the grid search
	optimized_data_folder : os.path (optional)
		folder location of the pickled grid search results
	"""

	optimized_parameters = {}
	classification = 'f1'
	for nw_method in ['hecht', 'troiano', 'choi', 'hees']:
		
		# load data
		data = load_pickle('grid-search-results-{}'.format(nw_method), optimized_data_folder)
		# obtain top parameters
		top_results = sorted(data.items(), key = lambda item: item[1][classification], reverse = True)[0]
		optimized_parameters[nw_method] = top_results[0]
	
	"""
		GET ACTIGRAPH DATA
	"""
	actigraph_acc, _ , actigraph_time = get_actigraph_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	# get start and stop time
	start_time, stop_time = actigraph_time[0], actigraph_time[-1]

	"""
		GET ACTIWAVE DATA
	"""
	actiwave_acc, _ , actiwave_time = get_actiwave_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	actiwave_hr, actiwave_hr_time = get_actiwave_hr_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)

	"""
		EPOCH DATA
	"""

	if epoch_dataset in get_datasets_from_group(group_name = subject, hdf5_file = ACTIGRAPH_HDF5_FILE):

		# get 10s epoch data from HDF5 file
		epoch_data, _ , epoch_time_data = get_actigraph_epoch_data(subject, epoch_dataset = epoch_dataset, hdf5_file = ACTIGRAPH_HDF5_FILE)

		# convert to 60s epoch data	
		epoch_60_data, epoch_60_time_data = get_actigraph_epoch_60_data(epoch_data, epoch_time_data)

		# calculate epoch 60 VMU
		epoch_60_vmu_data = calculate_vector_magnitude(epoch_60_data[:,:3], minus_one = False, round_negative_to_zero = False)


		"""
			GET NON WEAR TIME
		"""
		
		# true non wear time
		true_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'actigraph_true_non_wear', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE) 
		# hecht 3-axes non wear time
		hecht_3_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'hecht_2009_3_axes_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE) 
		# troiano non wear time
		troiano_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'troiano_2007_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
		# choi non wear time
		choi_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'choi_2011_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE) 
		# hees non wear time
		hees_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'hees_2013_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	
		# if set to True, then update the matrix column with non-wear data
		if use_optimized_parameters:
	
			"""
				READ 60S EPOCH DATA
			"""
			subject_epoch_data = read_dataset_from_group(group_name = subject, dataset = 'epoch60', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE).astype('float16')
			subject_epoch_data_vmu = calculate_vector_magnitude(subject_epoch_data, minus_one = False, round_negative_to_zero = False)

			"""
				HECHT
			"""
			# unpack variables
			t, i, m = optimized_parameters['hecht'].split('-')
			# use optimized parameters
			hecht_3_non_wear_time[:,1] = hecht_2009_triaxial_calculate_non_wear_time(data = subject_epoch_data_vmu, epoch_sec = 60, threshold = int(t), time_interval_mins = int(i), min_count = int(m))[:,0]
			
			"""
				TROIANO
			"""
			# unpack variables
			at, mpl, st, ss, vm = optimized_parameters['troiano'].split('-')
			# use optimized variables to calculate non wear vector
			troiano_non_wear_time[:,1] = troiano_2007_calculate_non_wear_time(subject_epoch_data, None, activity_threshold = int(at), min_period_len = int(mpl), spike_tolerance = int(st), spike_stoplevel = int(ss), use_vector_magnitude = eval(vm), print_output = False)[:,0]
			
			"""
				CHOI
			"""
			at, mpl, st, mwl, wst, vm = optimized_parameters['choi'].split('-')
			choi_non_wear_time[:,1] = choi_2011_calculate_non_wear_time(subject_epoch_data, None, activity_threshold = int(at), min_period_len = int(mpl), spike_tolerance = int(st),  min_window_len = int(mwl), window_spike_tolerance = int(wst), use_vector_magnitude = eval(vm), print_output = False)[:,0]

			"""
				HEES
			"""
			mw, wo, st, sa, vt, va = optimized_parameters['hees'].split('-')
			hees_non_wear_time = hees_2013_calculate_non_wear_time(actigraph_acc, hz = 100, min_non_wear_time_window = int(mw), window_overlap = int(wo), std_mg_threshold = float(st), std_min_num_axes = int(sa) , value_range_mg_threshold = float(vt), value_range_min_num_axes = int(va))
			
		"""
			CREATING THE DATAFRAMES
		"""

		# convert actigraph data to pandas dataframe
		df_actigraph_acc = pd.DataFrame(actigraph_acc, index = actigraph_time, columns = ['ACTIGRAPH Y', 'ACTIGRAPH X', 'ACTIGRAPH Z'])
		# convert actiwave data to pandas dataframe
		df_actiwave_acc = pd.DataFrame(actiwave_acc, index = actiwave_time, columns = ['ACTIWAVE Y', 'ACTIWAVE X', 'ACTIWAVE Z'])
		# convert actiwave hr to pandas dataframe
		df_actiwave_hr = pd.DataFrame(actiwave_hr, index = actiwave_hr_time, columns = ['ESTIMATED HR'])
		# convert 60s epoch vmu to dataframe
		df_epoch_60_vmu = pd.DataFrame(epoch_60_vmu_data, index = epoch_60_time_data, columns = ['EPOCH 60s VMU'])
		# slice based on start and stop time
		df_epoch_60_vmu = df_epoch_60_vmu.loc[start_time:stop_time]
		# create dataframe of true non wear time
		df_true_non_wear_time = pd.DataFrame(true_non_wear_time, index = actigraph_time, columns = ['TRUE NON WEAR TIME'])
		# create dataframe of hecht 3-axes non wear time
		df_hecht_3_non_wear_time = pd.DataFrame(hecht_3_non_wear_time[:,1], index = np.asarray(hecht_3_non_wear_time[:,0], dtype ='datetime64[ns]'), columns = ['HECHT-3 NON WEAR TIME'])
		# create dataframe of troiano non wear time
		df_troiano_non_wear_time = pd.DataFrame(troiano_non_wear_time[:,1], index = np.asarray(troiano_non_wear_time[:,0], dtype = 'datetime64[ns]'), columns = ['TROIANO NON WEAR TIME'])
		# create dataframe of choi non wear time
		df_choi_non_wear_time = pd.DataFrame(choi_non_wear_time[:,1], index = np.asarray(choi_non_wear_time[:,0], dtype = 'datetime64[ns]'), columns = ['CHOI NON WEAR TIME'])
		# create dataframe of hees non wear time
		df_hees_non_wear_time = pd.DataFrame(hees_non_wear_time, index = actigraph_time, columns = ['HEES NON WEAR TIME'])
		
		# merge all dataframes
		df_joined = df_actigraph_acc.join(df_true_non_wear_time, how='outer').join(df_actiwave_acc, how='outer').join(df_hecht_3_non_wear_time, how='outer') \
					.join(df_troiano_non_wear_time, how='outer').join(df_choi_non_wear_time, how='outer').join(df_hees_non_wear_time, how='outer').join(df_epoch_60_vmu, how='outer').join(df_actiwave_hr, how='outer')

		# call plot function
		plot_non_wear_algorithms(data = df_joined, subject = subject, plot_folder = plot_folder)
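
# note on the eval(vm) calls above: the grid-search parameter strings encode booleans as
# 'True'/'False', and eval() converts them back. A safer equivalent that avoids calling
# eval on file-derived strings:
def _parse_bool_flag(flag):
	# flag is e.g. the 'vm' component of an 'at-mpl-st-ss-vm' parameter string
	return flag == 'True'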
def process_convert_segmentation_to_features(paths, params, verbose=True):

    # read in all segmentation files
    F = [
        x for x in read_directory(paths['segmentation_folder'])
        if x.endswith(('.nii', '.nii.gz'))
    ]

    # get feature size from params
    feature_size = params['feature_size']

    # process each segmentation file
    for f_idx, file in enumerate(F):

        logging.info(f'Processing segmentation file : {file} {f_idx}/{len(F)}')

        # extract patient name from file (assumes the 7-character '.nii.gz' suffix)
        patient = file.split(os.sep)[-1][:-7]

        # read patient original MRI image
        original_images = read_dataset_from_group(
            group_name=params['group_original_mri'],
            dataset=patient,
            hdf5_file=os.path.join(paths['hdf5_folder'], params['hdf5_file']))

        # check if original image can be found
        if original_images is None:
            logging.error(
                f'No original image found, please check patient name : {patient}'
            )
            sys.exit(1)

        # read in NIfTI file with segmentation data, shape (256, 256, 54)
        images = nib.load(file)

        # empty lists to store X and Y features
        X = []
        Y = []

        # fig, axs = plt.subplots(6,4, figsize = (10,10))
        # axs = axs.ravel()
        # plt_idx = 0

        # process each slice
        for mri_slice in range(images.shape[2]):

            if verbose:
                logging.debug(f'Slice : {mri_slice}')

            # extract image slice
            img = images.dataobj[:, :, mri_slice]

            # test image for patchers
            # img_patches = np.zeros((img.shape))

            # check if there are any segmentations to be found
            if np.sum(img) == 0:
                if verbose:
                    logging.debug('No segmentations found, skipping...')
                continue

            # flip and rotate the image to match the orientation of the original DICOM data when read into Python
            img = np.flip(img, 1)
            img = np.rot90(img)

            # unique segmentation classes
            seg_classes = np.unique(img)
            # remove zero class (this is the background)
            seg_classes = seg_classes[seg_classes != 0]

            # get features for each class
            for seg_class in seg_classes:

                if verbose:
                    logging.debug(
                        f'Processing segmentation class : {seg_class}')

                # check which rows have an annotation (we skip the rows that don't have the annotation)
                rows = np.argwhere(np.any(img[:] == seg_class, axis=1))
                # check which columns have an annotation
                cols = np.argwhere(np.any(img[:] == seg_class, axis=0))
                # get start and stop rows
                min_rows, max_rows = rows[0][0], rows[-1][0]
                # get start and stop columns
                min_cols, max_cols = cols[0][0], cols[-1][0]

                logging.debug(f'Processing rows: {min_rows}-{max_rows}')
                logging.debug(f'Processing cols: {min_cols}-{max_cols}')

                # loop over rows and columns to extract patches of the image and check if there are annotations
                for i in range(min_rows, max_rows - feature_size[0]):
                    for j in range(min_cols, max_cols - feature_size[1]):

                        # extract image patch with the dimensions of the feature
                        img_patch = img[i:i + feature_size[0],
                                        j:j + feature_size[1]]

                        # check if all cells have been annotated
                        if np.all(img_patch == seg_class):

                            # extract patch from original MRI image, these will contain the features.
                            patch = original_images[mri_slice][i:i + feature_size[0],
                                                               j:j + feature_size[1]]

                            # add patch to X and segmentation class to Y
                            X.append([patch])
                            Y.append([seg_class])

        # 					img_patches[i:i + feature_size[0], j : j + feature_size[1]] = seg_class

        # 	axs[plt_idx].imshow(original_images[mri_slice], cmap = 'gray')
        # 	axs[plt_idx + 1].imshow(img_patches, vmin = 0, vmax = 3, interpolation = 'nearest')

        # 	plt_idx += 2

        # plt.show()
        # continue

        # convert X and Y to numpy arrays
        X = np.vstack(X)
        Y = np.vstack(Y)

        # create save folder location
        save_folder = os.path.join(paths['feature_folder'], patient)
        # create folder
        create_directory(save_folder)
        # save features to disk
        np.savez(file=os.path.join(save_folder, f'{patient}.npz'), X=X, Y=Y)
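
# the patch test above, in isolation: a window of feature_size counts as a training sample
# only when every pixel in it carries the same segmentation class. Toy sketch:
def _demo_patch_is_fully_annotated():
    import numpy as np
    img = np.zeros((4, 4), dtype='uint8')
    img[0:2, 0:2] = 1                          # a 2x2 block of class 1
    feature_size = (2, 2)
    patch = img[0:feature_size[0], 0:feature_size[1]]
    return np.all(patch == 1)                  # True: patch lies fully inside class 1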
def perform_calculate_tissue_distributions(paths, params):

	# hdf5 file that contains the original images
	hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])
	
	# get all patient names from original MRI group
	patients = get_datasets_from_group(group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

	# empty pandas dataframe to hold all data
	data = pd.DataFrame()

	# loop over each patient, read data, compute tissue distributions
	for i, patient in enumerate(patients):

		logging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')

		# parse out treatment, sample, and state from patient name
		treatment, sample, state = parse_patientname(patient_name = patient)

		# read images
		images = read_dataset_from_group(dataset = patient, group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

		# handle connected tissue (for example, set connected damaged tissue to damaged tissue)
		images = process_connected_tissue(images = images, params = params)

		# reshape image to unroll pixels in last two dimensions. Go from (54, 256, 256) to (54, 65536)
		images = images.reshape(images.shape[0], -1)
		
		# count damaged tissue
		damaged_tissue = np.sum((images == 1), axis = 1)
		# count non_damaged tissue
		non_damaged_tissue = np.sum((images == 2), axis = 1)
		# relative damaged
		rel_damaged = damaged_tissue / (damaged_tissue + non_damaged_tissue) * 100
		# relative non-damaged
		rel_non_damaged = 100 - rel_damaged
		
		# process data for each slice
		for mri_slice in range(images.shape[0]):

			# check slice validity
			if check_mri_slice_validity(patient = patient, mri_slice = mri_slice, total_num_slices = images.shape[0]):

				# add data to dictionary
				mri_slice_data = {	'patient' : patient,
									'treatment' : treatment,
									'sample' : sample,
									'state' : state,
									'mri_slice' : mri_slice,
									'damaged_pixels' : damaged_tissue[mri_slice],
									'non_damaged_pixels' : non_damaged_tissue[mri_slice],
									'rel_damaged' : rel_damaged[mri_slice],
									'rel_non_damaged' : rel_non_damaged[mri_slice],
									}
				# create unique ID
				mri_slice_id = f'{treatment}_{sample}_{state}_{mri_slice}'
				
				# add to pandas dataframe
				data[mri_slice_id] = pd.Series(mri_slice_data)

	# transpose and save dataframe as CSV
	data.T.to_csv(os.path.join(paths['table_folder'], 'tissue_distributions.csv'))
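
# the per-slice bookkeeping above, reduced to its core: class 1 = damaged, class 2 =
# non-damaged, all other classes are ignored in the relative measure
def _demo_relative_damage():
	import numpy as np
	# two "slices" with their pixels unrolled
	images = np.array([[1, 1, 2, 2, 0],
	                   [1, 2, 2, 2, 0]])
	damaged = np.sum(images == 1, axis = 1)      # [2, 1]
	non_damaged = np.sum(images == 2, axis = 1)  # [2, 3]
	# relative damaged tissue percentage per slice
	return damaged / (damaged + non_damaged) * 100  # [50., 25.]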
def process_plot_mri_with_damaged(paths, params):
    """
	Plot original MRI on left and MRI image with damaged overlayed on the right
	"""

    # hdf5 file that contains the original images
    hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

    # get all patient names from original MRI group
    patients = get_datasets_from_group(group_name=params['group_original_mri'],
                                       hdf5_file=hdf5_file)

    # get list of patients without state
    patients = set(
        [re.search('(.*) (fersk|Tint)', x).group(1) for x in patients])

    # loop over each patient, read data, plot original and overlaid images
    for i, patient in enumerate(patients):

        logging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')

        # parse out treatment, sample, and state from patient name
        treatment, _, _ = parse_patientname(patient_name=f'{patient} fersk')
        """
		Get fresh state
		"""
        # read original images
        fresh_original_images = read_dataset_from_group(
            dataset=f'{patient} fersk',
            group_name=params['group_original_mri'],
            hdf5_file=hdf5_file)
        # read reconstructed images
        fresh_reconstructed_images = read_dataset_from_group(
            dataset=f'{patient} fersk',
            group_name=params['group_segmented_classification_mri'],
            hdf5_file=hdf5_file)
        # only take damaged tissue and set connected tissue
        fresh_reconstructed_damaged_images = (process_connected_tissue(
            images=fresh_reconstructed_images.copy(), params=params) == 1)
        """
		Get frozen/thawed
		"""
        # read original images
        frozen_original_images = read_dataset_from_group(
            dataset=f'{patient} Tint',
            group_name=params['group_original_mri'],
            hdf5_file=hdf5_file)
        # read reconstructed images
        frozen_reconstructed_images = read_dataset_from_group(
            dataset=f'{patient} Tint',
            group_name=params['group_segmented_classification_mri'],
            hdf5_file=hdf5_file)
        # only take damaged tissue and set connected tissue
        frozen_reconstructed_damaged_images = (process_connected_tissue(
            images=frozen_reconstructed_images.copy(), params=params) == 1)

        # get total number of slices to process
        total_num_slices = fresh_original_images.shape[0]
        # loop over each slice
        for mri_slice in range(total_num_slices):

            # check slice validity of fresh patient
            if check_mri_slice_validity(patient=f'{patient} fersk',
                                        mri_slice=mri_slice,
                                        total_num_slices=total_num_slices):

                if check_mri_slice_validity(patient=f'{patient} Tint',
                                            mri_slice=mri_slice,
                                            total_num_slices=total_num_slices):

                    # setting up the plot environment
                    fig, axs = plt.subplots(2, 2, figsize=(8, 8))
                    axs = axs.ravel()

                    # define the colors we want
                    plot_colors = ['#250463', '#e34a33']
                    # create a custom listed colormap (so we can overwrite the colors of predefined cmaps)
                    cmap = colors.ListedColormap(plot_colors)
                    # subfigure label for example, a, b, c, d etc
                    sf = cycle(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
                    """
					Plot fresh state
					"""
                    # obtain vmax score so image grayscales are normalized better
                    vmax_percentile = 99.9
                    vmax = np.percentile(fresh_original_images[mri_slice],
                                         vmax_percentile)

                    # plot fresh original MRI image
                    axs[0].imshow(fresh_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[0].set_title(
                        rf'$\bf({next(sf)})$ Fresh - Original MRI')

                    # plot fresh reconstructed image overlaid on top of the original image
                    # axs[1].imshow(fresh_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)
                    # im = axs[1].imshow(fresh_reconstructed_images[mri_slice],alpha = 0.7, interpolation = 'none')
                    # axs[1].set_title(rf'$\bf({next(sf)})$ Fresh - Reconstructed')

                    # plot fresh reconstructed image overlaid on top of the original image
                    axs[1].imshow(fresh_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[1].imshow(
                        fresh_reconstructed_damaged_images[mri_slice],
                        cmap=cmap,
                        alpha=.5,
                        interpolation='none')
                    axs[1].set_title(
                        rf'$\bf({next(sf)})$ Fresh - Reconstructed')
                    """
					Plot frozen/thawed state
					"""
                    # plot frozen/thawed original MRI image
                    # obtain vmax score so image grayscales are normalized better
                    vmax = np.percentile(frozen_original_images[mri_slice],
                                         vmax_percentile)
                    axs[2].imshow(frozen_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[2].set_title(
                        rf'$\bf({next(sf)})$ {treatment_to_title(treatment)} - Original MRI'
                    )

                    # plot frozen reconstructed image with all classes
                    # axs[4].imshow(frozen_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)
                    # im = axs[4].imshow(frozen_reconstructed_images[mri_slice], alpha = 0.7, interpolation = 'none')
                    # axs[4].set_title(rf'$\bf({next(sf)})$ {treatment_to_title(treatment)} - Reconstructed')

                    # plot frozen/thawed reconstructed image overlaid on top of the original image
                    axs[3].imshow(frozen_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[3].imshow(
                        frozen_reconstructed_damaged_images[mri_slice],
                        cmap=cmap,
                        alpha=.5,
                        interpolation='none')
                    axs[3].set_title(
                        rf'$\bf({next(sf)})$ {treatment_to_title(treatment)} - Reconstructed'
                    )
                    """
					Create custom legend
					"""
                    # add custom legend
                    class_labels = {0: 'background', 1: 'damaged tissue'}
                    class_values = list(class_labels.keys())
                    # create a patch
                    patches = [
                        mpatches.Patch(color=plot_colors[i],
                                       label=class_labels[i])
                        for i in range(len(class_values))
                    ]
                    axs[1].legend(
                        handles=patches
                    )  #, bbox_to_anchor=(1.05, 1), loc = 2, borderaxespad=0. )

                    # legend for fully reconstructed image
                    # get class labels
                    # class_labels = params['class_labels']
                    # # get class indexes from dictionary
                    # values = class_labels.keys()
                    # # get the colors of the values, according to the
                    # # colormap used by imshow
                    # plt_colors = [ im.cmap(im.norm(value)) for value in values]
                    # # create a patch (proxy artist) for every color
                    # patches = [ mpatches.Patch(color = plt_colors[i], label= class_labels[i]) for i in range(len(values)) ]
                    # # put those patched as legend-handles into the legend
                    # axs[1].legend(handles = patches)#, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
                    """
					Adjust figures
					"""
                    # remove axis of all subplots
                    for ax in axs:
                        ax.axis('off')
                    # define plot subfolder
                    subfolder = os.path.join(paths['paper_plot_folder'],
                                             'original_vs_reconstructed',
                                             patient)
                    # create subfolder
                    create_directory(subfolder)
                    # crop white space
                    fig.set_tight_layout(True)
                    # save the figure
                    fig.savefig(
                        os.path.join(subfolder, f'slice_{mri_slice}.pdf'))

                    # close the figure environment
                    plt.close()
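
# the overlay idiom above, in isolation: draw the grayscale image first, then imshow a
# boolean mask on the same axes with a two-color ListedColormap and alpha < 1:
def _demo_mask_overlay(img, mask):
    # img: 2D grayscale array; mask: boolean array of the same shape
    import matplotlib.pyplot as plt
    from matplotlib import colors
    fig, ax = plt.subplots()
    ax.imshow(img, cmap='gray')
    ax.imshow(mask,
              cmap=colors.ListedColormap(['#250463', '#e34a33']),
              alpha=0.5,
              interpolation='none')
    return fig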
def plot_segmented_images(paths, params):
	"""
	Plot segmented images
	"""

	# create hdf5 file
	hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

	# get list of patient names to plot
	patients = get_datasets_from_group(group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

	# plot each patient
	for i, patient in enumerate(patients):

		logging.info(f'Processing patient: {patient} {i}/{len(patients)}')

		# read segmented images
		images = read_dataset_from_group(dataset = patient, group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

		# set up plotting environment
		fig, axs = plt.subplots(6,9, figsize = (20,20))		
		axs = axs.ravel()

		# loop over each slice and print
		for mri_slice in range(images.shape[0]):

			logging.debug(f'Processing slice: {mri_slice}') 

			# check slice validity
			if check_mri_slice_validity(patient = patient, mri_slice = mri_slice, total_num_slices = images.shape[0]):

				# plot image
				im = axs[mri_slice].imshow(images[mri_slice], vmin = 0, vmax = 5, interpolation='none')
			axs[mri_slice].set_title(f'{mri_slice}')

		# get class labels
		class_labels = params['class_labels']
		# get class indexes from dictionary
		values = class_labels.keys()
		# get the colors of the values, according to the
		# colormap used by imshow; named plt_colors to avoid shadowing matplotlib's colors module
		plt_colors = [ im.cmap(im.norm(value)) for value in values]
		# create a patch (proxy artist) for every color
		patches = [ mpatches.Patch(color = plt_colors[i], label= class_labels[i]) for i in range(len(values)) ]
		# put those patched as legend-handles into the legend
		plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
		
		# make adjustments to each subplot	
		for ax in axs:
			ax.axis('off')

		# create plotfolder subfolder
		plot_sub_folder = os.path.join(paths['plot_folder'], 'segmentation', params['cnn_model'])
		create_directory(plot_sub_folder)

		# crop white space
		fig.set_tight_layout(True)
		# save the figure
		fig.savefig(os.path.join(plot_sub_folder, f'{patient}.png'))

		# close the figure environment
		plt.close()
def remove_bg(paths, params):
    """
	Remove background from MRI images

	Parameters
	--------------
	paths : dict
		dictionary of folder locations; 'hdf5_folder' is the folder of the HDF5 file that
		contains the raw MRI data and to which the processed data is saved
	params : dict
		dictionary of parameters; 'hdf5_file' is the name of the HDF5 file, 'group_original_mri'
		the group that contains the raw MRI images, and 'group_no_bg' the group to store images
		with the background removed
	"""

    # dynamically create hdf5 file
    hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

    # read original MRI datasets from HDF5 file
    D = get_datasets_from_group(group_name=params['group_original_mri'],
                                hdf5_file=hdf5_file)

    # read data from each dataset and plot mri data
    for d_idx, d in enumerate(D):

        logging.info(f'Processing dataset : {d} {d_idx}/{len(D)}')

        # read data from group
        data = read_dataset_from_group(group_name=params['group_original_mri'],
                                       dataset=d,
                                       hdf5_file=hdf5_file)

        # read meta data
        meta_data = read_metadata_from_group_dataset(
            group_name=params['group_original_mri'],
            dataset=d,
            hdf5_file=hdf5_file)

        logging.info(f'Processing patient : {meta_data["PatientName"]}')

        # new numpy array to hold segmented data
        data_segmented = np.empty_like(data, dtype='int16')

        # process each slice
        for i in range(data.shape[0]):

            # ind_cycle = cycle(range(10))
            # fig, axs = plt.subplots(1,8, figsize = (20,5))
            # axs = axs.ravel()

            # original MRI
            img = data[i]
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Original MRI')

            # change grayscale
            img = change_img_contrast(img, phi=10, theta=1)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Changed gray scale')

            # convert to 8 bit
            if d not in ['Torsk 1-4 fersk']:
                img = np.array(img, dtype='uint8')
                # plt_index = next(ind_cycle)
                # axs[plt_index].imshow(img, cmap = 'gray')
                # axs[plt_index].set_title('Convert to 8 bit')

            # inverted colors
            # img = (255) - img
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Inverted MRI')

            # max filter
            img = ndimage.maximum_filter(img, size=7)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Max filter')

            # erosion
            img = cv2.erode(img, None, iterations=4)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Erosion')

            # gaussian filter
            img = cv2.GaussianBlur(img, (11, 11), 0)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Gaussian Blur')

            # knn bg remove
            segmented_img = perform_knn_segmentation(n_clusters=2, img=img)
            img = mask_image(img=data[i],
                             segmented_img=segmented_img,
                             mask_value=segmented_img[0][0],
                             fill_value=0)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('KNN BG remove')

            # add masked image to data_segmented, where we store each slice
            data_segmented[i] = img

            # plt.show()

        # save data to HDF5
        save_data_to_group_hdf5(group=params['group_no_bg'],
                                data=data_segmented,
                                data_name=d,
                                hdf5_file=hdf5_file,
                                meta_data=meta_data,
                                overwrite=True)
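
# perform_knn_segmentation is a project helper; a plausible minimal stand-in, assuming it
# clusters pixel intensities into n_clusters groups and returns a per-pixel label image
# (here sketched with scikit-learn KMeans):
def _demo_intensity_segmentation(img, n_clusters=2):
    from sklearn.cluster import KMeans
    pixels = img.reshape(-1, 1).astype('float32')
    labels = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(pixels)
    return labels.reshape(img.shape)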