Code example #1
def process_plot_mri_images(paths, params):
    """
	Plot MRI images from HDF5 file
	"""

    # construct the path to the hdf5 file
    hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

    # read datasets from HDF5 file
    D = get_datasets_from_group(group_name=params['group_no_bg'],
                                hdf5_file=hdf5_file)

    # read data from each dataset and plot mri data
    for d_idx, d in enumerate(D):

        logging.info(f'Processing dataset : {d} {d_idx + 1}/{len(D)}')

        # read data from group
        data = read_dataset_from_group(group_name=params['group_no_bg'],
                                       dataset=d,
                                       hdf5_file=hdf5_file)

        # image plot folder
        image_plot_folder = os.path.join(paths['plot_folder'],
                                         params['group_no_bg'],
                                         d.split()[-1], d)

        # create folder to store image to
        create_directory(image_plot_folder)

        # save a separate image for each slice along the first dimension
        for i in range(data.shape[0]):

            # create figure and axes
            fig, ax = plt.subplots(1, 1, figsize=(10, 10))

            # plot mri image
            ax.imshow(data[i], cmap='gray', vmax=1000)

            # remove all white space and axes
            ax.set_axis_off()
            plt.subplots_adjust(top=1,
                                bottom=0,
                                right=1,
                                left=0,
                                hspace=0,
                                wspace=0)
            plt.margins(0, 0)
            ax.xaxis.set_major_locator(plt.NullLocator())
            ax.yaxis.set_major_locator(plt.NullLocator())

            # save the figure
            fig.savefig(os.path.join(image_plot_folder, f'{i}.png'), dpi=300)

            # close this figure explicitly so figures do not accumulate in the loop
            plt.close(fig)
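The HDF5 helpers used throughout these examples (get_datasets_from_group, read_dataset_from_group, save_data_to_group_hdf5, create_directory) are not shown. As a rough orientation, a minimal sketch of what the two read helpers might look like with h5py, assuming each group stores its datasets directly under the group name:

import h5py

def get_datasets_from_group(group_name, hdf5_file):
    # return the names of all datasets stored under a group
    with h5py.File(hdf5_file, 'r') as f:
        return list(f[group_name].keys())

def read_dataset_from_group(group_name, dataset, hdf5_file):
    # read the full contents of a single dataset into a numpy array
    with h5py.File(hdf5_file, 'r') as f:
        return f[group_name][dataset][:]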
Code example #2
def perform_inference_segmentation(paths, params):
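	"""
	Segment MRI images with a trained CNN model and save the segmented images back to the HDF5 file
	"""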

	# hdf5 file that contains the original images
	hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

	# path to trained CNN model
	model_file = os.path.join(paths['model_folder'], params['cnn_model'], 'model.h5')

	# get all patient names from original MRI group
	patients = get_datasets_from_group(group_name = params['group_no_bg'], hdf5_file = hdf5_file)
	
	# loop over each patient, read data, perform inference
	for i, patient in enumerate(patients):

		logging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')

		# read images
		images = read_dataset_from_group(dataset = patient, group_name = params['group_no_bg'], hdf5_file = hdf5_file)

		# rescale 12bit images to 0-1
		images = images * params['rescale_factor']

		# create empty array to hold the segmented images
		segmented_images = np.empty_like(images, dtype = 'uint8')

		# use parallel processing to speed up processing time
		executor = Parallel(n_jobs = cpu_count(), backend = 'multiprocessing')

		# create tasks so we can execute them in parallel
		tasks = (delayed(classify_img_feature)(img = images[img_slice], 
												slice_idx = img_slice, 
												feature_size = params['feature_size'], 
												step_size = params['step_size'],
												model_file = model_file,
												verbose = True) for img_slice in range(images.shape[0]))
		
		# execute tasks and process the return values
		for segmented_image, slice_idx in executor(tasks):

			# add each segmented image slice to the overall array that holds all the slices
			segmented_images[slice_idx] = segmented_image

		# save segmented images to HDF5 file
		save_data_to_group_hdf5(group = params['group_segmented_classification_mri'],
								data = segmented_images,
								data_name = patient,
								hdf5_file = hdf5_file,
								overwrite = True)
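The fan-out/fan-in pattern above comes from joblib: delayed wraps each call into a lazy task, and calling the Parallel executor with the task generator runs the tasks and yields their return values. A self-contained sketch of the same pattern, where work is a hypothetical stand-in for classify_img_feature:

from multiprocessing import cpu_count

from joblib import Parallel, delayed

def work(x, idx):
	# stand-in worker: return a result together with the index it belongs to
	return x * x, idx

# create lazy tasks, execute them in parallel, then reassemble results in order
executor = Parallel(n_jobs = cpu_count(), backend = 'multiprocessing')
tasks = (delayed(work)(x = i, idx = i) for i in range(8))
results = [None] * 8
for value, idx in executor(tasks):
	results[idx] = value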
Code example #3
def _read_epoch_dataset(subject, epoch_dataset, start_time, stop_time, use_vmu = False, upscale_epoch = True, start_epoch_sec = 10, end_epoch_sec = 60):
	"""
	Read epoch dataset

	Parameters
	----------
	subject : string
		subject ID
	epoch_dataset : string
		name of HDF5 dataset that contains the epoch data
	start_time : timestamp
		start of the time window to select epoch data for
	stop_time : timestamp
		end of the time window to select epoch data for
	use_vmu : bool (optional)
		if set to True, calculate the vector magnitude (VMU) of the axes
	upscale_epoch : bool (optional)
		if set to True, convert the epoch data from start_epoch_sec to end_epoch_sec resolution
	start_epoch_sec : int (optional)
		epoch length in seconds of the source data
	end_epoch_sec : int (optional)
		epoch length in seconds to convert to

	Returns
	----------
	df_epoch_data : pd.DataFrame or None
		epoch data indexed by time, cropped between start_time and stop_time; None if the dataset does not exist
	"""

	# check if epoch dataset is part of HDF5 group
	if epoch_dataset in get_datasets_from_group(group_name = subject, hdf5_file = ACTIGRAPH_HDF5_FILE):

		# get actigraph 10s epoch data
		epoch_data, _ , epoch_time_data = get_actigraph_epoch_data(subject, epoch_dataset = epoch_dataset, hdf5_file = ACTIGRAPH_HDF5_FILE)

		# if upscale_epoch is set to True, then upsample epoch counts to epoch_sec
		if upscale_epoch:
				
			# convert to 60s epoch data	
			epoch_data, epoch_time_data = get_actigraph_epoch_60_data(epoch_data, epoch_time_data, start_epoch_sec, end_epoch_sec)

		# if set to true, then calculate the vector magnitude of all axes
		if use_vmu:

			# calculate epoch VMU
			epoch_data = calculate_vector_magnitude(epoch_data[:,:3], minus_one = False, round_negative_to_zero = False)

		# create dataframe of actigraph epoch data, cropped between start_time and stop_time
		df_epoch_data = pd.DataFrame(epoch_data, index = epoch_time_data).loc[start_time:stop_time]
	
		return df_epoch_data

	else:
		logging.warning('Subject {} has no {} dataset'.format(subject, epoch_dataset))
		return None
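calculate_vector_magnitude is another helper that is not shown. Based on how it is called on the first three axis columns, a plausible sketch is the row-wise Euclidean norm; the behaviour of the two flags is an assumption inferred from their names:

import numpy as np

def calculate_vector_magnitude(data, minus_one = False, round_negative_to_zero = False):
	# row-wise Euclidean norm over the axis columns: sqrt(x^2 + y^2 + z^2)
	vmu = np.sqrt(np.sum(np.square(data.astype('float64')), axis = 1))
	if minus_one:
		# some VMU definitions subtract 1g to remove the gravity component
		vmu -= 1
	if round_negative_to_zero:
		vmu[vmu < 0] = 0
	return vmu.reshape(-1, 1)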
Code example #4
def process_hecht_2009_triaxial(subject, save_hdf5, idx = 1, total = 1, epoch_dataset = 'epoch10'):
	"""
	Calculate the non-wear time from a data array that contains the vector magnitude (VMU), according to the Hecht 2009 algorithm

	Paper:
	COPD. 2009 Apr;6(2):121-9. doi: 10.1080/15412550902755044.
	Methodology for using long-term accelerometry monitoring to describe daily activity patterns in COPD.
	Hecht A1, Ma S, Porszasz J, Casaburi R; COPD Clinical Research Network.

	Parameters
	---------
	subject : string
		subject ID
	save_hdf5 : os.path
		location of HDF5 file to save non wear data to
	idx : int (optional)
		index of counter, only useful when processing large batches and you want to monitor the status
	total: int (optional)
		total number of subjects to process, only useful when processing large batches and you want to monitor the status
	epoch_dataset : string (optional)
		name of dataset within an HDF5 group that contains the 10sec epoch data
	"""

	logging.info('{style} Processing subject: {} {}/{} {style}'.format(subject, idx, total, style = '='*10))
	
	"""
		ACTIGRAPH DATA
	"""

	# read actigraph acceleration time
	_, _, actigraph_time = get_actigraph_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	
	# get start and stop time
	start_time, stop_time = actigraph_time[0], actigraph_time[-1]

	"""
		EPOCH DATA
	"""

	# check if epoch dataset is part of HDF5 group
	if epoch_dataset in get_datasets_from_group(group_name = subject, hdf5_file = ACTIGRAPH_HDF5_FILE):

		# get actigraph 10s epoch data
		epoch_data, _ , epoch_time_data = get_actigraph_epoch_data(subject, epoch_dataset = epoch_dataset, hdf5_file = ACTIGRAPH_HDF5_FILE)

		# convert to 60s epoch data	
		epoch_60_data, epoch_60_time_data = get_actigraph_epoch_60_data(epoch_data, epoch_time_data)

		# calculate epoch 60 VMU
		epoch_60_vmu_data = calculate_vector_magnitude(epoch_60_data[:,:3], minus_one = False, round_negative_to_zero = False)


		"""
			GET NON WEAR VECTOR
		"""

		# create dataframe of 60s epoch VMU data, cropped between start_time and stop_time
		df_epoch_60_vmu = pd.DataFrame(epoch_60_vmu_data, index = epoch_60_time_data, columns = ['VMU']).loc[start_time:stop_time]

		# retrieve non-wear vector
		epoch_60_vmu_non_wear_vector = hecht_2009_triaxial_calculate_non_wear_time(data = df_epoch_60_vmu.values)

		# get the cropped time array as int64 (cropped because we sliced the previous dataframe between start and stop time)
		epoch_60_time_data_cropped = np.array(df_epoch_60_vmu.index).astype('int64')
		# reshape
		epoch_60_time_data_cropped = epoch_60_time_data_cropped.reshape(len(epoch_60_time_data_cropped), 1)

		# stack the time and non-wear columns horizontally
		combined_data = np.hstack((epoch_60_time_data_cropped, epoch_60_vmu_non_wear_vector))
		
		"""
			SAVE TO HDF5 FILE
		"""
		save_data_to_group_hdf5(group = subject, data = combined_data, data_name = 'hecht_2009_3_axes_non_wear_data', overwrite = True, create_group_if_not_exists = True, hdf5_file = save_hdf5)

	else:
		logging.warning('Subject {} has no corresponding epoch data, skipping...'.format(subject))
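get_actigraph_epoch_60_data is likewise not shown; presumably it aggregates six consecutive 10s epochs into one 60s epoch. A minimal sketch of that resampling with pandas, where summing the counts is an assumption:

import pandas as pd

def get_actigraph_epoch_60_data(epoch_data, epoch_time_data, start_epoch_sec = 10, end_epoch_sec = 60):
	# aggregate counts from start_epoch_sec epochs into end_epoch_sec epochs by summing
	df = pd.DataFrame(epoch_data, index = pd.DatetimeIndex(epoch_time_data))
	df_60 = df.resample(f'{end_epoch_sec}s').sum()
	return df_60.values, df_60.index.values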
Code example #5
def process_plot_non_wear_algorithms(subject, plot_folder, epoch_dataset = 'epoch10', use_optimized_parameters = True, optimized_data_folder = os.path.join('files', 'grid-search', 'final')):
	"""
	Plot acceleration data from actigraph and actiwave including the 4 non wear methods and the true non wear time

	- plot actigraph XYZ
	- plot actiwave YXZ
	- plot actiwave heart rate
	- plot hecht, choi, troiano, v. Hees
	- plot true non wear time

	Parameters
	---------
	subject : string
		subject ID
	plot_folder : os.path
		folder location to save plots to
	epoch_dataset : string (optional)
		name of hdf5 dataset that contains 10s epoch data
	use_optimized_parameters : bool (optional)
		if set to True, recalculate the non wear vectors with the best scoring parameters from the grid search
	optimized_data_folder : os.path (optional)
		folder location of the pickled grid search results
	"""

	optimized_parameters = {}
	classification = 'f1'
	for nw_method in ['hecht', 'troiano', 'choi', 'hees']:
		
		# load data
		data = load_pickle('grid-search-results-{}'.format(nw_method), optimized_data_folder)
		# obtain top parameters
		top_results = sorted(data.items(), key = lambda item: item[1][classification], reverse = True)[0]
		optimized_parameters[nw_method] = top_results[0]
	
	"""
		GET ACTIGRAPH DATA
	"""
	actigraph_acc, _ , actigraph_time = get_actigraph_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	# get start and stop time
	start_time, stop_time = actigraph_time[0], actigraph_time[-1]

	"""
		GET ACTIWAVE DATA
	"""
	actiwave_acc, _ , actiwave_time = get_actiwave_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	actiwave_hr, actiwave_hr_time = get_actiwave_hr_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)

	"""
		EPOCH DATA
	"""

	if epoch_dataset in get_datasets_from_group(group_name = subject, hdf5_file = ACTIGRAPH_HDF5_FILE):

		# get 10s epoch data from HDF5 file
		epoch_data, _ , epoch_time_data = get_actigraph_epoch_data(subject, epoch_dataset = epoch_dataset, hdf5_file = ACTIGRAPH_HDF5_FILE)

		# convert to 60s epoch data	
		epoch_60_data, epoch_60_time_data = get_actigraph_epoch_60_data(epoch_data, epoch_time_data)

		# calculate epoch 60 VMU
		epoch_60_vmu_data = calculate_vector_magnitude(epoch_60_data[:,:3], minus_one = False, round_negative_to_zero = False)


		"""
			GET NON WEAR TIME
		"""
		
		# true non wear time
		true_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'actigraph_true_non_wear', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE) 
		# hecht 3-axes non wear time
		hecht_3_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'hecht_2009_3_axes_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE) 
		# troiano non wear time
		troiano_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'troiano_2007_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
		# choi non wear time
		choi_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'choi_2011_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE) 
		# hees non wear time
		hees_non_wear_time = read_dataset_from_group(group_name = subject, dataset = 'hees_2013_non_wear_data', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)
	
		# if set to True, then update the matrix column with non-wear data
		if use_optimized_parameters:
	
			"""
				READ 60S EPOCH DATA
			"""
			subject_epoch_data = read_dataset_from_group(group_name = subject, dataset = 'epoch60', hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE).astype('float16')
			subject_epoch_data_vmu = calculate_vector_magnitude(subject_epoch_data, minus_one = False, round_negative_to_zero = False)

			"""
				HECHT
			"""
			# unpack variables
			t, i, m = optimized_parameters['hecht'].split('-')
			# use optimized parameters
			hecht_3_non_wear_time[:,1] = hecht_2009_triaxial_calculate_non_wear_time(data = subject_epoch_data_vmu, epoch_sec = 60, threshold = int(t), time_interval_mins = int(i), min_count = int(m))[:,0]
			
			"""
				TROIANO
			"""
			# unpack variables
			at, mpl, st, ss, vm = optimized_parameters['troiano'].split('-')
			# use optimized variables to calculate non wear vector (vm is stored as the string 'True'/'False', so compare instead of eval)
			troiano_non_wear_time[:,1] = troiano_2007_calculate_non_wear_time(subject_epoch_data, None, activity_threshold = int(at), min_period_len = int(mpl), spike_tolerance = int(st), spike_stoplevel = int(ss), use_vector_magnitude = (vm == 'True'), print_output = False)[:,0]
			
			"""
				CHOI
			"""
			at, mpl, st, mwl, wst, vm = optimized_parameters['choi'].split('-')
			# vm is stored as the string 'True'/'False', so compare instead of eval
			choi_non_wear_time[:,1] = choi_2011_calculate_non_wear_time(subject_epoch_data, None, activity_threshold = int(at), min_period_len = int(mpl), spike_tolerance = int(st),  min_window_len = int(mwl), window_spike_tolerance = int(wst), use_vector_magnitude = (vm == 'True'), print_output = False)[:,0]

			"""
				HEES
			"""
			mw, wo, st, sa, vt, va = optimized_parameters['hees'].split('-')
			hees_non_wear_time = hees_2013_calculate_non_wear_time(actigraph_acc, hz = 100, min_non_wear_time_window = int(mw), window_overlap = int(wo), std_mg_threshold = float(st), std_min_num_axes = int(sa) , value_range_mg_threshold = float(vt), value_range_min_num_axes = int(va))
			
		"""
			CREATING THE DATAFRAMES
		"""

		# convert actigraph data to pandas dataframe
		df_actigraph_acc = pd.DataFrame(actigraph_acc, index = actigraph_time, columns = ['ACTIGRAPH Y', 'ACTIGRAPH X', 'ACTIGRAPH Z'])
		# convert actiwave data to pandas dataframe
		df_actiwave_acc = pd.DataFrame(actiwave_acc, index = actiwave_time, columns = ['ACTIWAVE Y', 'ACTIWAVE X', 'ACTIWAVE Z'])
		# convert actiwave hr to pandas dataframe
		df_actiwave_hr = pd.DataFrame(actiwave_hr, index = actiwave_hr_time, columns = ['ESTIMATED HR'])
		# convert 60s epoch vmu to dataframe
		df_epoch_60_vmu = pd.DataFrame(epoch_60_vmu_data, index = epoch_60_time_data, columns = ['EPOCH 60s VMU'])
		# slice based on start and stop time
		df_epoch_60_vmu = df_epoch_60_vmu.loc[start_time:stop_time]
		# create dataframe of true non wear time
		df_true_non_wear_time = pd.DataFrame(true_non_wear_time, index = actigraph_time, columns = ['TRUE NON WEAR TIME'])
		# create dataframe of hecht 3-axes non wear time
		df_hecht_3_non_wear_time = pd.DataFrame(hecht_3_non_wear_time[:,1], index = np.asarray(hecht_3_non_wear_time[:,0], dtype ='datetime64[ns]'), columns = ['HECHT-3 NON WEAR TIME'])
		# create dataframe of troiano non wear time
		df_troiano_non_wear_time = pd.DataFrame(troiano_non_wear_time[:,1], index = np.asarray(troiano_non_wear_time[:,0], dtype = 'datetime64[ns]'), columns = ['TROIANO NON WEAR TIME'])
		# create dataframe of choi non wear time
		df_choi_non_wear_time = pd.DataFrame(choi_non_wear_time[:,1], index = np.asarray(choi_non_wear_time[:,0], dtype = 'datetime64[ns]'), columns = ['CHOI NON WEAR TIME'])
		# create dataframe of hees non wear time
		df_hees_non_wear_time = pd.DataFrame(hees_non_wear_time, index = actigraph_time, columns = ['HEES NON WEAR TIME'])
		
		# merge all dataframes
		df_joined = df_actigraph_acc.join(df_true_non_wear_time, how='outer').join(df_actiwave_acc, how='outer').join(df_hecht_3_non_wear_time, how='outer') \
					.join(df_troiano_non_wear_time, how='outer').join(df_choi_non_wear_time, how='outer').join(df_hees_non_wear_time, how='outer').join(df_epoch_60_vmu, how='outer').join(df_actiwave_hr, how='outer')

		# call plot function
		plot_non_wear_algorithms(data = df_joined, subject = subject, plot_folder = plot_folder)
Code example #6
def process_choi_2011(subject, save_hdf5, idx = 1, total = 1, epoch_dataset = 'epoch10'):
	"""
	Estimate non-wear time based on Choi 2011 paper:

	Med Sci Sports Exerc. 2011 Feb;43(2):357-64. doi: 10.1249/MSS.0b013e3181ed61a3.
	Validation of accelerometer wear and nonwear time classification algorithm.
	Choi L1, Liu Z, Matthews CE, Buchowski MS.

	Parameters
	---------
	subject : string
		subject ID
	save_hdf5 : os.path
		location of HDF5 file to save non wear data to
	idx : int (optional)
		index of counter, only useful when processing large batches and you want to monitor the status
	total: int (optional)
		total number of subjects to process, only useful when processing large batches and you want to monitor the status
	epoch_dataset : string (optional)
		name of dataset within an HDF5 group that contains the 10sec epoch data
	"""

	logging.info('{style} Processing subject: {} {}/{} {style}'.format(subject, idx, total, style = '='*10))

	"""
		ACTIGRAPH DATA
	"""

	# read actigraph acceleration time
	_, _, actigraph_time = get_actigraph_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)

	# get start and stop time
	start_time, stop_time = actigraph_time[0], actigraph_time[-1]


	"""
		EPOCH DATA
	"""
	if epoch_dataset in get_datasets_from_group(group_name = subject, hdf5_file = ACTIGRAPH_HDF5_FILE):

		# get actigraph 10s epoch data
		epoch_data, _ , epoch_time_data = get_actigraph_epoch_data(subject, epoch_dataset = epoch_dataset, hdf5_file = ACTIGRAPH_HDF5_FILE)

		# convert to 60s epoch data	
		epoch_60_data, epoch_60_time_data = get_actigraph_epoch_60_data(epoch_data, epoch_time_data)

		# obtain counts values
		epoch_60_count_data = epoch_60_data[:,:3]

		"""
			GET NON WEAR VECTOR
		"""

		# create dataframe of 60s epoch count data, cropped between start_time and stop_time
		df_epoch_60_count = pd.DataFrame(epoch_60_count_data, index = epoch_60_time_data, columns = ['X - COUNT', 'Y - COUNT', 'Z - COUNT']).loc[start_time:stop_time]

		# retrieve non-wear vector
		epoch_60_count_non_wear_vector = choi_2011_calculate_non_wear_time(data = df_epoch_60_count.values, time = df_epoch_60_count.index.values)

		# get the cropped time array as int64 (cropped because we sliced the previous dataframe between start and stop time)
		epoch_60_time_data_cropped = np.array(df_epoch_60_count.index).astype('int64')
		
		# reshape
		epoch_60_time_data_cropped = epoch_60_time_data_cropped.reshape(len(epoch_60_time_data_cropped), 1)

		# stack the time and non-wear columns horizontally
		combined_data = np.hstack((epoch_60_time_data_cropped, epoch_60_count_non_wear_vector))

		
		"""
			SAVE TO HDF5 FILE
		"""
		
		save_data_to_group_hdf5(group = subject, data = combined_data, data_name = 'choi_2011_non_wear_data', overwrite = True, create_group_if_not_exists = False, hdf5_file = save_hdf5)

	else:
		logging.warning('Subject {} has no corresponding epoch data, skipping...'.format(subject))
Code example #7
def process_troiano_2007(subject, save_hdf5, idx = 1, total = 1, epoch_dataset = 'epoch10'):
	"""
	Calculate non wear time by using Troiano 2007 algorithm

	Troiano 2007 non-wear algorithm
		detects non wear time from 60s epoch counts
		Nonwear was defined by an interval of at least 60 consecutive minutes of zero activity intensity counts, with allowance for 1–2 min of counts between 0 and 100
	Paper:
		Physical Activity in the United States Measured by Accelerometer
	DOI:
		10.1249/mss.0b013e31815a51b3

	Parameters
	---------
	subject : string
		subject ID
	save_hdf5 : os.path
		location of HDF5 file to save non wear data to
	idx : int (optional)
		index of counter, only useful when processing large batches and you want to monitor the status
	total: int (optional)
		total number of subjects to process, only useful when processing large batches and you want to monitor the status
	epoch_dataset : string (optional)
		name of dataset within an HDF5 group that contains the 10sec epoch data
	"""

	logging.info('{style} Processing subject: {} {}/{} {style}'.format(subject, idx, total, style = '='*10))

	"""
		ACTIGRAPH DATA
	"""

	# read actigraph acceleration time
	_, _, actigraph_time = get_actigraph_acc_data(subject, hdf5_file = ACTIWAVE_ACTIGRAPH_MAPPING_HDF5_FILE)

	# get start and stop time
	start_time, stop_time = actigraph_time[0], actigraph_time[-1]


	"""
		EPOCH DATA
	"""
	if epoch_dataset in get_datasets_from_group(group_name = subject, hdf5_file = ACTIGRAPH_HDF5_FILE):

		# get actigraph 10s epoch data
		epoch_data, _ , epoch_time_data = get_actigraph_epoch_data(subject, epoch_dataset = epoch_dataset, hdf5_file = ACTIGRAPH_HDF5_FILE)

		# convert to 60s epoch data	
		epoch_60_data, epoch_60_time_data = get_actigraph_epoch_60_data(epoch_data, epoch_time_data)

		# obtain counts values
		epoch_60_count_data = epoch_60_data[:,:3]

		"""
			GET NON WEAR VECTOR
		"""

		# create dataframe of 60s epoch count data, cropped between start_time and stop_time
		df_epoch_60_count = pd.DataFrame(epoch_60_count_data, index = epoch_60_time_data, columns = ['X - COUNT', 'Y - COUNT', 'Z - COUNT']).loc[start_time:stop_time]

		# retrieve non-wear vector
		epoch_60_count_non_wear_vector = troiano_2007_calculate_non_wear_time(data = df_epoch_60_count.values, time = df_epoch_60_count.index.values)

		# get the cropped time array as int64 (cropped because we sliced the previous dataframe between start and stop time)
		epoch_60_time_data_cropped = np.array(df_epoch_60_count.index).astype('int64')
		# reshape
		epoch_60_time_data_cropped = epoch_60_time_data_cropped.reshape(len(epoch_60_time_data_cropped), 1)

		# stack the time and non-wear columns horizontally
		combined_data = np.hstack((epoch_60_time_data_cropped, epoch_60_count_non_wear_vector))

		
		"""
			SAVE TO HDF5 FILE
		"""
	
		save_data_to_group_hdf5(group = subject, data = combined_data, data_name = 'troiano_2007_non_wear_data', overwrite = True, create_group_if_not_exists = True, hdf5_file = save_hdf5)

	else:
		logging.warning('Subject {} has no corresponding epoch data, skipping...'.format(subject))
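The non-wear detectors themselves (troiano_2007_calculate_non_wear_time, choi_2011_calculate_non_wear_time) are not shown. A simplified, illustrative sketch of the core rule both share, as described in the Troiano docstring above (at least min_period_len consecutive zero-count minutes, tolerating a few low-count spike minutes); the published algorithms add more bookkeeping than this:

import numpy as np

def detect_non_wear(counts, min_period_len = 60, spike_tolerance = 2, spike_threshold = 100):
	# counts: 1D array of 60s epoch activity counts; returns 1 = worn, 0 = non-wear
	wear = np.ones(len(counts), dtype = 'uint8')
	i = 0
	while i < len(counts):
		# a candidate non-wear interval starts at a zero-count epoch
		if counts[i] == 0:
			j, spikes = i, 0
			while j < len(counts):
				if counts[j] == 0:
					j += 1
				elif counts[j] < spike_threshold and spikes < spike_tolerance:
					# allow a limited number of low-count spike minutes
					spikes += 1
					j += 1
				else:
					break
			# only mark the interval as non-wear when it is long enough
			if j - i >= min_period_len:
				wear[i:j] = 0
			i = j
		else:
			i += 1
	return wear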
Code example #8
def perform_calculate_tissue_distributions(paths, params):
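	"""
	Calculate per-slice distributions of damaged and non-damaged tissue and save them as a CSV table
	"""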

	# hdf5 file that contains the original images
	hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])
	
	# get all patient names from original MRI group
	patients = get_datasets_from_group(group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

	# empty pandas dataframe to hold all data
	data = pd.DataFrame()

	# loop over each patient, read segmented data, calculate tissue distributions
	for i, patient in enumerate(patients):

		logging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')

		# parse out treatment, sample, and state from patient name
		treatment, sample, state = parse_patientname(patient_name = patient)

		# read images
		images = read_dataset_from_group(dataset = patient, group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

		# handle connected tissue (for example, set connected damaged tissue to damaged tissue)
		images = process_connected_tissue(images = images, params = params)

		# reshape image to unroll pixels in last two dimensions. Go from (54, 256, 256) to (54, 65536)
		images = images.reshape(images.shape[0], -1)
		
		# count damaged tissue
		damaged_tissue = np.sum((images == 1), axis = 1)
		# count non_damaged tissue
		non_damaged_tissue = np.sum((images == 2), axis = 1)
		# relative damaged
		rel_damaged = damaged_tissue / (damaged_tissue + non_damaged_tissue) * 100
		# relative non-damaged
		rel_non_damaged = 100 - rel_damaged
		
		# process data for each slice
		for mri_slice in range(images.shape[0]):

			# check slice validity
			if check_mri_slice_validity(patient = patient, mri_slice = mri_slice, total_num_slices = images.shape[0]):

				# add data to dictionary
				mri_slice_data = {	'patient' : patient,
									'treatment' : treatment,
									'sample' : sample,
									'state' : state,
									'mri_slice' : mri_slice,
									'damaged_pixels' : damaged_tissue[mri_slice],
									'non_damaged_pixels' : non_damaged_tissue[mri_slice],
									'rel_damaged' : rel_damaged[mri_slice],
									'rel_non_damaged' : rel_non_damaged[mri_slice],
									}
				# create unique ID
				mri_slice_id = f'{treatment}_{sample}_{state}_{mri_slice}'
				
				# add to pandas dataframe
				data[mri_slice_id] = pd.Series(mri_slice_data)

	# transpose and save dataframe as CSV
	data.T.to_csv(os.path.join(paths['table_folder'], 'tissue_distributions.csv'))
Code example #9
def process_plot_mri_with_damaged(paths, params):
    """
    Plot the original MRI on the left and the MRI with damaged tissue overlaid on the right
    """

    # hdf5 file that contains the original images
    hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

    # get all patient names from original MRI group
    patients = get_datasets_from_group(group_name=params['group_original_mri'],
                                       hdf5_file=hdf5_file)

    # get unique patient names without the state suffix (fersk/Tint)
    patients = {re.search('(.*) (fersk|Tint)', x).group(1) for x in patients}

    # loop over each patient, read data, create the plots
    for i, patient in enumerate(patients):

        logging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')

        # parse out treatment, sample, and state from patient name
        treatment, _, _ = parse_patientname(patient_name=f'{patient} fersk')
        """
		Get fresh state
		"""
        # read original images
        fresh_original_images = read_dataset_from_group(
            dataset=f'{patient} fersk',
            group_name=params['group_original_mri'],
            hdf5_file=hdf5_file)
        # read reconstructed images
        fresh_reconstructed_images = read_dataset_from_group(
            dataset=f'{patient} fersk',
            group_name=params['group_segmented_classification_mri'],
            hdf5_file=hdf5_file)
        # map connected tissue, then keep only damaged tissue (class 1) as a boolean mask
        fresh_reconstructed_damaged_images = (process_connected_tissue(
            images=fresh_reconstructed_images.copy(), params=params) == 1)
        """
		Get frozen/thawed
		"""
        # read original images
        frozen_original_images = read_dataset_from_group(
            dataset=f'{patient} Tint',
            group_name=params['group_original_mri'],
            hdf5_file=hdf5_file)
        # read reconstructed images
        frozen_reconstructed_images = read_dataset_from_group(
            dataset=f'{patient} Tint',
            group_name=params['group_segmented_classification_mri'],
            hdf5_file=hdf5_file)
        # map connected tissue, then keep only damaged tissue (class 1) as a boolean mask
        frozen_reconstructed_damaged_images = (process_connected_tissue(
            images=frozen_reconstructed_images.copy(), params=params) == 1)

        # get total number of slices to process
        total_num_slices = fresh_original_images.shape[0]
        # loop over each slice
        for mri_slice in range(total_num_slices):

            # check slice validity of fresh patient
            if check_mri_slice_validity(patient=f'{patient} fersk',
                                        mri_slice=mri_slice,
                                        total_num_slices=total_num_slices):

                if check_mri_slice_validity(patient=f'{patient} Tint',
                                            mri_slice=mri_slice,
                                            total_num_slices=total_num_slices):

                    # setting up the plot environment
                    fig, axs = plt.subplots(2, 2, figsize=(8, 8))
                    axs = axs.ravel()

                    # define the colors we want
                    plot_colors = ['#250463', '#e34a33']
                    # create a custom listed colormap (so we can overwrite the colors of predefined cmaps)
                    cmap = colors.ListedColormap(plot_colors)
                    # cycle of subfigure labels: a, b, c, d, etc.
                    sf = cycle(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
                    """
					Plot fresh state
					"""
                    # obtain vmax score so image grayscales are normalized better
                    vmax_percentile = 99.9
                    vmax = np.percentile(fresh_original_images[mri_slice],
                                         vmax_percentile)

                    # plot fresh original MRI image
                    axs[0].imshow(fresh_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[0].set_title(
                        rf'$\bf({next(sf)})$ Fresh - Original MRI')

                    # plot fresh reconstructed image overlaid on top of the original image
                    # axs[1].imshow(fresh_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)
                    # im = axs[1].imshow(fresh_reconstructed_images[mri_slice],alpha = 0.7, interpolation = 'none')
                    # axs[1].set_title(rf'$\bf({next(sf)})$ Fresh - Reconstructed')

                    # plot fresh reconstructed image overlaid on top of the original image
                    axs[1].imshow(fresh_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[1].imshow(
                        fresh_reconstructed_damaged_images[mri_slice],
                        cmap=cmap,
                        alpha=.5,
                        interpolation='none')
                    axs[1].set_title(
                        rf'$\bf({next(sf)})$ Fresh - Reconstructed')
                    """
					Plot frozen/thawed state
					"""
                    # plot frozen/thawed original MRI image
                    # obtain vmax score so image grayscales are normalized better
                    vmax = np.percentile(frozen_original_images[mri_slice],
                                         vmax_percentile)
                    axs[2].imshow(frozen_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[2].set_title(
                        rf'$\bf({next(sf)})$ {treatment_to_title(treatment)} - Original MRI'
                    )

                    # plot frozen reconstructed image with all classes
                    # axs[4].imshow(frozen_original_images[mri_slice], cmap = 'gray', vmin = 0, vmax = vmax)
                    # im = axs[4].imshow(frozen_reconstructed_images[mri_slice], alpha = 0.7, interpolation = 'none')
                    # axs[4].set_title(rf'$\bf({next(sf)})$ {treatment_to_title(treatment)} - Reconstructed')

                    # plot frozen/thawed reconstructed image overlaid on top of the original image
                    axs[3].imshow(frozen_original_images[mri_slice],
                                  cmap='gray',
                                  vmin=0,
                                  vmax=vmax)
                    axs[3].imshow(
                        frozen_reconstructed_damaged_images[mri_slice],
                        cmap=cmap,
                        alpha=.5,
                        interpolation='none')
                    axs[3].set_title(
                        rf'$\bf({next(sf)})$ {treatment_to_title(treatment)} - Reconstructed'
                    )
                    """
					Create custom legend
					"""
                    # add custom legend
                    class_labels = {0: 'background', 1: 'damaged tissue'}
                    class_values = list(class_labels.keys())
                    # create a patch
                    patches = [
                        mpatches.Patch(color=plot_colors[i],
                                       label=class_labels[i])
                        for i in range(len(class_values))
                    ]
                    axs[1].legend(
                        handles=patches
                    )  #, bbox_to_anchor=(1.05, 1), loc = 2, borderaxespad=0. )

                    # legend for fully reconstructed image
                    # get class labels
                    # class_labels = params['class_labels']
                    # # get class indexes from dictionary
                    # values = class_labels.keys()
                    # # get the colors of the values, according to the
                    # # colormap used by imshow
                    # plt_colors = [ im.cmap(im.norm(value)) for value in values]
                    # # create a patch (proxy artist) for every color
                    # patches = [ mpatches.Patch(color = plt_colors[i], label= class_labels[i]) for i in range(len(values)) ]
                    # # put those patched as legend-handles into the legend
                    # axs[1].legend(handles = patches)#, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
                    """
					Adjust figures
					"""
                    # remove axes of all subplots
                    for ax in axs:
                        ax.axis('off')
                    # define plot subfolder
                    subfolder = os.path.join(paths['paper_plot_folder'],
                                             'original_vs_reconstructed',
                                             patient)
                    # create subfolder
                    create_directory(subfolder)
                    # crop white space
                    fig.set_tight_layout(True)
                    # save the figure
                    fig.savefig(
                        os.path.join(subfolder, f'slice_{mri_slice}.pdf'))

                    # close the figure environment
                    plt.close()
Code example #10
def plot_segmented_images(paths, params):
	"""
	Plot segmented images
	"""

	# construct the path to the hdf5 file
	hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

	# get list of patient names to plot
	patients = get_datasets_from_group(group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

	# plot each patient
	for i, patient in enumerate(patients):

		logging.info(f'Processing patient: {patient} {i + 1}/{len(patients)}')

		# read segmented images
		images = read_dataset_from_group(dataset = patient, group_name = params['group_segmented_classification_mri'], hdf5_file = hdf5_file)

		# set up plotting environment
		fig, axs = plt.subplots(6,9, figsize = (20,20))		
		axs = axs.ravel()

		# loop over each slice and print
		for mri_slice in range(images.shape[0]):

			logging.debug(f'Processing slice: {mri_slice}') 

			# check slice validity
			if check_mri_slice_validity(patient = patient, mri_slice = mri_slice, total_num_slices = images.shape[0]):

				# plot image
				im = axs[mri_slice].imshow(images[mri_slice], vmin = 0, vmax = 5, interpolation='none')
			axs[mri_slice].set_title(f'{mri_slice}')

		# get class labels
		class_labels = params['class_labels']
		# get class indexes from dictionary
		values = class_labels.keys()
		# get the colors of the values, according to the colormap used by imshow
		# (plt_colors rather than colors, to avoid shadowing the matplotlib.colors module; note that im is only defined once at least one valid slice has been plotted)
		plt_colors = [ im.cmap(im.norm(value)) for value in values]
		# create a patch (proxy artist) for every color
		patches = [ mpatches.Patch(color = plt_colors[i], label = class_labels[i]) for i in range(len(values)) ]
		# put those patches as legend-handles into the legend
		plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
		
		# make adjustments to each subplot	
		for ax in axs:
			ax.axis('off')

		# create plotfolder subfolder
		plot_sub_folder = os.path.join(paths['plot_folder'], 'segmentation', params['cnn_model'])
		create_directory(plot_sub_folder)

		# crop white space
		fig.set_tight_layout(True)
		# save the figure
		fig.savefig(os.path.join(plot_sub_folder, f'{patient}.png'))

		# close the figure environment
		plt.close()
Code example #11
def remove_bg(paths, params):
    """
	Remove background from MRI images

	Parameters
	--------------
	hdf5_file : os.path
		location of HDF5 that contains the raw MRI data, and where we want to save data to
	img_group_name : string
		name of HDF5 group that contains the raw MRI images
	save_group_name : string
		name of HDF5 group to store images with background removed
	"""

    # construct the path to the hdf5 file
    hdf5_file = os.path.join(paths['hdf5_folder'], params['hdf5_file'])

    # read original MRI datasets from HDF5 file
    D = get_datasets_from_group(group_name=params['group_original_mri'],
                                hdf5_file=hdf5_file)

    # read data from each dataset and plot mri data
    for d_idx, d in enumerate(D):

        logging.info(f'Processing dataset : {d} {d_idx + 1}/{len(D)}')

        # read data from group
        data = read_dataset_from_group(group_name=params['group_original_mri'],
                                       dataset=d,
                                       hdf5_file=hdf5_file)

        # read meta data
        meta_data = read_metadata_from_group_dataset(
            group_name=params['group_original_mri'],
            dataset=d,
            hdf5_file=hdf5_file)

        logging.info(f'Processing patient : {meta_data["PatientName"]}')

        # new numpy array to hold segmented data
        data_segmented = np.empty_like(data, dtype='int16')

        # process each slice
        for i in range(data.shape[0]):

            # ind_cycle = cycle(range(10))
            # fig, axs = plt.subplots(1,8, figsize = (20,5))
            # axs = axs.ravel()

            # original MRI
            img = data[i]
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Original MRI')

            # change grayscale
            img = change_img_contrast(img, phi=10, theta=1)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Changed gray scale')

            # convert to 8 bit
            if d not in ['Torsk 1-4 fersk']:
                img = np.array(img, dtype='uint8')
                # plt_index = next(ind_cycle)
                # axs[plt_index].imshow(img, cmap = 'gray')
                # axs[plt_index].set_title('Convert to 8 bit')

            # inverted colors
            # img = (255) - img
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Inverted MRI')

            # max filter
            img = ndimage.maximum_filter(img, size=7)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Max filter')

            # erosion
            img = cv2.erode(img, None, iterations=4)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Erosion')

            # gaussian filter
            img = cv2.GaussianBlur(img, (11, 11), 0)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('Gaussian Blur')

            # knn bg remove
            segmented_img = perform_knn_segmentation(n_clusters=2, img=img)
            img = mask_image(img=data[i],
                             segmented_img=segmented_img,
                             mask_value=segmented_img[0][0],
                             fill_value=0)
            # plt_index = next(ind_cycle)
            # axs[plt_index].imshow(img, cmap = 'gray')
            # axs[plt_index].set_title('KNN BG remove')

            # add masked image to data_segmented, where we store each slice
            data_segmented[i] = img

            # plt.show()

        # save data to HDF5
        save_data_to_group_hdf5(group=params['group_no_bg'],
                                data=data_segmented,
                                data_name=d,
                                hdf5_file=hdf5_file,
                                meta_data=meta_data,
                                overwrite=True)
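perform_knn_segmentation and mask_image are not shown either. Despite the 'knn' in the name, the n_clusters argument suggests a clustering step; a minimal sketch with scikit-learn's KMeans, assuming two intensity clusters and that the corner pixel segmented_img[0][0] belongs to the background cluster (which is how the call above picks its mask_value):

from sklearn.cluster import KMeans

def perform_knn_segmentation(n_clusters, img):
    # cluster pixel intensities into n_clusters groups and return the label image
    labels = KMeans(n_clusters = n_clusters, n_init = 10).fit_predict(img.reshape(-1, 1))
    return labels.reshape(img.shape)

def mask_image(img, segmented_img, mask_value, fill_value):
    # set every pixel whose cluster label equals mask_value to fill_value
    masked = img.copy()
    masked[segmented_img == mask_value] = fill_value
    return masked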