def test(object_folder, model_path, filename_list, result_path, flags, igpu):
    """Segment DCIS regions on each H&E image in filename_list.

    Restores a trained network from model_path and runs patch-based
    segmentation on every image, writing one PNG mask per image into
    result_path. Images whose output PNG already exists are skipped.

    param: object_folder - experiment folder containing 'checkpoint/network_stats.mat'
    param: model_path - path to the TensorFlow checkpoint to restore
    param: filename_list - list of 1-element sequences, each holding an image path
    param: result_path - directory that receives the segmentation PNGs
    param: flags - config dict; uses 'gpu', 'test_batch_size',
        'size_input_patch', 'gpu_memory_fraction'
    param: igpu - string assigned to CUDA_VISIBLE_DEVICES (GPU selection)
    return: None (writes PNG files as a side effect)
    """
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    ###########################################################
    # Pin the process to the requested GPU before graph creation.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = igpu
    ###########################################################

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Placeholder for input patches: (batch,) + size_input_patch.
        images_test = tf.placeholder(tf.float32, shape=(np.hstack([
            flags['test_batch_size'], flags['size_input_patch']
        ])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)

        # Saver and initialisation.
        # Fixed: tf.initialize_all_variables() is deprecated; use the
        # replacement that the sibling testWSI() already uses.
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = result_path
            routine.create_dir(result_dir)

            for iImage, file in enumerate(filename_list):
                start_time = time.time()
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')

                # Skip images that were already segmented on a previous run.
                if not os.path.exists(savename):
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)
                    KSimage.imwrite(result, savename)

                duration = time.time() - start_time
                print(
                    'Finish segmenting DCIS regions on the H&E image of sample %d out of %d samples (%.2f sec)'
                    % (iImage + 1, len(filename_list), duration))
def testWSI(object_folder, model_path, directory, flags):
    """Segment every whole-slide image (WSI) PNG found in ``directory``.

    Restores the trained network from ``model_path`` and writes one
    segmentation PNG per slide into ``<directory>_epiStromalSeg``.
    Files whose output already exists, or whose name contains 'mask'
    or 'thumbnail', are skipped.

    param: object_folder - experiment folder containing 'checkpoint/network_stats.mat'
    param: model_path - TensorFlow checkpoint to restore
    param: directory - folder of input '*.png' slides
    param: flags - config dict; uses 'gpu', 'test_batch_size',
        'size_input_patch', 'gpu_memory_fraction'
    return: None (writes segmentation results as a side effect)
    """
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    stats = matlab.load(os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(stats['mean_image'])
    variance_image = np.float32(stats['variance_image'])

    startTime = time.time()

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Placeholder for input patches: (batch,) + size_input_patch.
        batch_shape = np.hstack([flags['test_batch_size'],
                                 flags['size_input_patch']])
        images_test = tf.placeholder(tf.float32, shape=batch_shape)

        # Build the inference network.
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(images_test,
                                                         keep_prob, flags)

        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Restore the trained weights.
            sess.run(init)
            saver.restore(sess, model_path)

            print("Current directory: " + str(directory))
            result_dir = os.path.join(directory + '_epiStromalSeg')
            create_dir(result_dir)

            # All PNG files in the directory are candidate slides.
            filename_list = glob.glob(os.path.join(directory, '*.png'))
            print("Num Files: " + str(len(filename_list)))

            for file in filename_list:
                print(file)
                stem = os.path.splitext(os.path.basename(file))[0]
                savename = os.path.join(result_dir, stem + '.png')

                # Skip already-processed slides and auxiliary images.
                already_done = os.path.exists(savename)
                auxiliary = 'mask' in file or 'thumbnail' in file
                if not already_done and not auxiliary:
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)
                    KSimage.imwrite(result, savename)

    print("Total Time: " + str(time.time() - startTime))
def gen_train_val_data(nth_fold, flags):
    """Generate training and validation patch data for network training.

    Locates the fold directory ('cv<n>' or 'perm<n>') inside the experiment
    folder, builds train/val sub-directories, extracts patches according to
    flags['gen_train_val_method'], and writes a CSV log listing every patch
    produced for each split.

    param: nth_fold - fold index used to pick the 'cv'/'perm' folder
    param: flags - config dict; uses 'experiment_folder',
        'gen_train_val_method', 'size_input_patch', 'size_output_patch',
        'stride', 'image_ext', 'groundtruth_ext', 'weight_ext'
    return: None (side effects: directories, patch images, CSV logs)
    """
    ########## check whether 'cv' or 'perm' exists and which one to use ##########
    list_dir = os.listdir(os.path.join(flags['experiment_folder']))
    if ('cv' + str(nth_fold) in list_dir) and ('perm' + str(nth_fold) in list_dir):
        raise ValueError('Dangerous! You have both cv and perm on the path.')
    elif 'cv' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'], 'cv' + str(nth_fold))
    elif 'perm' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'], 'perm' + str(nth_fold))
    else:
        raise ValueError('No cv or perm folder!')

    ########## create train and val paths ##########
    path_dict = dict()
    path_dict['train_folder'] = os.path.join(object_folder, 'train')
    path_dict['val_folder'] = os.path.join(object_folder, 'val')
    create_dir(path_dict['train_folder'])
    create_dir(path_dict['val_folder'])
    print("Gets to the beginning of an if statement")

    ########## extract patches and put in a designated directory ##########
    if flags['gen_train_val_method'] == 'sliding_window':
        # One sub-folder per data kind in both the train and val folders.
        key_list = ['image', 'groundtruth', 'weight']
        for key in key_list:
            path_dict['train_' + key + '_folder'] = os.path.join(
                path_dict['train_folder'], key)
            create_dir(path_dict['train_' + key + '_folder'])
            path_dict['val_' + key + '_folder'] = os.path.join(
                path_dict['val_folder'], key)
            create_dir(path_dict['val_' + key + '_folder'])

        # Read the per-split file lists prepared by an earlier stage.
        list_dict = dict()
        for key in key_list:
            list_dict['train_' + key + '_list'] = KScsv.read_csv(
                os.path.join(object_folder, 'train_' + key + '_list.csv'))
            list_dict['val_' + key + '_list'] = KScsv.read_csv(
                os.path.join(object_folder, 'val_' + key + '_list.csv'))

        ########## train ##########
        for key in ['train', 'val']:
            # A pre-existing log file means this split was already generated.
            if not os.path.isfile(
                    os.path.join(path_dict[key + '_folder'], key + '_log.csv')):
                log_data = list()
                for i_image in range(len(list_dict[key + '_image_list'])):
                    tic = time.time()
                    path_image = list_dict[key + '_image_list'][i_image][0]
                    path_groundtruth = list_dict[
                        key + '_groundtruth_list'][i_image][0]
                    path_weight = list_dict[key + '_weight_list'][i_image][0]

                    # Resize image, groundtruth, and weight from 10x input size
                    # to 2.5x (level at which network operates).
                    image = KSimage.imread(path_image)
                    image = KSimage.imresize(image, 0.25)
                    groundtruth = KSimage.imread(path_groundtruth)
                    groundtruth = KSimage.imresize(groundtruth, 0.25)
                    weight = KSimage.imread(path_weight)
                    weight = KSimage.imresize(weight, 0.25)

                    # Make sure that groundtruth images have depth = 1.
                    # NOTE(review): this keeps channel index 1, not 0 —
                    # confirm the intended channel.
                    if (len(groundtruth.shape) > 2 and groundtruth.shape[2] > 1):
                        groundtruth = groundtruth[:, :, 1]

                    # Label remapping (order matters: 3->2 must run before 4->3).
                    groundtruth[
                        groundtruth == 3] = 2  # remove all intra-stromal epithelium labels and set them simply to stroma
                    groundtruth[
                        groundtruth == 4] = 3  # fat label was originally 4 but is now changed to 3

                    dict_obj = {
                        'image': image,
                        'groundtruth': groundtruth,
                        'weight': weight
                    }
                    # Yields (patch dict, coordinate dict) pairs over a grid.
                    extractor = extract_patches.sliding_window(
                        dict_obj, flags['size_input_patch'],
                        flags['size_output_patch'], flags['stride'])

                    for j, (out_obj_dict, coord_dict) in enumerate(extractor):
                        images = out_obj_dict['image']
                        groundtruths = out_obj_dict['groundtruth']
                        weights = out_obj_dict['weight']
                        coord_images = coord_dict['image']

                        #############################################################
                        # Patch filenames encode source image, patch index, and
                        # the top-left (row, col) coordinate of the patch.
                        basename = os.path.basename(path_image)
                        basename = os.path.splitext(basename)[0]
                        image_name = os.path.join(
                            path_dict[key + '_image_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['image_ext'])
                        label_name = os.path.join(
                            path_dict[key + '_groundtruth_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['groundtruth_ext'])
                        weight_name = os.path.join(
                            path_dict[key + '_weight_folder'],
                            basename + '_idx' + str(j) + '_row' +
                            str(coord_images[0]) + '_col' +
                            str(coord_images[1]) + flags['weight_ext'])

                        # Write each patch only once; re-runs reuse existing files.
                        if not os.path.isfile(image_name):
                            KSimage.imwrite(images, image_name)
                        if not os.path.isfile(label_name):
                            KSimage.imwrite(groundtruths, label_name)
                        if not os.path.isfile(weight_name):
                            KSimage.imwrite(weights, weight_name)

                        log_data.append((image_name, label_name, weight_name))

                    print('finish processing %d image from %d images : %.2f' %
                          (i_image + 1, len(list_dict[key + '_image_list']),
                           time.time() - tic))

                # Persist the patch list so the split is skipped next time.
                KScsv.write_csv(
                    log_data,
                    os.path.join(path_dict[key + '_folder'], key + '_log.csv'))

    ####################################################################################################################
    else:
        print(
            "ONLY SLIDING WINDOW TRAINING IS SUPPORTED!!!! Training terminated."
        )
        return
def test(object_folder, model_path, filename_list, flags):
    """Segment every image in filename_list with the restored network.

    Restores a trained network from model_path and runs patch-based
    segmentation on each image, writing one PNG per image into
    '<object_folder>/result'. Images whose output already exists are skipped.

    param: object_folder - experiment folder containing 'checkpoint/network_stats.mat'
    param: model_path - TensorFlow checkpoint to restore
    param: filename_list - list of 1-element sequences, each holding an image path
    param: flags - config dict; uses 'gpu', 'test_batch_size',
        'size_input_patch', 'gpu_memory_fraction'
    return: None (writes PNG files as a side effect)
    """
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    startTime = time.time()

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # Placeholder for input patches: (batch,) + size_input_patch.
        images_test = tf.placeholder(tf.float32, shape=(np.hstack([
            flags['test_batch_size'], flags['size_input_patch']
        ])))

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)

        # Saver and initialisation.
        # Fixed: tf.initialize_all_variables() is deprecated; use the
        # replacement that testWSI() already uses.
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            routine.create_dir(result_dir)
            print("FILENAME LIST", filename_list)

            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')

                # Skip images that were already segmented on a previous run.
                if not os.path.exists(savename):
                    print('processing image %d/%d' %
                          (iImage + 1, len(filename_list)))
                    result = batch_processing(file, sess, logits_test,
                                              parameters, images_test,
                                              keep_prob, mean_image,
                                              variance_image, flags)
                    KSimage.imwrite(result, savename)

    print("Total Time: " + str(time.time() - startTime))
# Pipeline driver: build trivial all-foreground masks for every H&E slide,
# then run cell segmentation and DCIS (tumour) segmentation on GPU 0.
# NOTE: `he_dir` and `he_cell_segmentation_result_path` are defined earlier
# in this script.
he_dcis_segmentation_result_path = os.path.join('Result_tumour')
dict_path = {'he': he_dir}
dict_ext = {'he': '.tiff'}
gpu_list = ['0']

#######################################################################
# Generate a full-coverage (all 255) uint8 mask for each slide so the
# downstream segmentation modules process the entire image.
mask_path = 'Mask'
routine.create_dir(mask_path)

he_files = glob.glob(os.path.join(dict_path['he'], '*' + dict_ext['he']))
for he_file in he_files:
    stem = os.path.splitext(os.path.basename(he_file))[0]
    savename = os.path.join(mask_path, stem + '.png')
    img = KSimage.imread(he_file)
    # Mask matches the slide's height/width; every pixel enabled.
    mask = np.full((img.shape[0], img.shape[1]), 255, dtype=np.uint8)
    KSimage.imwrite(mask, savename)

#######################################################################
# H&E cell segmentation.
Modules.he_cell_segmentation(he_dir, dict_ext, mask_path,
                             he_cell_segmentation_result_path, gpu_list)

# H&E tumour (DCIS) segmentation.
Modules.he_dcis_segmentation(he_dir, dict_ext,
                             he_dcis_segmentation_result_path, gpu_list)
# Perform Color Deconvolution of Source Image to get stain concentration matrix C, M_source = deconvolve(source, M_source, I0_source) # Vectorize to Nx3 matrix C = C.reshape(-1, 3) # Find the 99th percentile of stain concentration(for each channel) max_C_source = np.percentile(a=C, q=99, axis=0) # main normalisation C = C / max_C_source C = C * max_C_target # Reconstruct the RGB image norm = I0_source * np.exp(np.matmul(C, -M_target)) - 1 norm = norm.reshape(h, w, 3) norm = norm.clip(0, I0_source).astype(source.dtype) return norm ##################################################################### target = KSimage.imread('1_421_1_2_7_999_11.jpg') source = KSimage.imread('b001.tif') norm = stain_normalisation_macenko(source, target) KSimage.imwrite(norm, 'result.tiff') print('done')
def test(object_folder, model_path, filename_list, flags):
    """Segment a directory of test images, whole-image or patch-based.

    Restores the trained network from model_path and, depending on
    flags['use_patches'], segments each image either in one pass
    (whole_image_processing) or patch-by-patch (batch_processing),
    writing one PNG per image into '<object_folder>/result'.

    param: object_folder - experiment folder containing 'checkpoint/network_stats.mat'
    param: model_path - TensorFlow checkpoint to restore
    param: filename_list - list of 1-element sequences, each holding an image path
    param: flags - config dict; uses 'gpu', 'use_patches',
        'gpu_memory_fraction'
    return: None (writes PNG files as a side effect)
    """
    checkpoint_dir = os.path.join(object_folder, 'checkpoint')
    mat_contents = matlab.load(
        os.path.join(checkpoint_dir, 'network_stats.mat'))
    mean_image = np.float32(mat_contents['mean_image'])
    variance_image = np.float32(mat_contents['variance_image'])

    # Collapse the 256 x 256 x 3 statistics into per-channel scalars
    # (1 x 1 x 3) for the whole-image path, which feeds variable sizes.
    mean_image_new = np.array([mean_image[:, :, 0].mean(),
                               mean_image[:, :, 1].mean(),
                               mean_image[:, :, 2].mean()])
    variance_image_new = np.array([variance_image[:, :, 0].mean(),
                                   variance_image[:, :, 1].mean(),
                                   variance_image[:, :, 2].mean()])

    with tf.Graph().as_default(), tf.device(flags['gpu']):
        keep_prob = tf.placeholder(tf.float32)

        # No fixed shape: whole-image mode feeds arbitrarily-sized inputs.
        images_test = tf.placeholder(tf.float32)

        # Network
        with tf.variable_scope("network") as scope:
            logits_test, parameters = tf_model.inference(
                images_test, keep_prob, flags)

        # Saver and initialisation
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=flags['gpu_memory_fraction'])
        config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)

        with tf.Session(config=config) as sess:
            # Initialise and load variables
            sess.run(init)
            saver.restore(sess, model_path)

            result_dir = os.path.join(object_folder, 'result')
            create_dir(result_dir)

            start_time = time.time()
            for iImage, file in enumerate(filename_list):
                file = file[0]
                basename = os.path.basename(file)
                basename = os.path.splitext(basename)[0]
                savename = os.path.join(result_dir, basename + '.png')
                print('processing image %d/%d' %
                      (iImage + 1, len(filename_list)))
                print("FILE!!!!!!!!!!!" + str(file))

                if (flags['use_patches'] == False):
                    # Whole-image path uses the per-channel statistics.
                    result = whole_image_processing(
                        file, sess, logits_test, parameters, images_test,
                        keep_prob, mean_image_new, variance_image_new, flags)
                else:
                    # Fixed: original was missing the comma after keep_prob,
                    # which made this call a syntax error.
                    result = batch_processing(
                        file, sess, logits_test, parameters, images_test,
                        keep_prob, mean_image, variance_image, flags)

                print("Image processed")
                KSimage.imwrite(result, savename)

            print("TOTAL DURATION : " + str(time.time() - start_time))