def visualize_samples(session, experiment_path, patches_imgs_samples, patches_gts_samples, patch_size):
    patches_imgs_samples = (patches_imgs_samples[0:20] + 3) * 255. / 6.
    patches_gts_samples = tf.cast(patches_gts_samples[0:20, 1] * 255., tf.float32)
    patches_gts_samples = tf.reshape(patches_gts_samples, (20, 1, patch_size[0], patch_size[1]))
    imgs_samples = session.run(tf.concat([patches_imgs_samples, patches_gts_samples], 0))
    visualize(group_images(imgs_samples, 5), experiment_path + '/' + "sample_input")
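# The two helpers below are NOT part of this module; they are a minimal sketch of what the
# group_images/visualize utilities used throughout this file are assumed to do: tile a batch
# of (N, C, H, W) patches into a grid and save it to disk. The names (suffixed "_sketch"),
# the channel-last transpose, and the default file format are assumptions, not the actual
# implementation shipped with this code.
def group_images_sketch(data, per_row):
    # data: (N, C, H, W); N is assumed to be a multiple of per_row
    data = np.transpose(data, (0, 2, 3, 1))  # -> (N, H, W, C)
    rows = [np.concatenate(data[i:i + per_row], axis=1)
            for i in range(0, data.shape[0], per_row)]
    return np.concatenate(rows, axis=0)


def visualize_sketch(image, filename, mode="png"):
    from PIL import Image
    img = Image.fromarray(np.clip(image, 0, 255).astype(np.uint8).squeeze())
    img.save(filename + "." + mode)
    return img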
def get_data_training(train_imgs_original, train_groundTruth, patch_height, patch_width, N_subimgs, inside_FOV, patches):
    # Load train images from hdf5 files produced by pre-processing
    train_imgs_original = load_hdf5(train_imgs_original)
    train_groundTruth = load_hdf5(train_groundTruth)  # masks always the same
    # Normalize images and ground truths
    train_imgs = my_PreProc(train_imgs_original)
    train_groundTruth = train_groundTruth / 255.
    visualize(group_images(train_imgs[100:120, :, :, :], 5), 'imgs_train')  # check original train imgs
    # Shuffle images and ground truths with the same random permutation
    idx = np.random.permutation(train_imgs.shape[0])
    train_imgs = train_imgs[idx]
    train_groundTruth = train_groundTruth[idx]
    visualize(group_images(train_imgs[100:120, :, :, :], 5), 'imgs_train_random')
    visualize(group_images(train_groundTruth[100:120, :, :, :], 5), 'gTruths_train_random')
    # train_imgs = train_imgs[:, :, 9:574, :]          # cut bottom and top so now it is 565*565
    # train_groundTruth = train_groundTruth[:, :, 9:574, :]
    # data_consistency_check(train_imgs, train_groundTruth)
    # Check masks are within 0-1
    assert np.min(train_groundTruth) == 0 and np.max(train_groundTruth) == 1
    print("train images shape: " + str(train_imgs.shape))
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' + str(np.max(train_imgs)))
    print("train ground truths shape: " + str(train_groundTruth.shape))
    print("train ground truths range (min-max): " + str(np.min(train_groundTruth)) + ' - ' + str(np.max(train_groundTruth)))

    if patches:
        # Extract the TRAINING patches from the full images
        patches_imgs_train, patches_groundTruths_train = extract_random(
            train_imgs, train_groundTruth, patch_height, patch_width, N_subimgs, inside_FOV)
        data_consistency_check(patches_imgs_train, patches_groundTruths_train)
        print("train PATCHES images shape: " + str(patches_imgs_train.shape))
        print("train PATCHES images range (min-max): " + str(np.min(patches_imgs_train)) + ' - ' + str(np.max(patches_imgs_train)))
        print("train PATCHES ground truths shape: " + str(patches_groundTruths_train.shape))
        print("train PATCHES ground truths range (min-max): " + str(np.min(patches_groundTruths_train)) + ' - ' + str(np.max(patches_groundTruths_train)))
        # visualize(group_images(patches_imgs_train[100:120, :, :, :], 5), 'imgs_train_patches')
        # visualize(group_images(patches_groundTruths_train[100:120, :, :, :], 5), 'gTruth_train_patches')
        return patches_imgs_train, patches_groundTruths_train
    else:
        return train_imgs, train_groundTruth
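# load_hdf5 is assumed to be the usual h5py one-liner used by this kind of pipeline.
# The sketch below is an assumption (in particular the default "image" dataset key and the
# "_sketch" name), not the actual helper shipped with this code.
def load_hdf5_sketch(infile, key="image"):
    import h5py
    with h5py.File(infile, "r") as f:
        return f[key][()]  # read the whole dataset into a numpy array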
def get_data_training(train_imgs_original, train_groudTruth, patch_height, patch_width, num_subimgs, label_mapping, inside_FOV, save_path):
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    else:
        print('folder already exists at this path: {}'.format(save_path))
    print('number of subimages : ', num_subimgs)

    train_imgs_original = load_hdf5(train_imgs_original)  # Pillow (RGB format)
    train_masks = load_hdf5(train_groudTruth)
    print('[DEBUG] shape of train_imgs_original : ', np.shape(train_imgs_original))
    print('[DEBUG] shape of train_imgs_label : ', np.shape(train_masks))

    visualize(group_images(train_imgs_original[:, :, :, :], 5), './' + save_path + '/imgs_train')  # check original train imgs
    visualize(group_images(train_masks[:, :, :, :], 5), './' + save_path + '/imgs_labels')

    train_imgs = my_preprocessing(train_imgs_original)
    visualize(group_images(train_imgs[:, :, :, :], 5), './' + save_path + '/preprocessed')
    print('\n\n[get_data_training] preprocessed image shape : ', train_imgs.shape)
    print('\n[get_data_training] preprocessed mask shape : ', train_masks.shape)
    print('mask maximum val : ', np.max(train_masks))
    # train_masks = train_masks / 255.
    '''
    train_masks mapper, 5 classes (RGB format)
    example) class01 : 0, class02 : 1, class03 : 2
    '''
    # convert images shape to square, input = 565 * 565
    print('[get_data_training] preprocessed2 image shape : ', train_imgs.shape)

    # zero padding
    train_imgs, train_masks = zero_padding(train_imgs, train_masks, patch_height)
    '''
    print(np.shape(train_masks))
    temp_path = './data/inha_oct_20_01_09/pad_label/'
    temp = train_masks
    temp = np.transpose(temp, (0, 2, 3, 1))
    for i in range(40):
        temp2 = Image.fromarray(temp[i].astype('uint8'))
        temp2.save(temp_path + str(i) + '.png')
    '''
    print('\n\n[get_data_training] train images/masks shape : {}'.format(train_imgs.shape))
    print('[get_data_training] train images range (min-max) [{} , {}] '.format(str(np.min(train_imgs)), str(np.max(train_imgs))))
    print('[get_data_training] train masks are within 0-1\n')

    patches_imgs_train, patches_masks_train, class_freq_tabel = extract_random(
        train_imgs, train_masks, patch_height, patch_width, num_subimgs, label_mapping, inside_FOV)
    print('[After patch] mask shape : ', patches_masks_train.shape)

    patches_imgs_train, patches_masks_train = augmentations(patches_imgs_train, patches_masks_train, 20, 0.3)
    # visualize(group_images(patches_masks_train[:, :, :, :], 5), './' + save_path + '/imgs_labels_aug')

    # NOTE: mode is hard-coded to None, so the 'dynamic' patch-extraction branch below never runs.
    mode = None
    if mode == 'dynamic':
        # Extract the TRAINING patches from the full images:
        # extract random patches for data augmentation
        patches_imgs_train, patches_masks_train = extract_dynamic_random(
            train_imgs, train_masks, patch_height, patch_width, num_subimgs, inside_FOV)
        visualize(group_images(patches_imgs_train[0:50, :, :, :], 5), './' + save_path + '/train_patch_img')
        patches_imgs_train = augmentations(patches_imgs_train, 0.2)
        print('[augmentation] patches_imgs_train : {}'.format(patches_imgs_train.shape))
        visualize(group_images(patches_imgs_train[0:50, :, :, :], 5), './' + save_path + '/augmented_patch_img')
        data_consistency_check(patches_imgs_train, patches_masks_train)
        return patches_imgs_train, patches_masks_train

    print('\n\n[get_data_training] train PATCHES images/masks shape : {}'.format(patches_imgs_train.shape))
    print('[get_data_training] train PATCHES images range (min-max): ' + str(np.min(patches_imgs_train)) + ' - ' + str(np.max(patches_imgs_train)))
    print('[get_data_training] patches_imgs_train : {}'.format(patches_imgs_train.shape))
    visualize(group_images(patches_imgs_train[0:50, :, :, :], 5), './' + save_path + '/train_patch_img')
    # data_consistency_check(patches_imgs_train, patches_masks_train)
    return patches_imgs_train, patches_masks_train, class_freq_tabel
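# zero_padding is assumed to pad each image/mask on the bottom and right with zeros so that
# the height and width become multiples of the patch size. The sketch below only illustrates
# that assumed behaviour; the real helper may pad differently (e.g. symmetrically or to a
# fixed square size).
def zero_padding_sketch(imgs, masks, patch_size):
    n, c, h, w = imgs.shape
    new_h = int(np.ceil(h / patch_size)) * patch_size
    new_w = int(np.ceil(w / patch_size)) * patch_size
    pad = ((0, 0), (0, 0), (0, new_h - h), (0, new_w - w))
    return np.pad(imgs, pad, mode='constant'), np.pad(masks, pad, mode='constant')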
def predict_and_save(list_input_images, model):
    print("-------------------------------------")
    print("Found " + str(len(list_input_images)) + " images in directory " + PATH_INPUT + "\n")
    print("It should take a few minutes per image, depending on your CPU capacity. You can go out and come back later ;)\n")
    for input_image in list_input_images:
        start_time_image = time.time()
        print("Working on " + input_image)
        img_test = get_image(PATH_INPUT + input_image)

        # Get image sizes
        # test_imgs_orig = img_test
        full_img_height = img_test.shape[2]
        full_img_width = img_test.shape[3]
        print("Size: " + str(full_img_height) + "x" + str(full_img_width))

        # Images to patches
        patches_imgs_test, new_height, new_width = get_data(
            imgs_test=img_test,
            patch_height=patch_height,
            patch_width=patch_width,
            stride_height=stride_height,
            stride_width=stride_width)

        # Calculate the predictions
        print("Computing output...")
        predictions = model.predict(patches_imgs_test, batch_size=32, verbose=2)
        # print("predicted images size :")
        # print(predictions.shape)

        # Patches back to image
        pred_patches = pred_to_imgs(predictions, patch_height, patch_width, "original")
        pred_imgs = recompone_overlap(pred_patches, new_height, new_width, stride_height, stride_width)
        pred_imgs = pred_imgs[:, :, 0:full_img_height, 0:full_img_width]
        assert pred_imgs.shape[0] == 1
        # N_predicted = pred_imgs.shape[0]
        # group = 1

        # Save predictions to files
        # for i in range(int(N_predicted)):
        pred_stripe = group_images(pred_imgs[:, :, :, :], 1)
        # file_name = input_image
        visualize(pred_stripe, "output/" + input_image[0:len(input_image) - 4] + "_pred", mode="jpg")

        elapsed_time_image = time.time() - start_time_image
        print("Time consumed: " + str(int(elapsed_time_image)) + "s\n")
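# A minimal usage sketch for predict_and_save. It assumes PATH_INPUT, patch_height,
# patch_width, stride_height and stride_width are module-level globals (as the function
# body implies); 'model.h5' is a placeholder checkpoint name, not the repository's file.
def run_inference_sketch():
    from keras.models import load_model
    model = load_model('model.h5')  # hypothetical trained checkpoint
    list_input_images = sorted(f for f in os.listdir(PATH_INPUT)
                               if f.lower().endswith(('.png', '.jpg', '.tif')))
    predict_and_save(list_input_images, model)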
def temp_get_data_training(train_imgs_original, train_groudTruth, patch_height, patch_width, num_subimgs, label_mapping, inside_FOV, save_path):
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    else:
        print('folder already exists at this path: {}'.format(save_path))
    print('number of subimages : ', num_subimgs)

    train_imgs_original = load_hdf5(train_imgs_original)  # Pillow (RGB format)
    train_masks = load_hdf5(train_groudTruth)
    # print('[DEBUG] shape of train_imgs_original : ', np.shape(train_imgs_original))
    # print('[DEBUG] shape of train_imgs_label : ', np.shape(train_masks))
    '''
    ori_num, ori_ch, ori_h, ori_w = temp_train_imgs_original.shape
    label_num, label_ch, label_h, label_w = temp_train_imgs_original.shape
    train_imgs_original = np.zeros((ori_num - 5, ori_ch, ori_h, ori_w))
    train_masks = np.zeros((label_num - 5, label_ch, label_h, label_w))
    for i in range(np.shape(train_imgs_original)[0]):
        if i != 6:
            train_imgs_original[i] = temp_train_imgs_original[i]
            train_masks[i] = temp_train_masks[i]
        elif i > 40:
            pass
        elif i == 6:
            pass
    '''
    print('[DEBUG] shape of train_imgs_original : ', np.shape(train_imgs_original))
    print('[DEBUG] shape of train_imgs_label : ', np.shape(train_masks))

    visualize(group_images(train_imgs_original[:, :, :, :], 5), './' + save_path + '/imgs_train')  # check original train imgs
    visualize(group_images(train_masks[:, :, :, :], 5), './' + save_path + '/imgs_labels')

    train_imgs = my_preprocessing(train_imgs_original)
    train_imgs = train_imgs.astype(np.uint8)
    visualize(group_images(train_imgs[:, :, :, :], 5), './' + save_path + '/preprocessed')
    print('\n\n[get_data_training] preprocessed image shape : ', train_imgs.shape)
    print('\n[get_data_training] preprocessed mask shape : ', train_masks.shape)
    print('mask maximum val : ', np.max(train_masks))
    # train_masks = train_masks / 255.
    '''
    train_masks mapper, 5 classes (RGB format)
    example) class01 : 0, class02 : 1, class03 : 2
    '''
    # convert images shape to square, input = 565 * 565
    print('[get_data_training] preprocessed2 image shape : ', train_imgs.shape)

    # zero padding
    train_imgs, train_masks = zero_padding(train_imgs, train_masks, patch_height)
    print('\n\n[get_data_training] train images/masks shape : {}'.format(train_imgs.shape))
    print('[get_data_training] train images range (min-max) [{} , {}] '.format(str(np.min(train_imgs)), str(np.max(train_imgs))))
    print('[get_data_training] train masks are within 0-1\n')

    # patches_imgs_train, patches_masks_train, class_freq_tabel = extract_random(
    #     train_imgs, train_masks, patch_height, patch_width, num_subimgs, label_mapping, inside_FOV)
    patches_imgs_train, patches_masks_train, class_freq_tabel = extract_oversampling(
        train_imgs, train_masks, patch_height, patch_width, num_subimgs, label_mapping, inside_FOV)
    print('[After patch] mask shape : ', patches_masks_train.shape)
    # patches_imgs_train, patches_masks_train = augmentations(patches_imgs_train, patches_masks_train, 20, 0.5)
    # visualize(group_images(patches_masks_train[:, :, :, :], 5), './' + save_path + '/imgs_labels_aug')

    print('\n\n[get_data_training] train PATCHES images/masks shape : {}'.format(patches_imgs_train.shape))
    print('[get_data_training] train PATCHES images range (min-max): ' + str(np.min(patches_imgs_train)) + ' - ' + str(np.max(patches_imgs_train)))
    print('[get_data_training] patches_imgs_train : {}'.format(patches_imgs_train.shape))
    visualize(group_images(patches_imgs_train[0:50, :, :, :], 5), './' + save_path + '/train_patch_img')
    visualize(group_images(patches_masks_train[::50, :, :, :] * 255, 5), './' + save_path + '/imgs_after_labels')
    # data_consistency_check(patches_imgs_train, patches_masks_train)
    return patches_imgs_train, patches_masks_train, class_freq_tabel
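# The comment above describes mapping RGB-coded masks to integer class ids via label_mapping.
# The helper below is hypothetical and only illustrates that idea under the assumption that
# label_mapping is a dict of the form {(r, g, b): class_id}; the real extract_random /
# extract_oversampling helpers may consume label_mapping in a different format.
def map_rgb_to_classes_sketch(mask_rgb, label_mapping):
    # mask_rgb: (H, W, 3) uint8 array; returns an (H, W) integer class map
    class_map = np.zeros(mask_rgb.shape[:2], dtype=np.int64)
    for rgb, cls in label_mapping.items():
        matches = np.all(mask_rgb == np.array(rgb, dtype=mask_rgb.dtype), axis=-1)
        class_map[matches] = cls
    return class_map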