def Lung_Seg(dicom_dir):
    # Run the lung segmentation network on a DICOM series and return the masked lung image.
    time1 = time.time()
    original_img = read_dicoms(dicom_dir)
    net = Network()
    final_img = net.test(dicom_dir)
    del net
    gc.collect()
    img_spacing = final_img.GetSpacing()
    time2 = time.time()
    print "time cost for lung segment: ", str(time2 - time1), 's'
    time3 = time.time()
    final_img, growed_mask = post_process(final_img, dicom_dir)
    growed_mask.SetSpacing(img_spacing)
    print "Writing lung mask"
    # ST.WriteImage(growed_mask, './output/lung_mask.vtk')
    time4 = time.time()
    print "time cost for lung post_process: ", str(time4 - time3), 's'
    # Apply the grown mask to the original intensities to obtain the lung-only image.
    final_array = ST.GetArrayFromImage(growed_mask)
    img_array = ST.GetArrayFromImage(original_img)
    lung_array = final_array * img_array
    # lung_array = lung_array + np.min(lung_array)*2*np.int8(lung_array==0)
    lung_img = ST.GetImageFromArray(lung_array)
    lung_img.SetSpacing(img_spacing)
    print "Writing lung image"
    # ST.WriteImage(lung_img,'./output/lung_img.vtk')
    return lung_img
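# Usage sketch (assumption, not part of the original sources): how Lung_Seg might be called
# from a small driver script. The DICOM directory and output file name below are hypothetical.
import SimpleITK as ST

if __name__ == '__main__':
    lung_img = Lung_Seg('./WANG_REN/original1')        # assumed DICOM series directory
    ST.WriteImage(lung_img, './output/lung_img.vtk')   # persist the masked lung volume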
def __init__(self, data, block_shape, type):
    # Accept either a DICOM directory or an already-loaded SimpleITK image.
    if 'dicom_data' in type:
        self.img = read_dicoms(data)
    elif 'vtk_data' in type:
        self.img = data
    self.space = self.img.GetSpacing()
    self.image_array = ST.GetArrayFromImage(self.img)
    self.image_array = np.transpose(self.image_array, [2, 1, 0])
    self.image_shape = np.shape(self.image_array)
    # if "airway" in type:
    #     self.image_array = np.float32(self.image_array <= 0) * self.image_array
    print np.min(self.image_array)
    print np.max(self.image_array)
    self.block_shape = block_shape
    # Step sizes are half the block shape, so neighbouring blocks overlap by 50%.
    self.steps = list()
    for i in range(len(block_shape)):
        self.steps.append(block_shape[i])
    # print self.steps
    # print self.block_shape
    self.steps[2] = self.steps[2] / 2
    self.steps[0] = self.steps[0] / 2
    self.steps[1] = self.steps[1] / 2
    self.blocks = dict()
    self.results = dict()
    print self.steps
    print self.block_shape
    print "maximum value of original data : ", np.max(self.image_array)
    print "minimum value of original data : ", np.min(self.image_array)
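# Illustrative sketch (assumption): how the half-overlapping block origins implied by
# self.steps could be enumerated for a volume. block_origins is a hypothetical helper,
# not a function from the original tools module.
import numpy as np

def block_origins(image_shape, block_shape):
    steps = [s // 2 for s in block_shape]              # 50% overlap along every axis
    origins = []
    for x in range(0, image_shape[0], steps[0]):
        for y in range(0, image_shape[1], steps[1]):
            for z in range(0, image_shape[2], steps[2]):
                origins.append((x, y, z))              # block covers origin .. origin + block_shape (clipped at the border)
    return origins

# e.g. block_origins((256, 256, 300), (64, 64, 128)) yields the grid of block corners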
def post_process(img, dicom_dir):
    # print img.GetSize()
    original_img = read_dicoms(dicom_dir)
    img_array = np.transpose(ST.GetArrayFromImage(img), [2, 1, 0])
    img_shape = np.shape(img_array)
    # Get an outer-air mask so that noise outside the body gets excluded.
    original_array = ST.GetArrayFromImage(original_img)
    min_val = np.min(original_array)
    # Seed the background region from the volume corners, inset by a small margin.
    outer_seeds = []
    inner_step = 2
    outer_seeds.append([inner_step, inner_step, img_shape[2] - inner_step])
    outer_seeds.append([inner_step, img_shape[1] - inner_step, inner_step])
    outer_seeds.append([img_shape[0] - inner_step, inner_step, inner_step])
    outer_seeds.append([inner_step, img_shape[1] - inner_step, img_shape[2] - inner_step])
    outer_seeds.append([img_shape[0] - inner_step, inner_step, img_shape[2] - inner_step])
    outer_seeds.append([img_shape[0] - inner_step, img_shape[1] - inner_step, inner_step])
    outer_seeds.append([img_shape[0] - inner_step, img_shape[1] - inner_step, img_shape[2] - inner_step])
    outer_space = ST.NeighborhoodConnected(original_img, outer_seeds, min_val * 1.0, -200, [1, 1, 0], 1.0)
    # ST.WriteImage(outer_space, './outer_space.vtk')
    outer_array = ST.GetArrayFromImage(outer_space)
    outer_array = np.transpose(outer_array, [2, 1, 0])
    # Subtract the outer-air region from the predicted mask.
    inner_array = np.float32((img_array - outer_array) > 0)
    inner_img = ST.GetImageFromArray(np.transpose(inner_array, [2, 1, 0]))
    # ST.WriteImage(inner_img, './inner_mask.vtk')
    median_filter = ST.MedianImageFilter()
    median_filter.SetRadius(1)
    midian_img = median_filter.Execute(inner_img)
    midian_array = ST.GetArrayFromImage(midian_img)
    midian_array = np.transpose(midian_array, [2, 1, 0])
    array_shape = np.shape(midian_array)
    # Pick a seed inside the lung: along each axis, take the slice with the largest mask area.
    seed = [0, 0, 0]
    max = 0
    for i in range(array_shape[0]):
        temp_max = np.sum(midian_array[i, :, :])
        if max < temp_max:
            max = temp_max
            seed[0] = i
    max = 0
    for i in range(array_shape[1]):
        temp_max = np.sum(midian_array[:, i, :])
        if max < temp_max:
            max = temp_max
            seed[1] = i
    max = 0
    for i in range(array_shape[2]):
        temp_max = np.sum(midian_array[:, :, i])
        if max < temp_max:
            max = temp_max
            seed[2] = i
    # print seed
    growed_img = ST.NeighborhoodConnected(img, [seed], 0.9, 1, [1, 1, 1], 1.0)
    return img, growed_img
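# Minimal sketch (assumption) of the region-growing step used above, isolated from the
# post-processing pipeline: SimpleITK's NeighborhoodConnected grows from seed voxels and keeps
# voxels whose whole neighbourhood stays within the [lower, upper] intensity bounds. The file
# name and seed coordinate below are hypothetical.
import SimpleITK as sitk

mask = sitk.ReadImage('./output/lung_mask.vtk')   # hypothetical binary mask with values in {0, 1}
seed = (100, 120, 60)                             # hypothetical voxel assumed to lie inside the lung
# positional arguments, as in the code above: image, seed list, lower, upper, radius, replace value
grown = sitk.NeighborhoodConnected(mask, [seed], 0.9, 1.0, [1, 1, 1], 1.0)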
def __init__(self, data, block_shape, type):
    if type == 'dicom_data':
        self.img = read_dicoms(data)
    elif type == 'vtk_data':
        self.img = data
    self.space = self.img.GetSpacing()
    self.image_array = ST.GetArrayFromImage(self.img)
    self.image_array = np.transpose(self.image_array, [2, 1, 0])
    self.image_shape = np.shape(self.image_array)
    self.block_shape = block_shape
    self.blocks = dict()
    self.results = dict()
def get_threshed_img(dicom_dir):
    img = read_dicoms(dicom_dir)
    space = img.GetSpacing()
    image_array = ST.GetArrayFromImage(img)
    ST.WriteImage(img, './original.vtk')
    # image_array = np.transpose(image_array,(2,1,0))
    print np.shape(image_array)
    array_shape = np.shape(image_array)
    # Seed the region growing from a small cloud of points around the volume centre.
    central = [(array_shape[2] - 1) / 2, (array_shape[1] - 1) / 2, (array_shape[0] - 1) / 2]
    print central
    pointslist = []
    for i in range(3):
        for j in range(3):
            for k in range(3):
                if i != 0 or j != 0 or k != 0:
                    pointslist.append([central[0] + i, central[1] + j, central[2] + k])
                    pointslist.append([central[0] + i, central[1] + j, central[2] - k])
                    pointslist.append([central[0] + i, central[1] - j, central[2] + k])
                    pointslist.append([central[0] + i, central[1] - j, central[2] - k])
                    pointslist.append([central[0] - i, central[1] + j, central[2] + k])
                    pointslist.append([central[0] - i, central[1] + j, central[2] - k])
                    pointslist.append([central[0] - i, central[1] - j, central[2] + k])
                    pointslist.append([central[0] - i, central[1] - j, central[2] - k])
    threshed_mask = ST.NeighborhoodConnected(img, pointslist, -40, np.float64(np.max(image_array)), [1, 1, 1], 1.0)
    threshed_mask_array = ST.GetArrayFromImage(threshed_mask)
    threshed_array = image_array * threshed_mask_array
    # threshed_img = ST.GetImageFromArray(threshed_array)
    threshed_array = np.transpose(threshed_array, (2, 1, 0))
    # threshed_array = np.float32(threshed_array)
    # threshed_img = ST.GetImageFromArray(threshed_array)
    # blured_img = ST.CurvatureAnisotropicDiffusion(threshed_img,0.0625,3,1,3)
    # blured_array = ST.GetArrayFromImage(blured_img)
    return threshed_array, space
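# Equivalent sketch (assumption, not from the original file): building the same kind of seed
# cloud around the centre voxel with itertools.product instead of the nested loops above.
# Offsets run over -2..2 on every axis and the centre itself is skipped, which mirrors the
# intent of the loops (the original also appends some duplicate points, which is harmless
# for NeighborhoodConnected).
from itertools import product

def centre_seeds(central, radius=2):
    seeds = []
    for dx, dy, dz in product(range(-radius, radius + 1), repeat=3):
        if (dx, dy, dz) != (0, 0, 0):
            seeds.append([central[0] + dx, central[1] + dy, central[2] + dz])
    return seeds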
def process_dicom(self, dicom_path):
    img = read_dicoms(dicom_path)
    array = np.transpose(ST.GetArrayFromImage(img), [2, 1, 0])
    return array
def train(self, configure):
    data = tools.Data(configure, epoch_walked)
    best_acc = 0
    # X = tf.placeholder(shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
    X = tf.placeholder(shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
    # Y = tf.placeholder(shape=[batch_size, output_shape[0], output_shape[1], output_shape[2]], dtype=tf.float32)
    Y = tf.placeholder(shape=[batch_size, output_shape[0], output_shape[1], output_shape[2]], dtype=tf.float32)
    print X.get_shape()
    lr = tf.placeholder(tf.float32)
    training = tf.placeholder(tf.bool)
    threshold = tf.placeholder(tf.float32)
    with tf.variable_scope('ae'):
        Y_pred, Y_pred_modi, Y_pred_nosig = self.ae_u(X, training, batch_size, threshold)
    with tf.variable_scope('dis'):
        XY_real_pair = self.dis(X, Y, training)
    with tf.variable_scope('dis', reuse=True):
        XY_fake_pair = self.dis(X, Y_pred, training)
    with tf.device('/gpu:' + GPU0):
        ################################ ae loss
        Y_ = tf.reshape(Y, shape=[batch_size, -1])
        Y_pred_modi_ = tf.reshape(Y_pred_modi, shape=[batch_size, -1])
        w = tf.placeholder(tf.float32)  # power of foreground against background
        ae_loss = tf.reduce_mean(
            -tf.reduce_mean(w * Y_ * tf.log(Y_pred_modi_ + 1e-8), reduction_indices=[1]) -
            tf.reduce_mean((1 - w) * (1 - Y_) * tf.log(1 - Y_pred_modi_ + 1e-8), reduction_indices=[1]))
        sum_ae_loss = tf.summary.scalar('ae_loss', ae_loss)
        ################################ wgan loss
        gan_g_loss = -tf.reduce_mean(XY_fake_pair)
        gan_d_loss = tf.reduce_mean(XY_fake_pair) - tf.reduce_mean(XY_real_pair)
        sum_gan_g_loss = tf.summary.scalar('gan_g_loss', gan_g_loss)
        sum_gan_d_loss = tf.summary.scalar('gan_d_loss', gan_d_loss)
        # gradient penalty on random interpolations between real and predicted masks (WGAN-GP)
        alpha = tf.random_uniform(shape=[batch_size, input_shape[0] * input_shape[1] * input_shape[2]], minval=0.0, maxval=1.0)
        Y_pred_ = tf.reshape(Y_pred, shape=[batch_size, -1])
        differences_ = Y_pred_ - Y_
        interpolates = Y_ + alpha * differences_
        with tf.variable_scope('dis', reuse=True):
            XY_fake_intep = self.dis(X, interpolates, training)
        gradients = tf.gradients(XY_fake_intep, [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)
        gan_d_loss += 10 * gradient_penalty
        ################################# ae + gan loss
        gan_g_w = 5
        ae_w = 100 - gan_g_w
        ae_gan_g_loss = ae_w * ae_loss + gan_g_w * gan_g_loss
    with tf.device('/gpu:' + GPU0):
        ae_var = [var for var in tf.trainable_variables() if var.name.startswith('ae')]
        dis_var = [var for var in tf.trainable_variables() if var.name.startswith('dis')]
        ae_g_optim = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999,
                                            epsilon=1e-8).minimize(ae_gan_g_loss, var_list=ae_var)
        dis_optim = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999,
                                           epsilon=1e-8).minimize(gan_d_loss, var_list=dis_var)
    print tools.Ops.variable_count()
    sum_merged = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=1)
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.visible_device_list = GPU0
    with tf.Session(config=config) as sess:
        # if os.path.exists(self.train_models_dir):
        #     try:
        #         saver.restore(sess,self.train_models_dir+'model.cptk')
        #     except Exception,e:
        #         saver.restore(sess,'./regular/'+'model.cptk')
        sum_writer_train = tf.summary.FileWriter(self.train_sum_dir, sess.graph)
        sum_write_test = tf.summary.FileWriter(self.test_sum_dir)
        if os.path.isfile(self.train_models_dir + 'model.cptk.data-00000-of-00001'):
            print "restoring saved model"
            saver.restore(sess, self.train_models_dir + 'model.cptk')
        else:
            sess.run(tf.global_variables_initializer())
        learning_rate_g = ori_lr * pow(power, (epoch_walked / 4))
        for epoch in range(epoch_walked, 15000):
            # data.shuffle_X_Y_files(label='train')
            #### re-sample the training data every 2 epochs
            if epoch % 2 == 0 and epoch > 0:
                del data
                gc.collect()
                data = tools.Data(configure, epoch)
            #### full testing
            # ...
            train_amount = len(data.train_numbers)
            test_amount = len(data.test_numbers)
            if train_amount >= test_amount and train_amount > 0 and test_amount > 0 and data.total_train_batch_num > 0 and data.total_test_seq_batch > 0:
                # foreground weight decays from 0.85 towards 0.5 over training
                weight_for = 0.35 * (1 - epoch * 1.0 / 15000) + 0.5
                if epoch % 4 == 0:
                    print '********************** FULL TESTING ********************************'
                    time_begin = time.time()
                    lung_img = ST.ReadImage('./WANG_REN/lung_img.vtk')
                    mask_dir = "./WANG_REN/airway"
                    test_batch_size = batch_size
                    # test_data = tools.Test_data(dicom_dir,input_shape)
                    test_data = tools.Test_data(lung_img, input_shape, 'vtk_data')
                    test_data.organize_blocks()
                    test_mask = read_dicoms(mask_dir)
                    array_mask = ST.GetArrayFromImage(test_mask)
                    array_mask = np.transpose(array_mask, (2, 1, 0))
                    print "mask shape: ", np.shape(array_mask)
                    time1 = time.time()
                    block_numbers = test_data.blocks.keys()
                    for i in range(0, len(block_numbers), test_batch_size):
                        batch_numbers = []
                        if i + test_batch_size < len(block_numbers):
                            # full batch: run the shared graph directly
                            temp_input = np.zeros([test_batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            for j in range(test_batch_size):
                                temp_num = block_numbers[i + j]
                                temp_block = test_data.blocks[temp_num]
                                batch_numbers.append(temp_num)
                                block_array = temp_block.load_data()
                                block_shape = np.shape(block_array)
                                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
                            Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                                [Y_pred, Y_pred_modi, Y_pred_nosig],
                                feed_dict={X: temp_input, training: False, w: weight_for, threshold: upper_threshold})
                            for j in range(test_batch_size):
                                test_data.upload_result(batch_numbers[j], Y_temp_modi[j, :, :, :])
                        else:
                            # remainder batch: build a smaller placeholder that reuses the trained weights
                            temp_batch_size = len(block_numbers) - i
                            temp_input = np.zeros([temp_batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            for j in range(temp_batch_size):
                                temp_num = block_numbers[i + j]
                                temp_block = test_data.blocks[temp_num]
                                batch_numbers.append(temp_num)
                                block_array = temp_block.load_data()
                                block_shape = np.shape(block_array)
                                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
                            X_temp = tf.placeholder(shape=[temp_batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
                            with tf.variable_scope('ae', reuse=True):
                                Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp = self.ae_u(X_temp, training, temp_batch_size, threshold)
                            Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                                [Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp],
                                feed_dict={X_temp: temp_input, training: False, w: weight_for, threshold: upper_threshold})
                            for j in range(temp_batch_size):
                                test_data.upload_result(batch_numbers[j], Y_temp_modi[j, :, :, :])
                    test_result_array = test_data.get_result()
                    print "result shape: ", np.shape(test_result_array)
                    r_s = np.shape(test_result_array)  # result shape
                    e_t = 10  # edge thickness
                    to_be_transformed = np.zeros(r_s, np.float32)
                    to_be_transformed[e_t:r_s[0] - e_t, e_t:r_s[1] - e_t, e_t:r_s[2] - e_t] += \
                        test_result_array[e_t:r_s[0] - e_t, e_t:r_s[1] - e_t, e_t:r_s[2] - e_t]
                    print np.max(to_be_transformed)
                    print np.min(to_be_transformed)
                    final_img = ST.GetImageFromArray(np.transpose(to_be_transformed, [2, 1, 0]))
                    final_img.SetSpacing(test_data.space)
                    print "writing full testing result"
                    print '/usr/analyse_airway/test_result/test_result' + str(epoch) + '.vtk'
                    ST.WriteImage(final_img, '/usr/analyse_airway/test_result/test_result' + str(epoch) + '.vtk')
                    if epoch == 0:
                        mask_img = ST.GetImageFromArray(np.transpose(array_mask, [2, 1, 0]))
                        mask_img.SetSpacing(test_data.space)
                        ST.WriteImage(mask_img, '/usr/analyse_airway/test_result/test_mask.vtk')
                    # note: this formula is the Dice coefficient, 2*|A∩B| / (|A| + |B|)
                    test_IOU = 2 * np.sum(to_be_transformed * array_mask) / (np.sum(to_be_transformed) + np.sum(array_mask))
                    print "IOU accuracy: ", test_IOU
                    time_end = time.time()
                    print '******************** time of full testing: ' + str(time_end - time_begin) + 's ********************'
                data.shuffle_X_Y_pairs()
                total_train_batch_num = data.total_train_batch_num
                # train_files=data.X_train_files
                # test_files=data.X_test_files
                # total_train_batch_num = 500
                print "total_train_batch_num:", total_train_batch_num
                for i in range(total_train_batch_num):
                    #### training
                    X_train_batch, Y_train_batch = data.load_X_Y_voxel_train_next_batch()
                    # X_train_batch, Y_train_batch = data.load_X_Y_voxel_grids_train_next_batch()
                    # Y_train_batch=np.reshape(Y_train_batch,[batch_size, output_shape[0], output_shape[1], output_shape[2], 1])
                    gan_d_loss_c, = sess.run([gan_d_loss],
                                             feed_dict={X: X_train_batch, Y: Y_train_batch, training: False,
                                                        w: weight_for, threshold: upper_threshold})
                    ae_loss_c, gan_g_loss_c, sum_train = sess.run([ae_loss, gan_g_loss, sum_merged],
                                                                  feed_dict={X: X_train_batch, Y: Y_train_batch, training: False,
                                                                             w: weight_for, threshold: upper_threshold})
                    if epoch % 4 == 0 and epoch > 0 and i == 0:
                        learning_rate_g = learning_rate_g * power
                    sess.run([ae_g_optim],
                             feed_dict={X: X_train_batch, threshold: upper_threshold, Y: Y_train_batch,
                                        lr: learning_rate_g, training: True, w: weight_for})
                    if epoch <= 5:
                        sess.run([dis_optim],
                                 feed_dict={X: X_train_batch, threshold: upper_threshold, Y: Y_train_batch,
                                            lr: learning_rate_g, training: True, w: weight_for})
                    elif epoch <= 20:
                        sess.run([dis_optim],
                                 feed_dict={X: X_train_batch, threshold: upper_threshold, Y: Y_train_batch,
                                            lr: learning_rate_g, training: True, w: weight_for})
                    else:
                        sess.run([dis_optim],
                                 feed_dict={X: X_train_batch, threshold: upper_threshold, Y: Y_train_batch,
                                            lr: learning_rate_g, training: True, w: weight_for})
                    sum_writer_train.add_summary(sum_train, epoch * total_train_batch_num + i)
                    if i % 2 == 0:
                        print "epoch:", epoch, " i:", i, " train ae loss:", ae_loss_c, " gan g loss:", gan_g_loss_c, " gan d loss:", gan_d_loss_c, " learning rate: ", learning_rate_g
                    #### testing
                    if i % 20 == 0 and epoch % 1 == 0:
                        try:
                            X_test_batch, Y_test_batch = data.load_X_Y_voxel_test_next_batch(fix_sample=False)
                            # Y_test_batch = np.reshape(Y_test_batch,[batch_size, output_shape[0], output_shape[1], output_shape[2], 1])
                            ae_loss_t, gan_g_loss_t, gan_d_loss_t, Y_test_pred, Y_test_modi, Y_test_pred_nosig = \
                                sess.run([ae_loss, gan_g_loss, gan_d_loss, Y_pred, Y_pred_modi, Y_pred_nosig],
                                         feed_dict={X: X_test_batch, threshold: upper_threshold, Y: Y_test_batch,
                                                    training: False, w: weight_for})
                            predict_result = np.float32(Y_test_modi > 0.01)
                            predict_result = np.reshape(predict_result, [batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            # Foreground
                            # if np.sum(Y_test_batch)>0:
                            #     accuracy_for = np.sum(predict_result*Y_test_batch)/np.sum(Y_test_batch)
                            # Background
                            # accuracy_bac = np.sum((1-predict_result)*(1-Y_test_batch))/(np.sum(1-Y_test_batch))
                            # IOU
                            predict_probablity = np.float32((Y_test_modi - 0.01) > 0)
                            predict_probablity = np.reshape(predict_probablity, [batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            accuracy = 2 * np.sum(np.abs(predict_probablity * Y_test_batch)) / np.sum(np.abs(predict_result) + np.abs(Y_test_batch))
                            # if epoch%30==0 and epoch>0:
                            #     to_save = {'X_test': X_test_batch, 'Y_test_pred': Y_test_pred,'Y_test_true': Y_test_batch}
                            #     scipy.io.savemat(self.test_results_dir + 'X_Y_pred_' + str(epoch).zfill(2) + '_' + str(i).zfill(4) + '.mat', to_save, do_compression=True)
                            print "epoch:", epoch, " i:", i, "\nIOU accuracy: ", accuracy, "\ntest ae loss:", ae_loss_t, " gan g loss:", gan_g_loss_t, " gan d loss:", gan_d_loss_t
                            if accuracy > best_acc:
                                saver.save(sess, save_path=self.train_models_dir + 'model.cptk')
                                print "epoch:", epoch, " i:", i, "best model saved!"
                                best_acc = accuracy
                        except Exception, e:
                            print e
                    #### model saving
                    if i % 30 == 0 and epoch % 1 == 0:
                        # regular_train_dir = "./regular/"
                        # if not os.path.exists(regular_train_dir):
                        #     os.makedirs(regular_train_dir)
                        saver.save(sess, save_path=self.train_models_dir + 'model.cptk')
                        print "epoch:", epoch, " i:", i, "regular model saved!"
            else:
                print "bad data , next epoch", epoch
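# Numpy sketch (assumption, not from the original repo) of the class-weighted voxel-wise
# cross-entropy that ae_loss implements above: the foreground term is scaled by w and the
# background term by (1 - w), so raising w pushes the network towards higher recall on the
# sparse airway voxels.
import numpy as np

def weighted_voxel_ce(y_true, y_prob, w, eps=1e-8):
    y_true = y_true.reshape(y_true.shape[0], -1)      # flatten each volume in the batch
    y_prob = y_prob.reshape(y_prob.shape[0], -1)
    fg = np.mean(w * y_true * np.log(y_prob + eps), axis=1)
    bg = np.mean((1 - w) * (1 - y_true) * np.log(1 - y_prob + eps), axis=1)
    return np.mean(-fg - bg)

# e.g. weighted_voxel_ce(labels, predictions, w=0.85) for a batch of binary label volumes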
def full_testing(self, sess, X, w, threshold, test_merge_op, sum_write_test, training,
                 weight_for, total_acc, Y_pred, Y_pred_modi, Y_pred_nosig, epoch):
    print '********************** FULL TESTING ********************************'
    # X = tf.placeholder(shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
    # w = tf.placeholder(tf.float32)
    # threshold = tf.placeholder(tf.float32)
    time_begin = time.time()
    origin_data = read_dicoms(test_dir + "original1")
    mask_dir = test_dir + "airway"
    test_batch_size = batch_size
    # test_data = tools.Test_data(dicom_dir,input_shape)
    test_data = tools.Test_data(origin_data, input_shape, 'vtk_data')
    test_data.organize_blocks()
    test_mask = read_dicoms(mask_dir)
    array_mask = ST.GetArrayFromImage(test_mask)
    array_mask = np.transpose(array_mask, (2, 1, 0))
    print "mask shape: ", np.shape(array_mask)
    block_numbers = test_data.blocks.keys()
    for i in range(0, len(block_numbers), test_batch_size):
        batch_numbers = []
        if i + test_batch_size < len(block_numbers):
            # full batch: run the shared graph directly
            temp_input = np.zeros([test_batch_size, input_shape[0], input_shape[1], input_shape[2]])
            for j in range(test_batch_size):
                temp_num = block_numbers[i + j]
                temp_block = test_data.blocks[temp_num]
                batch_numbers.append(temp_num)
                block_array = temp_block.load_data()
                block_shape = np.shape(block_array)
                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
            Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                [Y_pred, Y_pred_modi, Y_pred_nosig],
                feed_dict={X: temp_input, training: False, w: weight_for,
                           threshold: upper_threshold + test_extra_threshold})
            for j in range(test_batch_size):
                test_data.upload_result(batch_numbers[j], Y_temp_modi[j, :, :, :])
        else:
            # remainder batch: build a smaller placeholder that reuses the trained weights
            temp_batch_size = len(block_numbers) - i
            temp_input = np.zeros([temp_batch_size, input_shape[0], input_shape[1], input_shape[2]])
            for j in range(temp_batch_size):
                temp_num = block_numbers[i + j]
                temp_block = test_data.blocks[temp_num]
                batch_numbers.append(temp_num)
                block_array = temp_block.load_data()
                block_shape = np.shape(block_array)
                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
            X_temp = tf.placeholder(shape=[temp_batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
            with tf.variable_scope('generator', reuse=True):
                Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp = self.ae_u(X_temp, training, temp_batch_size, threshold)
            Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                [Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp],
                feed_dict={X_temp: temp_input, training: False, w: weight_for,
                           threshold: upper_threshold + test_extra_threshold})
            for j in range(temp_batch_size):
                test_data.upload_result(batch_numbers[j], Y_temp_modi[j, :, :, :])
    test_result_array = test_data.get_result()
    print "result shape: ", np.shape(test_result_array)
    to_be_transformed = self.post_process(test_result_array)
    if epoch == total_test_epoch:
        mask_img = ST.GetImageFromArray(np.transpose(array_mask, [2, 1, 0]))
        mask_img.SetSpacing(test_data.space)
        ST.WriteImage(mask_img, self.test_results_dir + 'test_mask.vtk')
    test_IOU = 2 * np.sum(to_be_transformed * array_mask) / (np.sum(to_be_transformed) + np.sum(array_mask))
    test_summary = sess.run(test_merge_op, feed_dict={total_acc: test_IOU})
    sum_write_test.add_summary(test_summary, global_step=epoch)
    print "IOU accuracy: ", test_IOU
    time_end = time.time()
    print '******************** time of full testing: ' + str(time_end - time_begin) + 's ********************'
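# Side note as code (assumption, not part of the original repo). The "test_IOU" reported above
# is computed as 2*|A∩B| / (|A| + |B|), which is the Dice coefficient; the Jaccard/IoU score
# would divide the intersection by the union instead.
import numpy as np

def dice_and_iou(pred, mask):
    pred = np.float32(pred > 0)
    mask = np.float32(mask > 0)
    inter = np.sum(pred * mask)
    dice = 2.0 * inter / (np.sum(pred) + np.sum(mask))       # what the training code prints
    iou = inter / (np.sum(pred) + np.sum(mask) - inter)      # strict intersection-over-union
    return dice, iou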
def train(self, configure):
    # data
    data = tools.Data(configure, epoch_walked / re_example_epoch)
    # network
    X = tf.placeholder(shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
    Y = tf.placeholder(shape=[batch_size, output_shape[0], output_shape[1], output_shape[2]], dtype=tf.float32)
    lr = tf.placeholder(tf.float32)
    training = tf.placeholder(tf.bool)
    threshold = tf.placeholder(tf.float32)
    with tf.variable_scope('segmenter'):
        Y_pred, Y_pred_modi, Y_pred_nosig = self.ae_u(X, training, batch_size, threshold)
    # loss function
    Y_ = tf.reshape(Y, shape=[batch_size, -1])
    Y_pred_modi_ = tf.reshape(Y_pred_modi, shape=[batch_size, -1])
    w = tf.placeholder(tf.float32)  # foreground weight
    cross_loss = tf.reduce_mean(
        -tf.reduce_mean(w * Y_ * tf.log(Y_pred_modi_ + 1e-8), reduction_indices=[1]) -
        tf.reduce_mean((1 - w) * (1 - Y_) * tf.log(1 - Y_pred_modi_ + 1e-8), reduction_indices=[1]))
    loss_sum = tf.summary.scalar("cross entropy", cross_loss)
    # trainers
    optim = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999, epsilon=1e-8).minimize(cross_loss)
    # accuracy
    block_acc = tf.placeholder(tf.float32)
    total_acc = tf.placeholder(tf.float32)
    train_sum = tf.summary.scalar("train_block_accuracy", block_acc)
    test_sum = tf.summary.scalar("total_test_accuracy", total_acc)
    train_merge_op = tf.summary.merge([train_sum, loss_sum])
    test_merge_op = tf.summary.merge([test_sum])
    saver = tf.train.Saver(max_to_keep=1)
    # config = tf.ConfigProto(allow_soft_placement=True)
    # config.gpu_options.visible_device_list = GPU0
    with tf.Session() as sess:
        # define tensorboard writer
        sum_writer_train = tf.summary.FileWriter(self.train_sum_dir, sess.graph)
        sum_write_test = tf.summary.FileWriter(self.test_sum_dir, sess.graph)
        # load model data if pre-trained
        sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
        if os.path.isfile(self.train_models_dir + 'model.cptk.data-00000-of-00001'):
            print "restoring saved model"
            saver.restore(sess, self.train_models_dir + 'model.cptk')
        learning_rate_g = ori_lr * pow(power, (epoch_walked / decay_step))
        # start training loop
        global_step = step_walked
        for epoch in range(epoch_walked, MAX_EPOCH):
            if epoch % re_example_epoch == 0 and epoch > 0:
                del data
                gc.collect()
                data = tools.Data(configure, epoch / re_example_epoch)
            train_amount = len(data.train_numbers)
            test_amount = len(data.test_numbers)
            if train_amount >= test_amount and train_amount > 0 and test_amount > 0 and data.total_train_batch_num > 0 and data.total_test_seq_batch > 0:
                # actual foreground weight
                weight_for = 0.5 + (1 - 1.0 * epoch / MAX_EPOCH) * 0.35
                if epoch % total_test_epoch == 0 and epoch > 0:
                    print '********************** FULL TESTING ********************************'
                    time_begin = time.time()
                    origin_dir = read_dicoms(test_dir + "original1")
                    mask_dir = test_dir + "artery"
                    test_batch_size = batch_size
                    # test_data = tools.Test_data(dicom_dir,input_shape)
                    test_data = tools.Test_data(origin_dir, input_shape, 'vtk_data')
                    test_data.organize_blocks()
                    test_mask = read_dicoms(mask_dir)
                    array_mask = ST.GetArrayFromImage(test_mask)
                    array_mask = np.transpose(array_mask, (2, 1, 0))
                    print "mask shape: ", np.shape(array_mask)
                    block_numbers = test_data.blocks.keys()
                    for i in range(0, len(block_numbers), test_batch_size):
                        batch_numbers = []
                        if i + test_batch_size < len(block_numbers):
                            temp_input = np.zeros([test_batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            for j in range(test_batch_size):
                                temp_num = block_numbers[i + j]
                                temp_block = test_data.blocks[temp_num]
                                batch_numbers.append(temp_num)
                                block_array = temp_block.load_data()
                                block_shape = np.shape(block_array)
                                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
                            Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                                [Y_pred, Y_pred_modi, Y_pred_nosig],
                                feed_dict={X: temp_input, training: False, w: weight_for,
                                           threshold: upper_threshold + test_extra_threshold})
                            for j in range(test_batch_size):
                                test_data.upload_result(batch_numbers[j], Y_temp_modi[j, :, :, :])
                        else:
                            temp_batch_size = len(block_numbers) - i
                            temp_input = np.zeros([temp_batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            for j in range(temp_batch_size):
                                temp_num = block_numbers[i + j]
                                temp_block = test_data.blocks[temp_num]
                                batch_numbers.append(temp_num)
                                block_array = temp_block.load_data()
                                block_shape = np.shape(block_array)
                                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
                            X_temp = tf.placeholder(shape=[temp_batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
                            with tf.variable_scope('segmenter', reuse=True):
                                Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp = self.ae_u(X_temp, training, temp_batch_size, threshold)
                            Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                                [Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp],
                                feed_dict={X_temp: temp_input, training: False, w: weight_for,
                                           threshold: upper_threshold + test_extra_threshold})
                            for j in range(temp_batch_size):
                                test_data.upload_result(batch_numbers[j], Y_temp_modi[j, :, :, :])
                    test_result_array = test_data.get_result()
                    print "result shape: ", np.shape(test_result_array)
                    to_be_transformed = self.post_process(test_result_array)
                    if epoch % output_epoch == 0:
                        self.output_img(to_be_transformed, test_data.space, epoch)
                    if epoch == 0:
                        mask_img = ST.GetImageFromArray(np.transpose(array_mask, [2, 1, 0]))
                        mask_img.SetSpacing(test_data.space)
                        ST.WriteImage(mask_img, './test_result/test_mask.vtk')
                    test_IOU = 2 * np.sum(to_be_transformed * array_mask) / (np.sum(to_be_transformed) + np.sum(array_mask))
                    test_summary = sess.run(test_merge_op, feed_dict={total_acc: test_IOU})
                    sum_write_test.add_summary(test_summary, global_step=epoch)
                    print "IOU accuracy: ", test_IOU
                    time_end = time.time()
                    print '******************** time of full testing: ' + str(time_end - time_begin) + 's ********************'
                data.shuffle_X_Y_pairs()
                total_train_batch_num = data.total_train_batch_num
                print "total_train_batch_num:", total_train_batch_num
                for i in range(total_train_batch_num):
                    X_train_batch, Y_train_batch = data.load_X_Y_voxel_train_next_batch()
                    # calculate loss value
                    # print "calculate begin"
                    loss_c = sess.run([cross_loss],
                                      feed_dict={X: X_train_batch, Y: Y_train_batch, training: False,
                                                 w: weight_for, threshold: upper_threshold})
                    # print "calculate ended"
                    if epoch % decay_step == 0 and epoch > epoch_walked and i == 0:
                        learning_rate_g = learning_rate_g * power
                    sess.run([optim],
                             feed_dict={X: X_train_batch, threshold: upper_threshold, Y: Y_train_batch,
                                        lr: learning_rate_g, training: True, w: weight_for})
                    # print "training ended"
                    global_step += 1
                    # output some results
                    if i % show_step == 0:
                        print "epoch:", epoch, " i:", i, " train loss:", loss_c, " learning rate:", learning_rate_g
                    if i % block_test_step == 0 and epoch % 1 == 0:
                        try:
                            X_test_batch, Y_test_batch = data.load_X_Y_voxel_test_next_batch(fix_sample=False)
                            Y_test_pred, Y_test_modi, Y_test_pred_nosig, loss_t = \
                                sess.run([Y_pred, Y_pred_modi, Y_pred_nosig, cross_loss],
                                         feed_dict={X: X_test_batch, threshold: upper_threshold + test_extra_threshold,
                                                    Y: Y_test_batch, training: False, w: weight_for})
                            predict_result = np.float32(Y_test_modi > 0.01)
                            predict_result = np.reshape(predict_result, [batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            print np.max(Y_test_pred)
                            print np.min(Y_test_pred)
                            # IOU
                            predict_probablity = np.float32((Y_test_modi - 0.01) > 0)
                            predict_probablity = np.reshape(predict_probablity, [batch_size, input_shape[0], input_shape[1], input_shape[2]])
                            accuracy = 2 * np.sum(np.abs(predict_probablity * Y_test_batch)) / np.sum(np.abs(predict_result) + np.abs(Y_test_batch))
                            print "epoch:", epoch, " global step: ", global_step, "\nIOU accuracy: ", accuracy, "\ntest ae loss:", loss_t
                            print "weight of foreground : ", weight_for
                            print "upper threshold of testing", (upper_threshold + test_extra_threshold)
                            train_summary = sess.run(train_merge_op,
                                                     feed_dict={block_acc: accuracy, X: X_test_batch,
                                                                threshold: upper_threshold + test_extra_threshold,
                                                                Y: Y_test_batch, training: False, w: weight_for})
                            sum_writer_train.add_summary(train_summary, global_step=global_step)
                        except Exception, e:
                            print e
                    #### model saving
                    if i % model_save_step == 0 and epoch % 1 == 0:
                        saver.save(sess, save_path=self.train_models_dir + 'model.cptk')
                        print "epoch:", epoch, " i:", i, "regular model saved!"
            else:
                print "bad data , next epoch", epoch
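# Sketch (assumption) of the learning-rate schedule used above, written as a pure function:
# the rate starts at ori_lr and is multiplied by `power` once every `decay_step` epochs, which
# is what the in-loop "learning_rate_g = learning_rate_g * power" update amounts to.
def stepped_lr(ori_lr, power, decay_step, epoch):
    return ori_lr * (power ** (epoch // decay_step))

# e.g. stepped_lr(1e-4, 0.9, 4, 10) gives 1e-4 * 0.9**2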
def get_array(dicom_dir):
    img = dicom_read.read_dicoms(dicom_dir)
    ret_array = ST.GetArrayFromImage(img)
    ret_array = np.transpose(ret_array, [2, 1, 0])
    return ret_array
import SimpleITK as ST
import dicom_read
import numpy as np

# Convert the airway mask DICOM series to a single VTK volume.
img_dir = './WANG_REN/airway'
img = dicom_read.read_dicoms(img_dir)
airway_array = ST.GetArrayFromImage(img)
# Note: the two transposes below cancel each other out, so the array is written in its original axis order.
airway_array_1 = np.transpose(airway_array, [2, 1, 0])
airway_array = np.transpose(airway_array_1, [2, 1, 0])
airway_img = ST.GetImageFromArray(airway_array)
ST.WriteImage(airway_img, './WANG_REN/airway.vtk')
import os
import shutil
import tensorflow as tf
import scipy.io
import tools
import numpy as np
import time
import test
import SimpleITK as ST
from dicom_read import read_dicoms
import gc

input_shape = [64, 64, 128]
test_dir = './FU_LI_JUN/'
origin_dir = read_dicoms(test_dir + "original1")
test_data = tools.Test_data(origin_dir, input_shape, 'vtk_data')
test_data.output_origin()
print "end"
def train(self, data):
    best_acc = 0
    # X = tf.placeholder(shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
    X = tf.placeholder(shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
    # Y = tf.placeholder(shape=[batch_size, output_shape[0], output_shape[1], output_shape[2]], dtype=tf.float32)
    Y = tf.placeholder(shape=[batch_size, output_shape[0], output_shape[1], output_shape[2]], dtype=tf.float32)
    lr = tf.placeholder(tf.float32)
    training = tf.placeholder(tf.bool)
    with tf.variable_scope('ae'):
        Y_pred, Y_pred_modi, Y_pred_nosig = self.ae_u(X, training, batch_size)
    with tf.variable_scope('dis'):
        XY_real_pair = self.dis(X, Y, training)
    with tf.variable_scope('dis', reuse=True):
        XY_fake_pair = self.dis(X, Y_pred, training)
    with tf.device('/gpu:' + GPU0):
        ################################ ae loss
        Y_ = tf.reshape(Y, shape=[batch_size, -1])
        Y_pred_modi_ = tf.reshape(Y_pred_modi, shape=[batch_size, -1])
        w = 0.85
        ae_loss = tf.reduce_mean(
            -tf.reduce_mean(w * Y_ * tf.log(Y_pred_modi_ + 1e-8), reduction_indices=[1]) -
            tf.reduce_mean((1 - w) * (1 - Y_) * tf.log(1 - Y_pred_modi_ + 1e-8), reduction_indices=[1]))
        sum_ae_loss = tf.summary.scalar('ae_loss', ae_loss)
        ################################ wgan loss
        gan_g_loss = -tf.reduce_mean(XY_fake_pair)
        gan_d_loss = tf.reduce_mean(XY_fake_pair) - tf.reduce_mean(XY_real_pair)
        sum_gan_g_loss = tf.summary.scalar('gan_g_loss', gan_g_loss)
        sum_gan_d_loss = tf.summary.scalar('gan_d_loss', gan_d_loss)
        alpha = tf.random_uniform(shape=[batch_size, input_shape[0] * input_shape[1] * input_shape[2]], minval=0.0, maxval=1.0)
        Y_pred_ = tf.reshape(Y_pred, shape=[batch_size, -1])
        differences_ = Y_pred_ - Y_
        interpolates = Y_ + alpha * differences_
        with tf.variable_scope('dis', reuse=True):
            XY_fake_intep = self.dis(X, interpolates, training)
        gradients = tf.gradients(XY_fake_intep, [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)
        gan_d_loss += 10 * gradient_penalty
        ################################# ae + gan loss
        gan_g_w = 5
        ae_w = 100 - gan_g_w
        ae_gan_g_loss = ae_w * ae_loss + gan_g_w * gan_g_loss
    with tf.device('/gpu:' + GPU0):
        ae_var = [var for var in tf.trainable_variables() if var.name.startswith('ae')]
        dis_var = [var for var in tf.trainable_variables() if var.name.startswith('dis')]
        ae_g_optim = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999,
                                            epsilon=1e-8).minimize(ae_gan_g_loss, var_list=ae_var)
        dis_optim = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999,
                                           epsilon=1e-8).minimize(gan_d_loss, var_list=dis_var)
    print tools.Ops.variable_count()
    sum_merged = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=1)
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.visible_device_list = GPU0
    with tf.Session(config=config) as sess:
        # if os.path.exists(self.train_models_dir):
        #     try:
        #         saver.restore(sess,self.train_models_dir+'model.cptk')
        #     except Exception,e:
        #         saver.restore(sess,'./regular/'+'model.cptk')
        sum_writer_train = tf.summary.FileWriter(self.train_sum_dir, sess.graph)
        sum_write_test = tf.summary.FileWriter(self.test_sum_dir)
        if os.path.isfile(self.train_models_dir + 'model.cptk.data-00000-of-00001'):
            print "restoring saved model"
            saver.restore(sess, self.train_models_dir + 'model.cptk')
        else:
            sess.run(tf.global_variables_initializer())
        for epoch in range(1500):
            # data.shuffle_X_Y_files(label='train')
            #### full testing
            # ...
            if epoch % 2 == 0:
                print '********************** FULL TESTING ********************************'
                time_begin = time.time()
                dicom_dir = "./3Dircadb1.2/PATIENT_DICOM"
                mask_dir = "./3Dircadb1.2/MASKS_DICOM/liver"
                test_batch_size = batch_size
                # test_X = tf.placeholder(
                #     shape=[test_batch_size, input_shape[0], input_shape[1], input_shape[2]],
                #     dtype=tf.float32)
                # test_Y_pred, test_Y_pred_modi, test_Y_pred_nosig = self.ae_u(test_X, training,test_batch_size)
                space, resized_array = test.get_organized_data(dicom_dir, input_shape)
                test_mask = read_dicoms(mask_dir)
                array_mask = ST.GetArrayFromImage(test_mask)
                array_mask = np.transpose(array_mask, (2, 1, 0))
                print "mask shape: ", np.shape(array_mask)
                time1 = time.time()
                block_num = 0
                inputs = {}
                results = {}
                shape_resized = np.shape(resized_array)
                print "input shape: ", shape_resized
                # Cut the volume into half-overlapping blocks along the z axis.
                for i in range(0, shape_resized[2], output_shape[2] / 2):
                    if i + output_shape[2] <= shape_resized[2]:
                        inputs[block_num] = resized_array[:, :, i:i + output_shape[2]]
                    else:
                        final_block = np.zeros([output_shape[0], output_shape[1], output_shape[2]], np.float32)
                        print i, shape_resized[2]
                        final_block[:, :, :shape_resized[2] - i] = resized_array[:, :, i:shape_resized[2]]
                        inputs[block_num] = final_block[:, :, :]
                    block_num = block_num + 1
                numbers = inputs.keys()
                # print numbers
                for i in range(0, len(numbers), test_batch_size):
                    if i + test_batch_size < len(numbers):
                        temp_input = np.zeros([test_batch_size, input_shape[0], input_shape[1], input_shape[2]])
                        for j in range(test_batch_size):
                            temp_input[j, :, :, :] = inputs[i + j][:, :, :]
                        Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                            [Y_pred, Y_pred_modi, Y_pred_nosig],
                            feed_dict={X: temp_input, training: False})
                        for j in range(test_batch_size):
                            results[i + j] = Y_temp_modi[j, :, :, :, 0]
                    else:
                        temp_batch_size = len(numbers) - i
                        temp_input = np.zeros([temp_batch_size, input_shape[0], input_shape[1], input_shape[2]])
                        for j in range(temp_batch_size):
                            temp_input[j, :, :, :] = inputs[i + j][:, :, :]
                        X_temp = tf.placeholder(shape=[temp_batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
                        with tf.variable_scope('ae', reuse=True):
                            Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp = self.ae_u(X_temp, training, temp_batch_size)
                        Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                            [Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp],
                            feed_dict={X_temp: temp_input, training: False})
                        for j in range(temp_batch_size):
                            results[i + j] = Y_temp_modi[j, :, :, :, 0]
                # print results.keys()
                # Stitch the overlapping block predictions back together: each interior voxel is
                # covered by two blocks, so thresholded votes are summed and voxels with >= 2 votes are kept.
                result_final = np.zeros([shape_resized[0], shape_resized[1],
                                         len(numbers) * (output_shape[2] / 2) + output_shape[2] / 2], np.float32)
                for i in range(0, len(numbers)):
                    # note: i == len(numbers) never occurs inside this loop, so only the first block's votes are doubled
                    if i == 0 or i == len(numbers):
                        result_final[:, :, i * output_shape[2] / 2:i * output_shape[2] / 2 + output_shape[2]] += \
                            2 * np.float32((results[i][:, :, :] - 0.01) > 0)
                    else:
                        result_final[:, :, i * output_shape[2] / 2:i * output_shape[2] / 2 + output_shape[2]] += \
                            np.float32((results[i][:, :, :] - 0.01) > 0)
                    # print i * output_shape[2]/2, i * output_shape[2]/2 + output_shape[2]
                    # print i
                final_array = np.float32(result_final >= 2)
                final_array = final_array[:, :, 0:shape_resized[2]]
                # print np.max(final_array)
                print "result shape: ", np.shape(final_array)
                final_img = ST.GetImageFromArray(np.transpose(final_array, [2, 1, 0]))
                final_img.SetSpacing(space)
                print "writing full testing result"
                ST.WriteImage(final_img, './test_result/test_result' + str(epoch + already_trained) + '.vtk')
                if epoch == 0:
                    mask_img = ST.GetImageFromArray(np.transpose(array_mask, [2, 1, 0]))
                    mask_img.SetSpacing(space)
                    ST.WriteImage(mask_img, './test_result/test_mask.vtk')
                test_IOU = 2 * np.sum(final_array * array_mask) / (np.sum(final_array) + np.sum(array_mask))
                print "IOU accuracy: ", test_IOU
                time_end = time.time()
                print '******************** time of full testing: ' + str(time_end - time_begin) + 's ********************'
            data.shuffle_X_Y_pairs()
            total_train_batch_num = data.total_train_batch_num
            # train_files=data.X_train_files
            # test_files=data.X_test_files
            # total_train_batch_num = 500
            print "total_train_batch_num:", total_train_batch_num
            for i in range(total_train_batch_num):
                #### training
                X_train_batch, Y_train_batch = data.load_X_Y_voxel_train_next_batch()
                # X_train_batch, Y_train_batch = data.load_X_Y_voxel_grids_train_next_batch()
                # Y_train_batch=np.reshape(Y_train_batch,[batch_size, output_shape[0], output_shape[1], output_shape[2], 1])
                gan_d_loss_c, = sess.run([gan_d_loss],
                                         feed_dict={X: X_train_batch, Y: Y_train_batch, training: False})
                ae_loss_c, gan_g_loss_c, sum_train = sess.run([ae_loss, gan_g_loss, sum_merged],
                                                              feed_dict={X: X_train_batch, Y: Y_train_batch, training: False})
                # float division so the polynomial decay actually takes effect (integer division made this factor stay at 1)
                learning_rate_g = ori_lr * (1 - (epoch + already_trained) / 1500.0) ** power
                sess.run([ae_g_optim],
                         feed_dict={X: X_train_batch, Y: Y_train_batch, lr: learning_rate_g, training: True})
                if epoch <= 5:
                    sess.run([dis_optim],
                             feed_dict={X: X_train_batch, Y: Y_train_batch, lr: lr_down[0], training: True})
                elif epoch <= 20:
                    sess.run([dis_optim],
                             feed_dict={X: X_train_batch, Y: Y_train_batch, lr: lr_down[1], training: True})
                else:
                    sess.run([dis_optim],
                             feed_dict={X: X_train_batch, Y: Y_train_batch, lr: lr_down[2], training: True})
                sum_writer_train.add_summary(sum_train, epoch * total_train_batch_num + i)
                if i % 2 == 0:
                    print "epoch:", epoch, " i:", i, " train ae loss:", ae_loss_c, " gan g loss:", gan_g_loss_c, " gan d loss:", gan_d_loss_c
                #### testing
                if i % 10 == 0 and epoch % 1 == 0:
                    X_test_batch, Y_test_batch = data.load_X_Y_voxel_test_next_batch(fix_sample=False)
                    # Y_test_batch = np.reshape(Y_test_batch,[batch_size, output_shape[0], output_shape[1], output_shape[2], 1])
                    ae_loss_t, gan_g_loss_t, gan_d_loss_t, Y_test_pred, Y_test_modi, Y_test_pred_nosig = \
                        sess.run([ae_loss, gan_g_loss, gan_d_loss, Y_pred, Y_pred_modi, Y_pred_nosig],
                                 feed_dict={X: X_test_batch, Y: Y_test_batch, training: False})
                    predict_result = np.float32(Y_test_modi > 0.01)
                    predict_result = np.reshape(predict_result, [batch_size, input_shape[0], input_shape[1], input_shape[2]])
                    # Foreground
                    accuracy_for = np.sum(predict_result * Y_test_batch) / np.sum(Y_test_batch)
                    # Background
                    accuracy_bac = np.sum((1 - predict_result) * (1 - Y_test_batch)) / (np.sum(1 - Y_test_batch))
                    # IOU
                    predict_probablity = np.float32(Y_test_modi - 0.01)
                    predict_probablity = np.reshape(predict_probablity, [batch_size, input_shape[0], input_shape[1], input_shape[2]])
                    accuracy = 2 * np.sum(np.abs(predict_probablity * Y_test_batch)) / np.sum(np.abs(predict_result) + np.abs(Y_test_batch))
                    if epoch % 30 == 0 and epoch > 0:
                        to_save = {'X_test': X_test_batch, 'Y_test_pred': Y_test_pred, 'Y_test_true': Y_test_batch}
                        scipy.io.savemat(self.test_results_dir + 'X_Y_pred_' + str(epoch).zfill(2) + '_' + str(i).zfill(4) + '.mat',
                                         to_save, do_compression=True)
                    print "epoch:", epoch, " i:", i, "\nacc_for: ", accuracy_for, "\nacc_bac: ", accuracy_bac, "\nIOU accuracy: ", accuracy, "\ntest ae loss:", ae_loss_t, " gan g loss:", gan_g_loss_t, " gan d loss:", gan_d_loss_t
                    if accuracy > best_acc:
                        saver.save(sess, save_path=self.train_models_dir + 'model.cptk')
                        print "epoch:", epoch, " i:", i, "best model saved!"
                        best_acc = accuracy
                if i % 50 == 0 and epoch % 10 == 0:
                    # data.plotFromVoxels(Y_test_modi[1,:,:,:,:]-0.01,Y_test_batch[1,:,:,:])
                    # print "original"
                    # print np.max(Y_test_pred_nosig)
                    # print np.min(Y_test_pred_nosig)
                    # print "sigmoided"
                    # print np.max(Y_test_pred)
                    # print np.min(Y_test_pred)
                    print "modified"
                    print np.max(Y_test_modi[0, :, :, :, :] - 0.01)
                    print np.min(Y_test_modi[0, :, :, :, :] - 0.01)
                #### model saving
                if i % 30 == 0 and epoch % 1 == 0:
                    # regular_train_dir = "./regular/"
                    # if not os.path.exists(regular_train_dir):
                    #     os.makedirs(regular_train_dir)
                    saver.save(sess, save_path=self.train_models_dir + 'model.cptk')
                    print "epoch:", epoch, " i:", i, "regular model saved!"
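# Sketch (assumption, not from the original file) of the overlap-voting reconstruction used in
# the full-testing branch above, isolated as a function: blocks overlap by half their depth, so
# every interior voxel is covered by two blocks, and a voxel is kept only when both overlapping
# predictions vote for it. Border blocks have no partner, so their vote is doubled here.
import numpy as np

def stitch_half_overlap(blocks, depth, full_depth, threshold=0.01):
    # blocks: list of (H, W, depth) probability volumes taken every depth//2 slices along z
    votes = np.zeros(blocks[0].shape[:2] + (len(blocks) * (depth // 2) + depth // 2,), np.float32)
    for idx, block in enumerate(blocks):
        start = idx * (depth // 2)
        weight = 2.0 if idx in (0, len(blocks) - 1) else 1.0
        votes[:, :, start:start + depth] += weight * np.float32(block > threshold)
    return np.float32(votes >= 2)[:, :, :full_depth]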
def full_testing(self, sess, epoch):
    print '********************** FULL TESTING ********************************'
    time_begin = time.time()
    origin_data = read_dicoms(test_dir + "original1")
    mask_dir = test_dir + "artery"
    test_batch_size = batch_size
    test_data = tools.Test_data(origin_data, input_shape, 'vtk_data')
    test_data.organize_blocks()
    test_mask = read_dicoms(mask_dir)
    array_mask = ST.GetArrayFromImage(test_mask)
    array_mask = np.transpose(array_mask, (2, 1, 0))
    print "mask shape: ", np.shape(array_mask)
    block_numbers = test_data.blocks.keys()
    for i in range(0, len(block_numbers), test_batch_size):
        batch_numbers = []
        if i + test_batch_size < len(block_numbers):
            # full batch: run the shared multiclass graph directly
            temp_input = np.zeros([test_batch_size, input_shape[0], input_shape[1], input_shape[2]])
            for j in range(test_batch_size):
                temp_num = block_numbers[i + j]
                temp_block = test_data.blocks[temp_num]
                batch_numbers.append(temp_num)
                block_array = temp_block.load_data()
                block_shape = np.shape(block_array)
                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
            pred_unsoft, softmax_pred, argmax_label = \
                sess.run([self.pred_unsoft, self.softmax_pred, self.argmax_label],
                         feed_dict={self.X: temp_input, self.training: False})
            for j in range(test_batch_size):
                test_data.upload_result_multiclass(batch_numbers[j], argmax_label[j, :, :, :], mask_type)
        else:
            # remainder batch: build a smaller placeholder that reuses the trained weights
            temp_batch_size = len(block_numbers) - i
            temp_input = np.zeros([temp_batch_size, input_shape[0], input_shape[1], input_shape[2]])
            for j in range(temp_batch_size):
                temp_num = block_numbers[i + j]
                temp_block = test_data.blocks[temp_num]
                batch_numbers.append(temp_num)
                block_array = temp_block.load_data()
                block_shape = np.shape(block_array)
                temp_input[j, 0:block_shape[0], 0:block_shape[1], 0:block_shape[2]] += block_array
            X_temp = tf.placeholder(shape=[temp_batch_size, input_shape[0], input_shape[1], input_shape[2]], dtype=tf.float32)
            with tf.variable_scope("segment", reuse=True):
                temp_unsoft, softmax_temp, argmax_temp = self.Segmentor(X_temp, self.training, batch_size)
            pred_unsoft_temp, softmax_pred_temp, argmax_label_temp = \
                sess.run([temp_unsoft, softmax_temp, argmax_temp],
                         feed_dict={X_temp: temp_input, self.training: False})
            for j in range(temp_batch_size):
                test_data.upload_result_multiclass(batch_numbers[j], argmax_label_temp[j, :, :, :], mask_type)
    test_result_array = test_data.get_result_()
    print "result shape: ", np.shape(test_result_array)
    to_be_transformed = self.post_process(test_result_array)
    if epoch == 0:
        mask_img = ST.GetImageFromArray(np.transpose(array_mask, [2, 1, 0]))
        mask_img.SetSpacing(test_data.space)
        ST.WriteImage(mask_img, './test_result/test_mask.vtk')
    test_IOU = 2 * np.sum(to_be_transformed * array_mask) / (np.sum(to_be_transformed) + np.sum(array_mask))
    test_summary = sess.run(self.test_merge_op, feed_dict={self.total_acc: test_IOU})
    self.sum_write_test.add_summary(test_summary, global_step=epoch)
    print "IOU accuracy: ", test_IOU
    time_end = time.time()
    print '******************** time of full testing: ' + str(time_end - time_begin) + 's ********************'
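# Sketch (assumption, not from the original repo): computing a per-class Dice score from an
# argmax label volume like the one uploaded via upload_result_multiclass above.
import numpy as np

def per_class_dice(argmax_labels, mask_labels, num_classes):
    scores = {}
    for c in range(1, num_classes):                   # skip background class 0
        pred_c = np.float32(argmax_labels == c)
        mask_c = np.float32(mask_labels == c)
        denom = np.sum(pred_c) + np.sum(mask_c)
        scores[c] = 2.0 * np.sum(pred_c * mask_c) / denom if denom > 0 else 0.0
    return scores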