def Lung_Seg(dicom_dir):
    time1 = time.time()
    original_img = read_dicoms(dicom_dir)
    net = Network()
    final_img = net.test(dicom_dir)
    del net
    gc.collect()
    img_spacing = final_img.GetSpacing()
    time2 = time.time()
    print "time cost for lung segment: ", str(time2 - time1), 's'
    time3 = time.time()
    final_img, growed_mask = post_process(final_img, dicom_dir)
    growed_mask.SetSpacing(img_spacing)
    print "Writing lung mask"
    # ST.WriteImage(growed_mask, './output/lung_mask.vtk')
    time4 = time.time()
    print "time cost for lung post_process: ", str(time4 - time3), 's'
    final_array = ST.GetArrayFromImage(growed_mask)
    img_array = ST.GetArrayFromImage(original_img)
    # mask the original intensities with the binary lung mask
    lung_array = final_array * img_array
    # lung_array = lung_array + np.min(lung_array)*2*np.int8(lung_array==0)
    lung_img = ST.GetImageFromArray(lung_array)
    lung_img.SetSpacing(img_spacing)
    print "Writing lung image"
    # ST.WriteImage(lung_img, './output/lung_img.vtk')
    return lung_img
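# A minimal usage sketch for Lung_Seg (illustration only: the path below is a
# placeholder, and ST / read_dicoms / Network are assumed to be this module's
# imports, as used above). The output path mirrors the commented-out write
# inside Lung_Seg.
def _lung_seg_example(dicom_dir='/path/to/dicom_series'):
    lung_img = Lung_Seg(dicom_dir)                    # masked lung volume
    ST.WriteImage(lung_img, './output/lung_img.vtk')  # assumes ./output exists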
def post_process(img, dicom_dir):
    # print img.GetSize()
    original_img = read_dicoms(dicom_dir)
    img_array = np.transpose(ST.GetArrayFromImage(img), [2, 1, 0])
    img_shape = np.shape(img_array)
    # Grow an outer-air mask from the volume corners so that noise outside
    # the body gets excluded
    original_array = ST.GetArrayFromImage(original_img)
    min_val = np.min(original_array)
    outer_seeds = []
    inner_step = 2
    outer_seeds.append([inner_step, inner_step, img_shape[2] - inner_step])
    outer_seeds.append([inner_step, img_shape[1] - inner_step, inner_step])
    outer_seeds.append([img_shape[0] - inner_step, inner_step, inner_step])
    outer_seeds.append([inner_step, img_shape[1] - inner_step, img_shape[2] - inner_step])
    outer_seeds.append([img_shape[0] - inner_step, inner_step, img_shape[2] - inner_step])
    outer_seeds.append([img_shape[0] - inner_step, img_shape[1] - inner_step, inner_step])
    outer_seeds.append([img_shape[0] - inner_step, img_shape[1] - inner_step, img_shape[2] - inner_step])
    outer_space = ST.NeighborhoodConnected(original_img, outer_seeds, min_val * 1.0, -200, [1, 1, 0], 1.0)
    # ST.WriteImage(outer_space, './outer_space.vtk')
    outer_array = ST.GetArrayFromImage(outer_space)
    outer_array = np.transpose(outer_array, [2, 1, 0])
    # Take the outer noise out of the predicted mask
    inner_array = np.float32((img_array - outer_array) > 0)
    inner_img = ST.GetImageFromArray(np.transpose(inner_array, [2, 1, 0]))
    # ST.WriteImage(inner_img, './inner_mask.vtk')
    median_filter = ST.MedianImageFilter()
    median_filter.SetRadius(1)
    median_img = median_filter.Execute(inner_img)
    median_array = ST.GetArrayFromImage(median_img)
    median_array = np.transpose(median_array, [2, 1, 0])
    array_shape = np.shape(median_array)
    # Pick a seed for region growing: along each axis, take the slice with
    # the largest mask area
    seed = [0, 0, 0]
    max_sum = 0
    for i in range(array_shape[0]):
        temp_sum = np.sum(median_array[i, :, :])
        if max_sum < temp_sum:
            max_sum = temp_sum
            seed[0] = i
    max_sum = 0
    for i in range(array_shape[1]):
        temp_sum = np.sum(median_array[:, i, :])
        if max_sum < temp_sum:
            max_sum = temp_sum
            seed[1] = i
    max_sum = 0
    for i in range(array_shape[2]):
        temp_sum = np.sum(median_array[:, :, i])
        if max_sum < temp_sum:
            max_sum = temp_sum
            seed[2] = i
    # print seed
    # Grow the final lung mask from the chosen seed
    growed_img = ST.NeighborhoodConnected(img, [seed], 0.9, 1, [1, 1, 1], 1.0)
    return img, growed_img
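# A standalone sketch (illustration only, on a synthetic volume) of the
# ST.NeighborhoodConnected call used twice above: starting from the seed
# indices, it labels the connected region whose neighborhood (given radius)
# stays inside [lower, upper] with the replace value. The cube and the
# thresholds here are assumptions for demonstration, not pipeline data.
def _region_growing_example():
    vol = np.zeros([32, 32, 32], np.float32)
    vol[8:24, 8:24, 8:24] = 1.0      # a bright cube the region can grow into
    img = ST.GetImageFromArray(vol)
    grown = ST.NeighborhoodConnected(img, [[16, 16, 16]], 0.9, 1.1, [1, 1, 1], 1.0)
    return ST.GetArrayFromImage(grown)   # binary mask of the grown region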
def __init__(self, data, block_shape, data_type):
    if data_type == 'dicom_data':
        self.img = read_dicoms(data)
    elif data_type == 'vtk_data':
        self.img = data
    else:
        raise ValueError('unknown data type: ' + str(data_type))
    self.space = self.img.GetSpacing()
    self.image_array = ST.GetArrayFromImage(self.img)
    # SimpleITK returns arrays in [z, y, x] order; transpose to [x, y, z]
    self.image_array = np.transpose(self.image_array, [2, 1, 0])
    self.image_shape = np.shape(self.image_array)
    self.block_shape = block_shape
    self.blocks = dict()
    self.results = dict()
def process_dicom(self, dicom_path):
    img = read_dicoms(dicom_path)
    # transpose from SimpleITK's [z, y, x] order to [x, y, z]
    array = np.transpose(ST.GetArrayFromImage(img), [2, 1, 0])
    return array
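# Instantiation sketch (an assumption: the methods above belong to the
# Test_data helper that train() below creates via tools.Test_data; the block
# shape is illustrative):
#
#   test_data = Test_data('/path/to/dicom_series', [64, 64, 64], 'dicom_data')
#   test_data.organize_blocks()
#   print len(test_data.blocks), "blocks prepared"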
def train(self):
    flags = self.FLAGS
    block_shape = self.block_shape
    record_dir = self.record_dir
    record_dir_test = self.record_dir_test
    batch_size_train = self.batch_size_train
    batch_size_test = self.batch_size_test
    test_step = self.test_step
    threshold = flags.accept_threshold
    LEARNING_RATE_BASE = flags.training_rate_base
    LEARNING_RATE_DECAY = flags.training_rate_decay
    weight_vec = tf.constant([
        flags.airway_weight,
        flags.artery_weight,
        flags.back_ground_weight
    ], tf.float32)

    X = tf.placeholder(dtype=tf.float32,
                       shape=[batch_size_train, block_shape[0],
                              block_shape[1], block_shape[2]])
    training = tf.placeholder(tf.bool)
    with tf.variable_scope('network'):
        seg_pred = self.Dense_Net(X, training, flags.batch_size_train,
                                  flags.accept_threshold)

    # loss function
    '''
    label vector: [airway, artery, background]
    '''
    labels = tf.placeholder(dtype=tf.float32,
                            shape=[batch_size_train, block_shape[0],
                                   block_shape[1], block_shape[2], 3])
    # per-voxel weight: the class weight selected by each voxel's one-hot label
    weight_map = tf.reduce_sum(tf.multiply(labels, weight_vec), 4)
    loss_origin = tf.nn.softmax_cross_entropy_with_logits(logits=seg_pred,
                                                          labels=labels)
    loss_weighted = weight_map * loss_origin
    loss = tf.reduce_mean(loss_weighted)
    tf.summary.scalar('loss', loss)

    # accuracy: Dice overlap on the artery channel
    # predict_softmax = tf.nn.softmax(seg_pred)
    pred_map = tf.argmax(seg_pred, axis=-1)
    pred_map_bool = tf.equal(pred_map, 1)
    artery_pred_mask = tf.cast(pred_map_bool, tf.float32)
    # channel 1 of the label vector [airway, artery, background] is artery
    artery_label = tf.cast(labels[:, :, :, :, 1], tf.float32)
    artery_acc = 2 * tf.reduce_sum(artery_label * artery_pred_mask) / (
        tf.reduce_sum(artery_label + artery_pred_mask))
    tf.summary.scalar('artery_block_acc', artery_acc)

    # training data part
    records = ut.get_records(record_dir)
    records_processor = TF_Records(records, block_shape)
    single_blocks = records_processor.read_records()
    queue = tf.RandomShuffleQueue(capacity=8, min_after_dequeue=4,
                                  dtypes=(
                                      single_blocks['airway'].dtype,
                                      single_blocks['artery'].dtype,
                                      single_blocks['lung'].dtype,
                                      single_blocks['original'].dtype,
                                  ))
    enqueue_op = queue.enqueue((
        single_blocks['airway'],
        single_blocks['artery'],
        single_blocks['lung'],
        single_blocks['original'],
    ))
    (airway_block, artery_block, lung_block, original_block) = queue.dequeue()
    qr = tf.train.QueueRunner(queue, [enqueue_op] * 2)

    # test data part
    records_test = ut.get_records(record_dir_test)
    records_processor_test = TF_Records(records_test, block_shape)
    single_blocks_test = records_processor_test.read_records()
    queue_test = tf.RandomShuffleQueue(capacity=8, min_after_dequeue=4,
                                       dtypes=(
                                           single_blocks_test['airway'].dtype,
                                           single_blocks_test['artery'].dtype,
                                           single_blocks_test['lung'].dtype,
                                           single_blocks_test['original'].dtype,
                                       ))
    enqueue_op_test = queue_test.enqueue((
        single_blocks_test['airway'],
        single_blocks_test['artery'],
        single_blocks_test['lung'],
        single_blocks_test['original'],
    ))
    (airway_block_test, artery_block_test, lung_block_test,
     original_block_test) = queue_test.dequeue()
    qr_test = tf.train.QueueRunner(queue_test, [enqueue_op_test] * 2)

    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.maximum(
        tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                   13500 * 5 / flags.batch_size_train,
                                   LEARNING_RATE_DECAY, staircase=True), 1e-9)
    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9,
                                      beta2=0.999,
                                      epsilon=1e-8).minimize(loss, global_step)

    # merge operation for tensorboard summary
    merge_summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=1)
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        # load variables if saved before
        if len(os.listdir(self.train_models_dir)) > 0:
            print "load saved model"
            sess.run(tf.group(tf.global_variables_initializer(),
                              tf.local_variables_initializer()))
            saver.restore(sess, self.train_models_dir + "train_models.ckpt")
        else:
            sess.run(tf.group(tf.global_variables_initializer(),
                              tf.local_variables_initializer()))

        # coordinator for the reading threads
        coord = tf.train.Coordinator()
        enqueue_threads = qr.create_threads(sess, coord=coord, start=True)
        enqueue_threads_test = qr_test.create_threads(sess, coord=coord, start=True)
        tf.train.start_queue_runners(sess=sess)
        summary_writer_test = tf.summary.FileWriter(self.test_sum_dir, sess.graph)
        summary_writer_train = tf.summary.FileWriter(self.train_sum_dir, sess.graph)

        # main train loop
        for i in range(flags.max_iteration_num):
            # organize a batch of data for training
            label_np = np.zeros([batch_size_train, block_shape[0],
                                 block_shape[1], block_shape[2], 3], np.int16)
            original_np = np.zeros([batch_size_train, block_shape[0],
                                    block_shape[1], block_shape[2]], np.int16)
            # store values into the data block
            for m in range(flags.batch_size_train):
                '''
                label vector: [airway, artery, background]
                '''
                artery_data, airway_data, original_data = \
                    sess.run([artery_block, airway_block, original_block])
                airway_array = airway_data
                artery_array = artery_data
                back_ground_array = np.int16((airway_array + artery_array) == 0)
                check_array = airway_array + artery_array + back_ground_array
                # redraw blocks until every voxel carries exactly one label
                while not np.max(check_array) == np.min(check_array) == 1:
                    artery_data, airway_data, original_data = \
                        sess.run([artery_block, airway_block, original_block])
                    airway_array = airway_data
                    artery_array = artery_data
                    back_ground_array = np.int16((airway_array + artery_array) == 0)
                    check_array = airway_array + artery_array + back_ground_array
                label_np[m, :, :, :, 0] += airway_array
                label_np[m, :, :, :, 1] += artery_array
                label_np[m, :, :, :, 2] += back_ground_array
                original_np[m, :, :, :] += original_data

            train_, step_num = sess.run([train_op, global_step],
                                        feed_dict={X: original_np,
                                                   labels: label_np,
                                                   training: True})

            if step_num % flags.full_test_step == 0:
                # full testing
                print "****************************full testing******************************"
                data_type = "dicom_data"
                test_dicom_dir = '/opt/Multi-Task-data-process/multi_task_data_test/FU_LI_JUN/original1'
                test_mask_dir = '/opt/Multi-Task-data-process/multi_task_data_test/FU_LI_JUN/artery'
                test_mask = ut.read_dicoms(test_mask_dir)
                test_mask_array = np.transpose(ST.GetArrayFromImage(test_mask), [2, 1, 0])
                test_data = tools.Test_data(test_dicom_dir, block_shape, data_type)
                test_data.organize_blocks()
                block_numbers = test_data.blocks.keys()
                blocks_num = len(block_numbers)
                print "block count: ", blocks_num
                time1 = time.time()
                sys.stdout.write("\r>>>deep learning calculating : %f" % (0.0) + "%")
                sys.stdout.flush()
                for m in range(0, blocks_num, batch_size_train):
                    batch_numbers = []
                    if m + batch_size_train < blocks_num:
                        temp_batch_size = batch_size_train
                    else:
                        temp_batch_size = blocks_num - m
                    # the network expects a fixed batch size, so partial
                    # batches are zero-padded
                    temp_input = np.zeros([batch_size_train, block_shape[0],
                                           block_shape[1], block_shape[2]])
                    for j in range(temp_batch_size):
                        temp_num = block_numbers[m + j]
                        temp_block = test_data.blocks[temp_num]
                        batch_numbers.append(temp_num)
                        block_array = temp_block.load_data()
                        data_block_shape = np.shape(block_array)
                        temp_input[j, 0:data_block_shape[0],
                                   0:data_block_shape[1],
                                   0:data_block_shape[2]] += block_array
                    artery_predict = sess.run(artery_pred_mask,
                                              feed_dict={X: temp_input,
                                                         training: False})
                    for j in range(temp_batch_size):
                        test_data.upload_result(batch_numbers[j],
                                                artery_predict[j, :, :, :])
                    if (m) % (batch_size_train * 10) == 0:
                        sys.stdout.write("\r>>>deep learning calculating : %f"
                                         % ((1.0 * m) * 100 / blocks_num) + "%")
                        sys.stdout.flush()
                sys.stdout.write("\r>>>deep learning calculating : %f" % (100.0) + "%")
                sys.stdout.flush()
                time2 = time.time()
                print "\ndeep learning time consume : ", str(time2 - time1)
                time3 = time.time()
                test_result_array = test_data.get_result()
                # keep voxels predicted positive in at least two overlapping blocks
                test_result_array = np.float32(test_result_array >= 2)
                print "result shape: ", np.shape(test_result_array)
                r_s = np.shape(test_result_array)  # result shape
                e_t = 10  # edge thickness
                # zero out the volume borders to suppress edge artifacts
                to_be_transformed = np.zeros(r_s, np.float32)
                to_be_transformed[e_t:r_s[0] - e_t,
                                  e_t:r_s[1] - e_t,
                                  0:r_s[2] - e_t] += \
                    test_result_array[e_t:r_s[0] - e_t,
                                      e_t:r_s[1] - e_t,
                                      0:r_s[2] - e_t]
                print "maximum value in mask: ", np.max(to_be_transformed)
                print "minimum value in mask: ", np.min(to_be_transformed)
                final_img = ST.GetImageFromArray(np.transpose(to_be_transformed, [2, 1, 0]))
                final_img.SetSpacing(test_data.space)
                time4 = time.time()
                print "post processing time consume : ", str(time4 - time3)
                print "writing final testing result"
                if not os.path.exists('./test_result'):
                    os.makedirs('./test_result')
                print './test_result/test_result_' + str(step_num) + '.vtk'
                ST.WriteImage(final_img,
                              './test_result/test_result_' + str(step_num) + '.vtk')
                # Dice overlap between prediction and ground-truth artery mask:
                # 2*|A n B| / (|A| + |B|)
                total_accuracy = 2 * np.sum(1.0 * test_mask_array * to_be_transformed) / \
                    np.sum(1.0 * (test_mask_array + to_be_transformed))
                print "total Dice accuracy : ", total_accuracy
                if i == 0:
                    mask_img = ST.GetImageFromArray(np.transpose(test_mask_array, [2, 1, 0]))
                    mask_img.SetSpacing(test_data.space)
                    ST.WriteImage(mask_img, './test_result/mask_img.vtk')
                print "***********************full testing end*******************************"

            if i % 10 == 0:
                sum_train, l_val = sess.run([merge_summary_op, loss],
                                            feed_dict={X: original_np,
                                                       labels: label_np,
                                                       training: False})
                summary_writer_train.add_summary(sum_train, global_step=int(step_num))
                print "train :\nstep %d , loss = %f\n =====================" \
                    % (int(step_num), l_val)

            if i % test_step == 0 and i > 0:
                label_np_test = np.zeros([batch_size_train, block_shape[0],
                                          block_shape[1], block_shape[2], 3], np.int16)
                original_np_test = np.zeros([batch_size_train, block_shape[0],
                                             block_shape[1], block_shape[2]], np.int16)
                for m in range(flags.batch_size_train):
                    '''
                    label vector: [airway, artery, background]
                    '''
                    artery_data, airway_data, original_data = \
                        sess.run([artery_block_test, airway_block_test,
                                  original_block_test])
                    airway_array = airway_data
                    artery_array = artery_data
                    back_ground_array = np.int16((airway_array + artery_array) == 0)
                    check_array = airway_array + artery_array + back_ground_array
                    # redraw test blocks until every voxel carries exactly one label
                    while not np.max(check_array) == np.min(check_array) == 1:
                        artery_data, airway_data, original_data = \
                            sess.run([artery_block_test, airway_block_test,
                                      original_block_test])
                        airway_array = airway_data
                        artery_array = artery_data
                        back_ground_array = np.int16((airway_array + artery_array) == 0)
                        check_array = airway_array + artery_array + back_ground_array
                    label_np_test[m, :, :, :, 0] += airway_array
                    label_np_test[m, :, :, :, 1] += artery_array
                    label_np_test[m, :, :, :, 2] += back_ground_array
                    original_np_test[m, :, :, :] += original_data
                sum_test, accuracy_artery, l_val, predict_array = \
                    sess.run([merge_summary_op, artery_acc, loss, pred_map],
                             feed_dict={X: original_np_test,
                                        labels: label_np_test,
                                        training: False})
                summary_writer_test.add_summary(sum_test, global_step=int(step_num))
"\ntest :\nstep %d , artery loss = %f \n\t artery block accuracy = %f\n=====================" \ % (int(step_num), l_val, accuracy_artery) print "artery percentage : ", str( np.float32( np.sum(np.float32(lable_np_test[:, :, :, :, 1])) / (flags.batch_size_train * block_shape[0] * block_shape[1] * block_shape[2]))) # print "prediction of airway : maximum = ",np.max(airway_np_sig)," minimum = ",np.min(airway_np_sig) print "prediction : maximum = ", np.max( predict_array), " minimum = ", np.min(predict_array) if i % 100 == 0: saver.save(sess, self.train_models_dir + "train_models.ckpt") print "regular model saved! step count : ", step_num coord.request_stop() coord.join(enqueue_threads) coord.join(enqueue_threads_test)