def Segment():
    patient = readindex()
    patient = np.array(patient)
    for i in range(25):
        data = DataProcess(i + 1)
        print('data shape:' + str(data.shape))
        OUTMHA = np.zeros([data.shape[0], data.shape[1], data.shape[2]])
        for x in range(data.shape[0]):
            for y in range(data.shape[1] - 64):
                # Note: the graph, session and checkpoint are rebuilt for every
                # (x, y) position here; the Segment() further below hoists them
                # out of the loops.
                x1, x2 = Getbatch(data, x, y + 32)
                print('x1,x2:' + str(x1.shape) + str(x2.shape))
                X1 = tf.placeholder(dtype=tf.float32, shape=[None, 65, 65, 4],
                                    name='big_patch')
                X2 = tf.placeholder(dtype=tf.float32, shape=[None, 33, 33, 4],
                                    name='small_patch')
                OUT = Network(X1, X2, data.shape[2] - 64)
                config = tf.ConfigProto()
                config.gpu_options.allow_growth = True
                sess = tf.Session(config=config)
                saver = tf.train.Saver()
                saver.restore(
                    sess=sess,
                    save_path='E:/zhangjinjing/brain2D/brats2013/brain_session_2013_kongdong.ckpt')
                OUT = sess.run(OUT, feed_dict={X1: x1, X2: x2})
                # Write the predicted labels for the row centred at (x, y + 32).
                OUTMHA[x, y + 32, 32:data.shape[2] - 32] = np.argmax(OUT, 1)
        print(OUTMHA.shape)
        util.save_nuarray_as_mha(
            'E://zhangjinjing/brain2D/2013test/VSD.Seg_HG_001.' +
            str(patient[i]) + '.mha', OUTMHA)
def segment_unparalel(patient_parent_file, description, model, extra_file):
    before = int(round(time.time() * 1000))
    print("non-parallel segmentation")
    print(patient_parent_file)
    p = Patient()
    p.set_parent_file(patient_parent_file)
    p.set_window_size(model_setup.window_size)
    p.start_iteration()
    print(p.file_FLAIR)
    # The last number in the FLAIR file name is the VSD image id.
    image_name = re.findall(r'\d+', p.file_FLAIR)
    image_name = image_name[len(image_name) - 1]
    path_ttt, file_nnn = os.path.split(patient_parent_file)
    image_name = "VSD." + description + "_(" + file_nnn + ")." + image_name + ".mha"
    print("result name : %s" % image_name)
    file_result_name = os.path.join(patient_parent_file, image_name)
    extra_file = os.path.join(extra_file, image_name)
    if os.path.isfile(file_result_name):
        return  # this patient has already been segmented
    segmentation = numpy.zeros(shape=p.label.shape)
    z = 0
    utility.save_nuarray_as_mha(file_result_name, segmentation)
    utility.save_nuarray_as_mha(extra_file, segmentation)
    while z < p.limit_z:
        for y in range(0, p.limit_y):
            if y % 10 == 0:
                print("%s %s" % (z, y))
            for x in range(0, p.limit_x):
                if p.is_back_ground(z, y, x):
                    segmentation[z][y][x] = 0
                else:
                    t, g = p.get_batch_at(z, y, x)
                    features = numpy.asarray([t]).astype('float32')
                    r = model.predict_classes(features, batch_size=1, verbose=False)
                    segmentation[z][y][x] = r
        z += 1
        # Save after every slice so an interrupted run can still be inspected.
        utility.save_nuarray_as_mha(file_result_name, segmentation)
        utility.save_nuarray_as_mha(extra_file, segmentation)
    p.stop_iteration()
    print("segmentation done")
    total_time = int(round(time.time() * 1000)) - before
    print("total time : %s minutes" % (total_time / 1000.0 / 60))
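# The loop above issues one predict_classes() call per voxel. A hypothetical
# sketch of the same step batched per row (segment_row and its signature are
# illustrative, not part of the original code):
def segment_row(p, model, segmentation, z, y):
    # Collect the non-background voxels of one row into a single batch.
    coords = [x for x in range(p.limit_x) if not p.is_back_ground(z, y, x)]
    if not coords:
        return
    features = numpy.asarray(
        [p.get_batch_at(z, y, x)[0] for x in coords]).astype('float32')
    # One forward pass classifies the whole row at once.
    labels = model.predict_classes(features, batch_size=len(coords), verbose=False)
    for x, r in zip(coords, labels):
        segmentation[z][y][x] = r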
def Segment():
    X1 = tf.placeholder(dtype=tf.float32, shape=[None, 65, 65, 4], name='big_patch')
    X2 = tf.placeholder(dtype=tf.float32, shape=[None, 33, 33, 4], name='small_patch')
    OUT = Network(X1, X2)
    patient = readindex()
    patient = np.array(patient)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(
        sess, 'E:/zhangjinjing/brain2D/brats2013/brain_session_2013_8.ckpt')
    for i in range(25):
        data = DataProcess(i)
        print('data shape:' + str(data.shape))
        OUTMHA = np.zeros([data.shape[0], data.shape[1], data.shape[2]])
        print('MHA:', OUTMHA.shape)
        index = Getcoord(data)
        print('index:', index.shape)
        n = 0
        # Classify 175 voxels per forward pass; any tail shorter than 175
        # is left as background.
        while n < index.shape[0] - 175:
            subindex = np.array(index[n:n + 175])
            print('subindex:', subindex.shape)
            print('n:', n)
            x1, x2 = Getbatch(data, subindex)
            n = n + 175
            OUTLINE = sess.run(OUT, feed_dict={X1: x1, X2: x2})
            OUTLINE = np.array(OUTLINE)
            # print('OUTLINE:', OUTLINE.shape)
            labels = np.argmax(OUTLINE, 1)
            for s in range(175):
                OUTMHA[subindex[s, 0], subindex[s, 1], subindex[s, 2]] = labels[s]
                print('subindex:', subindex[s, 0], subindex[s, 1], subindex[s, 2])
                print('max:', labels[s])
        print('finish!')
        util.save_nuarray_as_mha(
            'E://zhangjinjing/brain2D/2013test/VSD.Seg_HG_001.' +
            str(patient[i]) + '.mha', OUTMHA)
def preprocess(self):
    # Creates back_ground.mha, which marks the non-brain (black) part of the
    # volume, then loads it along with the pickled balanced-sampling indices.
    self.set_parent_file(self.file_parent)
    if self._find_file("back_ground") is None:
        back_ground = self.BFS()
        utility.save_nuarray_as_mha(
            os.path.join(self.file_parent, "back_ground.mha"), back_ground)
        self.set_parent_file(self.file_parent)
    self.back_ground = utility.read_mha_image_as_nuarray(
        self._find_file(r"back_ground\.mha"))
    balanced_data_file = os.path.join(self.file_parent, "balanced_data.pickle")
    if not os.path.isfile(balanced_data_file):
        self.balance_data()
    with open(balanced_data_file, "rb") as ff:
        self.balanced_data_indices = pickle.load(ff)
import numpy as np
import utility as util

# The network output covers only the central 175x175 region of each slice;
# pad it back into a full 155x240x240 volume.
modi210 = util.read_mha_image_as_nuarray('test210.mha')
temp = np.zeros([155, 240, 240])
temp[:, 32:207, 32:207] = modi210
util.save_nuarray_as_mha('test210_2.mha', temp)
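# The utility module itself is not shown in this section. A minimal sketch of
# the two helpers used throughout, assuming they wrap SimpleITK's MetaImage
# I/O (the actual implementation may differ):
import SimpleITK as sitk

def read_mha_image_as_nuarray(path):
    # Read a .mha file and return its voxels as a NumPy array (z, y, x).
    return sitk.GetArrayFromImage(sitk.ReadImage(path))

def save_nuarray_as_mha(path, array):
    # Wrap a NumPy array as a SimpleITK image and write it out as .mha.
    sitk.WriteImage(sitk.GetImageFromArray(array), path)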
import scipy.io
import numpy as np
import utility as util

mat = scipy.io.loadmat('brain_1125.mat')
print(mat)
files = mat['img']
# file = mat['V']
y = np.zeros([files.shape[2], files.shape[1], files.shape[0]])
for i in range(files.shape[2]):
    x = files[:, :, i]
    for j in range(files.shape[1]):
        y[i, j] = x[:, j]
print(y.shape)
util.save_nuarray_as_mha('ktest.mha', y)
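# The double loop above reverses the axis order, i.e. y[i, j, k] == files[k, j, i].
# As a sketch, the same reordering is a single vectorized call:
y = files.transpose(2, 1, 0).astype(np.float64)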
import numpy as np
import utility as util

seg = "E://zhangjinjing/brain2D/test210.mha"
label = "E://zhangjinjing/brain2D/BrainTrain/210/0005/0005.mha"

def compare(path1, path2):
    # Zero out every voxel where the segmentation agrees with the label,
    # leaving only the mislabelled voxels in the error map.
    data1 = util.read_mha_image_as_nuarray(path1)
    data2 = util.read_mha_image_as_nuarray(path2)
    for i in range(data1.shape[0]):
        for j in range(data1.shape[1]):
            for k in range(data1.shape[2]):
                if data1[i, j, k] == data2[i, j, k]:
                    data1[i, j, k] = 0
    return data1

if __name__ == "__main__":
    data = compare(seg, label)
    util.save_nuarray_as_mha("E://zhangjinjing/brain2D/error.mha", data)
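# The triple loop visits every voxel in Python. A sketch of the same error map
# as one vectorized NumPy expression:
def compare_vectorized(path1, path2):
    data1 = util.read_mha_image_as_nuarray(path1)
    data2 = util.read_mha_image_as_nuarray(path2)
    # Keep the predicted label only where it disagrees with the ground truth.
    return np.where(data1 == data2, 0, data1)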
# print(OUTLINE.shape)
# util.save_nuarray_as_mha('VSD.Seg_LG_011.' + str(dict[zip_num + 85]) + '.mha', OUTMHA)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(
    sess,
    'E:/zhangjinjing/brain2D/brain_session_HLGG/brain_session_resize_8.ckpt')
print('read session ok')
iter = 0
OUTMHA = np.zeros([ALLtest.shape[0], ALLtest.shape[1], ALLtest.shape[2]])
for pos_x in range(ALLtest.shape[0]):
    for pos_y in range(ALLtest.shape[1] - patch_size):
        iter += 1
        print('\n', iter)
        print('progress: ', iter * 700. / 189875., '%')
        # Input_batch1, Input_batch2, Label_batch = getBatch2D(pos_x, pos_y)
        Input_batch1, Input_batch2 = getBatch2D(pos_x, pos_y)
        # OUTLINE = sess.run([OUT], feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch})
        OUTLINE = sess.run([OUT], feed_dict={Xp1: Input_batch1, Xp2: Input_batch2})
        OUTLINE = np.array(OUTLINE)
        OUTMHA[pos_x, pos_y + halfpz, halfpz:175 + halfpz] = np.argmax(OUTLINE[0, :, :], 1)
util.save_nuarray_as_mha("test/8/VSD.Seg_HG_001.40767.mha", OUTMHA)
print(OUTLINE.shape)
# Cross-entropy loss and accuracy over the 5 output classes.
loss = tf.reduce_mean(-tf.reduce_sum(Yp * tf.log(OUT), reduction_indices=[1]))
correct_prediction = tf.equal(tf.argmax(OUT, 1), tf.argmax(Yp, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(
    sess, 'E:/zhangjinjing/brain2D/brain_session_HLGG/brain_session_resize.ckpt')
print('read session OK')
iter = 0
OUTMHA = np.zeros([ALLtest.shape[0], ALLtest.shape[1], ALLtest.shape[2]])
for pos_x in range(ALLtest.shape[0]):
    for pos_y in range(ALLtest.shape[1] - patch_size):
        iter += 1
        print('\n', iter)
        print('progress: ', iter * 700. / 189875., '%')
        Input_batch1, Input_batch2, Label_batch = getBatch2D(pos_x, pos_y)
        # Input_batch1, Input_batch2 = getBatch2D(pos_x, pos_y)
        OUTLINE = sess.run([OUT], feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch})
        # OUTLINE = sess.run([OUT], feed_dict={Xp1: Input_batch1, Xp2: Input_batch2})
        OUTLINE = np.array(OUTLINE)
        # print(tf.Session().run(tf.arg_max(OUTLINE, 1), feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch}))
        # print(tf.Session().run(tf.arg_max(Label_batch, 1), feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch}))
        OUTMHA[pos_x, pos_y + halfpz, halfpz:175 + halfpz] = np.argmax(OUTLINE[0, :, :], 1)
        print(OUTLINE.shape)
util.save_nuarray_as_mha('test40461_resize.mha', OUTMHA)
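# The hand-written loss above applies log() to softmax outputs, which can
# underflow to -inf. A sketch of the numerically stabler fused TF1 op,
# assuming `convout` holds the pre-softmax logits (as in the last snippet
# of this section):
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Yp, logits=convout))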
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(
    sess, 'E:/zhangjinjing/brain2D/brain_session_HLGG/brain_session.ckpt')
print('read session OK')
iter = 0
OUTMHA = np.zeros([ALLtest.shape[0], ALLtest.shape[1], ALLtest.shape[2]])
for pos_x in range(ALLtest.shape[0]):
    for pos_y in range(ALLtest.shape[1] - patch_size):
        iter += 1
        print('\n', iter)
        print('progress: ', iter * 700. / 189875., '%')
        # Input_batch1, Input_batch2, Label_batch = getBatch2D(pos_x, pos_y)
        Input_batch1, Input_batch2 = getBatch2D(pos_x, pos_y)
        # OUTLINE = sess.run([OUT], feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch})
        OUTLINE = sess.run([OUT], feed_dict={Xp1: Input_batch1, Xp2: Input_batch2})
        OUTLINE = np.array(OUTLINE)
        # print(tf.Session().run(tf.arg_max(OUTLINE, 1), feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch}))
        # print(tf.Session().run(tf.arg_max(Label_batch, 1), feed_dict={Xp1: Input_batch1, Xp2: Input_batch2, Yp: Label_batch}))
        OUTMHA[pos_x, pos_y + halfpz, halfpz:175 + halfpz] = np.argmax(OUTLINE[0, :, :], 1)
        print(OUTLINE.shape)
util.save_nuarray_as_mha("VSD.Seg_HG_001." + index[zip_num] + ".mha", OUTMHA)
# Final 21x21 convolution maps the 224 feature channels to the 5 output classes.
convout = tf.nn.conv2d(output, weight_variable([21, 21, 224, 5]),
                       strides=[1, 1, 1, 1], padding='VALID') + bias_variable([5])
convout = tf.reshape(convout, [175, 5])
OUT = tf.nn.softmax(convout)
OUTMHA = np.zeros([ALLTest.shape[0], ALLTest.shape[1], ALLTest.shape[2]])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(
    sess, 'E:/zhangjinjing/brain2D/brain_session_HLGG/brain_session_resize2.ckpt')
print('read session OK')
iter = 1
for pos_x in range(ALLTest.shape[0]):
    for pos_y in range(ALLTest.shape[1] - patch_size):
        input_batch1, input_batch2 = get_2Dbatch(pos_x, pos_y)
        out = sess.run([OUT], feed_dict={XP1: input_batch1, XP2: input_batch2})
        out = np.array(out)
        Endpatch = half_patch + 175
        OUTMHA[pos_x, pos_y + half_patch, half_patch:Endpatch] = np.argmax(out[0, :, :], 1)
        print("progress:", iter / 26250)
        iter = iter + 1
print(out.shape)
util.save_nuarray_as_mha('VSD.Seg_HG_001.41163.mha', OUTMHA)