brain_test = np.expand_dims(brain_test, 0)
deformation = sdn.predict(brain_test)[0, ...]
sample = grid + deformation
# Swap the first two channels so the sampling coordinates follow
# interpn's (yy, xx, zz) grid-axis ordering.
sample = np.stack((sample[:, :, :, 1], sample[:, :, :, 0], sample[:, :, :, 2]), 3)
warp_seg = interpn((yy, xx, zz), brain_test_label1, sample,
                   method='nearest', bounds_error=False, fill_value=0)
dice_after[test_ind, :] = np.array(
    [Dice(brain_test_label2 == i, warp_seg == i) for i in label_list])
print('Test sample {}\'s evaluation completed.'.format(test_ind + 1))

# Alternative evaluation path (commented out in the source): per-label
# warping via `transform` instead of nearest-neighbour interpn.
# for label_ind, i in enumerate(label_list):
#     dice_before[test_ind, label_ind] = Dice(brain_test_label2 == i, brain_test_label1 == i)
#     seg = transform(deformation,
#                     np.expand_dims(np.expand_dims(brain_test_label1 == i, 3), 0),
#                     (res1, res2, res3))
#     dice_after[test_ind, label_ind] = Dice(brain_test_label2 == i, np.rint(seg) > 0)

np.save('.npy', dice_after)
# np.save('/global/home/hpc4355/MindBoggle/output/warped.npy', warp_seg)
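# --- Sketch of assumed helpers --------------------------------------------------
# The evaluation above relies on a Dice helper and on the identity grid
# (yy, xx, zz) / `grid` that interpn samples over; neither is defined in this
# fragment. Below is a minimal sketch of one plausible implementation, not the
# repository's own code. `res1, res2, res3` are the volume dimensions used
# elsewhere in the script.
import numpy as np

def Dice(y_true, y_pred):
    """Dice overlap of two boolean volumes (small epsilon avoids 0/0)."""
    intersection = np.sum(y_true & y_pred)
    return 2.0 * intersection / (np.sum(y_true) + np.sum(y_pred) + 1e-8)

yy, xx, zz = np.arange(res1), np.arange(res2), np.arange(res3)
# With default 'xy' indexing, channel 0 holds x and channel 1 holds y, which is
# why the script swaps channels before calling interpn over (yy, xx, zz).
grid = np.stack(np.meshgrid(xx, yy, zz), axis=3).astype('float32')
# --------------------------------------------------------------------------------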
# test_label is assumed to encode the moving/fixed pair as two 2-character
# subject IDs, so test_label[2:] and test_label[2:4] refer to the same ID.
brain_test_mov = np.load(datapath + test_label[:2] + '.npy')
brain_test_fix = np.load(datapath + test_label[2:] + '.npy')
brain_test = np.stack([brain_test_mov, brain_test_fix], 3)
brain_test_label1 = np.load(labelpath + test_label[:2] + '.npy')
brain_test_label2 = np.load(labelpath + test_label[2:4] + '.npy')
brain_test = np.expand_dims(brain_test, 0)

warped_vol, deformation = sdn_pred.predict(brain_test)
np.save('/home/dkuang/{}.npy'.format(test_label), warped_vol[0, ..., 0])

# Per-label Dice of the first-stage registration, before refinement.
for label_ind, i in enumerate(label_list):
    seg = transform(deformation,
                    np.expand_dims(np.expand_dims(brain_test_label1 == i, 3), 0),
                    (res1, res2, res3))
    dice_before[test_ind, label_ind] = Dice(brain_test_label2 == i, np.rint(seg) > 0)
print(dice_before)
print(np.mean(dice_before, 1))

# total_variation_loss regularizes the displacement output; see the sketch
# after this fragment.
sdn_refine.compile(loss=['mse', total_variation_loss],
                   loss_weights=par['loss_weights'],
                   optimizer=Adam(lr=par['lr'], decay=1e-5))
print(sdn_refine.summary())

gen_train = vol_gen_refine(datapath, predpath, train_list, par['batch_size'])
history = sdn_refine.fit_generator(gen_train,
                                   steps_per_epoch=len(train_list) // par['batch_size'],
                                   epochs=par['epochs'],
                                   use_multiprocessing=True,
                                   verbose=1)

dice_after = np.zeros([len(train_list), 56])
for test_ind, test_label in enumerate(train_list):
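# --- Sketch of the assumed smoothness regularizer -------------------------------
# total_variation_loss is passed to compile() above but not defined in this
# fragment. A minimal sketch, assuming sdn_refine's second output is the
# (batch, dim1, dim2, dim3, 3) displacement field and the loss penalizes its
# squared spatial finite differences; the repository's own definition may
# differ. y_true is ignored, as is common for pure regularizers in Keras.
import keras.backend as K

def total_variation_loss(y_true, y_pred):
    dx = y_pred[:, 1:, :, :, :] - y_pred[:, :-1, :, :, :]
    dy = y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :]
    dz = y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :]
    return K.mean(K.square(dx)) + K.mean(K.square(dy)) + K.mean(K.square(dz))
# --------------------------------------------------------------------------------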
sdn_warped_img = transform(deformation, np.expand_dims(brain_test[..., 0], 4),
                           (res1, res2, res3))
# Sum-of-squared-error before registration, after SDN, and after the utr baseline.
sse_before[ind] = np.sum((brain_test[..., 0] - brain_test[..., 1])**2)
sse_sdn[ind] = np.sum((brain_test[0, ..., 1] - sdn_warped_img[0, ..., 0])**2)
sse_utr[ind] = np.sum((brain_test[0, ..., 1] - utr_warped_img)**2)

for label_ind, i in enumerate(label_list):
    dice_before[ind, label_ind] = Dice(brain_test_label2 == i, brain_test_label1 == i)
    seg = transform(deformation,
                    np.expand_dims(np.expand_dims(brain_test_label1 == i, 3), 0),
                    (res1, res2, res3))
    # seg_utr = transform(disp, np.expand_dims(np.expand_dims(brain_test_label1 == i, 3), 0),
    #                     (res1, res2, res3))
    dice_after_sdn[ind, label_ind] = Dice(brain_test_label2 == i, np.rint(seg) > 0)
    dice_after_utr[ind, label_ind] = Dice(brain_test_label2 == i,
                                          np.rint(utr_warped_label) == i)
print("{}-th sample done.".format(ind + 1))

# print("utr vs before: {} ROIs are better".format(np.sum(dice_after_utr[0] > dice_before[0])))
# print("sdn vs before: {} ROIs are better".format(np.sum(dice_after_sdn[0] > dice_before[0])))
# print("utr vs sdn: {} ROIs are better".format(np.sum(dice_after_utr[0] > dice_after_sdn[0])))
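# --- Sketch of the assumed `transform` helper ------------------------------------
# `transform` warps a (1, res1, res2, res3, 1) volume by a dense displacement
# field of shape (1, res1, res2, res3, 3). A minimal numpy/scipy sketch under
# that assumption, using trilinear interpolation; the repository may implement
# this as a TensorFlow/Keras layer instead, and the displacement channel order
# (here assumed to match axis order) may differ.
import numpy as np
from scipy.ndimage import map_coordinates

def transform(deformation, vol, shape):
    res1, res2, res3 = shape
    ii, jj, kk = np.meshgrid(np.arange(res1), np.arange(res2),
                             np.arange(res3), indexing='ij')
    # Absolute sampling coordinates = identity grid + displacement.
    coords = np.stack([ii + deformation[0, ..., 0],
                       jj + deformation[0, ..., 1],
                       kk + deformation[0, ..., 2]])
    warped = map_coordinates(vol[0, ..., 0].astype('float32'), coords,
                             order=1, mode='constant', cval=0.0)
    return warped[np.newaxis, ..., np.newaxis]
# ---------------------------------------------------------------------------------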
# Vol_Prob[:-1, :-1, :-1, label_ind] += DJ * interpn((yy, xx, zz), brain_label == i, warped_grid,
#                                                    method='nearest', bounds_error=False,
#                                                    fill_value=0)[:-1, :-1, :-1]
# Vol_Prob = np.exp(np.mean(label_prob, axis=0))  # other than taking mean?
# Vol_sum = np.sum(Vol_Prob, axis=3)
# for i in range(len(label_list)):
#     Vol_Prob[..., i] = Vol_Prob[..., i] / D_sum[0, ..., 0]

'''
Probability across samples
'''
maxprob = np.amax(Vol_Prob, axis=3) / np.sum(W)
# np.save(weights_path + 'lpba_maxprob_sample.npy', maxprob)
WinningLabel = np.argmax(Vol_Prob, axis=3)
# WinningLabel = WinningLabel * (maxprob > 0.02)

'''
Weighted? Joint fusion?
'''
print('%s seconds used in whole atlas construction.' % (time.time() - start_time))

np.save(weights_path + 'mb_avg_incept0_temp1_near.npy', ini)
# np.save(weights_path + 'mb_prob_incept0.npy', Vol_Prob)
np.save(weights_path + 'mb_maxprob_temp1_near.npy', maxprob)
np.save(weights_path + 'mb_WinningLabel_temp1_near.npy', WinningLabel)

# The comprehension already skips label_list[0] (background), so average over
# the whole list rather than dice[1:], which would drop a foreground label too.
dice = [Dice(temp_label == label_list[i], WinningLabel == i)
        for i in range(1, len(label_list))]
print(np.mean(dice))
# print(W)
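# --- Sketch of the assumed Vol_Prob accumulation ---------------------------------
# For context, one plausible way Vol_Prob could have been built upstream of the
# fusion step above (the name `warped_labels` and the per-subject weights W are
# assumptions, not from the source): each subject's warped segmentation casts a
# weighted vote into one channel per label, so the argmax above amounts to a
# weighted majority-vote label fusion.
Vol_Prob = np.zeros(ini.shape + (len(label_list),))
for s, warped_label in enumerate(warped_labels):
    for label_ind, lab in enumerate(label_list):
        Vol_Prob[..., label_ind] += W[s] * (warped_label == lab)
# ----------------------------------------------------------------------------------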