def train():
    global tensorboard_dir
    now = datetime.utcnow().strftime("%Y-%m-%d_%Hh-%Mm-%Ss")
    method = 'full'
    tensorboard_dir = "{}/run_{}_{}/".format(tensorboard_dir, method, now)
    writer = tf.summary.create_file_writer(tensorboard_dir)
    for epoch in range(1, epochs + 1):
        model_loss = []
        # Generate a fresh synthetic training movie every 5 epochs
        if epoch % 5 == 1:
            snr = 1.5 + 4.5 * np.random.rand()
            data, _ = movie_maker(snr, 20)
        # Draw a shuffled set of z-slices; each slice holds the input and target side by side
        z_sample = rd.sample(np.linspace(21, 60, 40).tolist(), 40)
        for z in z_sample:
            image_batch = tf.convert_to_tensor(
                np.reshape(data[:, :, int(z), :],
                           [1, image_size, 2 * image_size, data.shape[-1]]).astype(np.float32))
            for i in range(0, image_batch.shape[-1]):
                # Left half of the frame is the input, right half is the target
                inputs = tf.slice(image_batch, [0, 0, 0, i],
                                  [batch_size, image_size, image_size, 1])
                targets = tf.slice(image_batch, [0, 0, image_size, i],
                                   [batch_size, image_size, image_size, 1])
                loss = train_step(inputs, targets)
                model_loss.append(loss)
        print('Epoch {}/{}, Loss = {}'.format(epoch, epochs, np.mean(model_loss)))
        if epoch % 1 == 0:
            print('Saving training log...')
            with writer.as_default():
                tf.summary.scalar('summary_loss', np.mean(model_loss), step=epoch)
        if epoch % 10 == 0:
            print('Saving training checkpoint...')
            if not os.path.exists(a.checkpoint + '/'):
                os.makedirs(a.checkpoint + '/')
            model.save_weights(a.checkpoint + '/')
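
# Illustrative sketch only: the train_step() called above is defined elsewhere in
# this repo. Assuming the module-level `model` and an `optimizer` created at setup
# time, a minimal single-output training step with an L1 reconstruction loss could
# look like the function below; the name, decorator, and loss choice are
# assumptions rather than the confirmed implementation.
@tf.function
def _train_step_sketch(inputs, targets):
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Mean absolute (L1) error between the predicted and target slice
        loss = tf.reduce_mean(tf.abs(targets - predictions))
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
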
def evaluate():
    method = 'newby3'
    if not os.path.exists('./arrays/'):
        os.makedirs('./arrays/')
    snr = [1.5, 2.0, 3.0, 4.0, 5.0, 6.0]
    FALSEPOSITIVES = []
    FALSENEGATIVES = []
    TRUEPOSITIVES = []
    PRECISION = []
    RECALL = []
    F1SCORE = []
    PIXELERROR = []
    LOCERROR = []
    MAXLOCERROR = []
    DETECTED = []
    for SNR in snr:
        print('Testing SNR:', SNR)
        aFP1 = []
        aFN1 = []
        aTP1 = []
        P1 = []
        R1 = []
        F1_1 = []
        aMinDist1 = []
        aMaxDist1 = []
        aPWE1 = []
        aDetect1 = []
        for i in range(0, movies):
            print('Generating test movie {}...'.format(i))
            movie, loc = movie_maker(SNR, 10)
            [in_x, in_y, in_z, in_t] = movie.shape
            # Left half of each frame is the input, right half the target (assumes image_size == 256)
            data = np.reshape(movie[:, 0:256, :, :].astype(np.float32) / 255,
                              [1, image_size, image_size, in_z, in_t])
            targets = movie[:, 256::, :, :].astype(np.float32) / 255
            out = np.zeros([image_size, image_size, in_z, in_t]).astype(np.float32)
            for z in range(0, in_z):
                print('Evaluating slice {} of {}'.format(z, in_z))
                for t in range(0, in_t):
                    # Build a 3-slice input volume, padding with the current slice at the z boundaries
                    if z == 0:
                        input_z = tf.convert_to_tensor(
                            np.reshape(data[0, :, :, z:z + 2, t],
                                       [1, image_size, image_size, 2, 1]))
                        input_z_pad = tf.convert_to_tensor(
                            np.reshape(data[0, :, :, z, t],
                                       [1, image_size, image_size, 1, 1]))
                        input_vol = tf.concat([input_z_pad, input_z], axis=3)
                    elif z == in_z - 1:
                        input_z = tf.convert_to_tensor(
                            np.reshape(data[0, :, :, z - 1:z + 1, t],
                                       [1, image_size, image_size, 2, 1]))
                        input_z_pad = tf.convert_to_tensor(
                            np.reshape(data[0, :, :, z, t],
                                       [1, image_size, image_size, 1, 1]))
                        input_vol = tf.concat([input_z, input_z_pad], axis=3)
                    else:
                        input_vol = tf.convert_to_tensor(
                            np.reshape(data[0, :, :, z - 1:z + 2, t],
                                       [1, image_size, image_size, 3, 1]))
                    # Zero the recurrent state at the first frame; afterwards feed back the model's state
                    if t == 0:
                        prev_step = tf.zeros(
                            shape=[batch_size, image_size // 2, image_size // 2, 3, 6])
                    inputs = tf.slice(input_vol, [0, 0, 0, 0, 0],
                                      [batch_size, image_size, image_size, 3, 1])
                    tf_output, prev_step = model([inputs, prev_step], training=True)
                    out[:, :, z, t] = tf_output.numpy()[0, :, :, 0, 0]
            print('Evaluating Output...')
            for t in range(0, in_t):
                y = loc[:, :, t].astype(np.uint8)
                detected_targets = len(y[:, 0])
                FP1 = 0
                x1 = post_process(out[:, :, :, t])
                detected_targets = len(x1)
                distances = []
                for i in range(0, len(x1)):
                    distance = np.sqrt((x1[i, 0] - y[:, 1])**2
                                       + (x1[i, 1] - y[:, 0])**2
                                       + (x1[i, 2] - y[:, 2])**2)
                    distances.append(min(distance))
                    # A detection more than 2 pixels from every true spot counts as a false positive
                    if min(distance) > 2:
                        FP1 += 1
                aMinDist1.append(np.mean(distances))
                aMaxDist1.append(np.max(distances))
                TP1 = len(x1) - FP1
                FN1 = len(y[:, 0]) - (len(x1) - FP1)
                aFP1.append(FP1)
                aFN1.append(FN1)
                aTP1.append(TP1)
                P1.append(TP1 / (TP1 + FP1 + 1e-10))
                R1.append(TP1 / (TP1 + FN1 + 1e-10))
                F1_1.append(2 * P1[-1] * R1[-1] / (P1[-1] + R1[-1] + 1e-10))
                aDetect1.append(len(x1))
                average_pixel_error = []  # Mean absolute error per pixel in a 5x5x5 box around a true spot
                for i in range(0, len(y[:, 0])):
                    true_box = targets[y[i, 1] - 2:y[i, 1] + 3,
                                       y[i, 0] - 2:y[i, 0] + 3,
                                       y[i, 2] - 2:y[i, 2] + 3, t]
                    predicted_box = out[y[i, 1] - 2:y[i, 1] + 3,
                                        y[i, 0] - 2:y[i, 0] + 3,
                                        y[i, 2] - 2:y[i, 2] + 3, t]
                    average_pixel_error.append(np.sum(np.abs(true_box - predicted_box)) / (5**3))
                aPWE1.append(np.mean(average_pixel_error))
        print('Appending Results...')
        FALSEPOSITIVES.append(np.mean(aFP1))
        FALSENEGATIVES.append(np.mean(aFN1))
        TRUEPOSITIVES.append(np.mean(aTP1))
        PRECISION.append(np.mean(P1))
        RECALL.append(np.mean(R1))
        F1SCORE.append(np.mean(F1_1))
        PIXELERROR.append(np.mean(aPWE1))
        LOCERROR.append(np.mean(aMinDist1))
        MAXLOCERROR.append(np.mean(aMaxDist1))
        DETECTED.append(np.mean(aDetect1))
        print('FALSEPOSITIVES', FALSEPOSITIVES[-1])
        print('FALSENEGATIVES', FALSENEGATIVES[-1])
        print('TRUEPOSITIVES', TRUEPOSITIVES[-1])
        print('PRECISION', PRECISION[-1])
        print('RECALL', RECALL[-1])
        print('F1SCORE', F1SCORE[-1])
        print('LOCATION ERROR', LOCERROR[-1])
        print('MAX. LOCATION ERROR', MAXLOCERROR[-1])
        print('PIXELERROR', PIXELERROR[-1])
        print('DETECTED', DETECTED[-1])
    if a.save_arr == "Yes":
        np.save('./arrays/' + method + '_false-positives.npy', np.array(FALSEPOSITIVES))
        np.save('./arrays/' + method + '_false-negatives.npy', np.array(FALSENEGATIVES))
        np.save('./arrays/' + method + '_true-positives.npy', np.array(TRUEPOSITIVES))
        np.save('./arrays/' + method + '_precision.npy', np.array(PRECISION))
        np.save('./arrays/' + method + '_recall.npy', np.array(RECALL))
        np.save('./arrays/' + method + '_f1-score.npy', np.array(F1SCORE))
        np.save('./arrays/' + method + '_pixel-error.npy', np.array(PIXELERROR))
        np.save('./arrays/' + method + '_loc-error.npy', np.array(LOCERROR))
        np.save('./arrays/' + method + '_max-loc-error.npy', np.array(MAXLOCERROR))
        np.save('./arrays/' + method + '_detected.npy', np.array(DETECTED))
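
# Illustrative sketch only: post_process() used in evaluate() is defined elsewhere
# in this repo and returns an (N, 3) array of detected spot coordinates. One
# plausible implementation is a 3-D local-maximum search with scikit-image; the
# helper name, threshold, and min_distance values below are assumptions.
def _post_process_sketch(volume, threshold=0.5, min_distance=2):
    from skimage.feature import peak_local_max
    # (N, 3) array of (row, col, z) peak coordinates above the absolute threshold
    return peak_local_max(volume, min_distance=min_distance, threshold_abs=threshold)
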
def train_plus():
    global tensorboard_dir
    now = datetime.utcnow().strftime("%Y-%m-%d_%Hh-%Mm-%Ss")
    method = 'newby3'
    tensorboard_dir = "{}/run_{}_{}/".format(tensorboard_dir, method, now)
    # Number of movies per set, movie length
    n = 10
    mov_len = 20
    writer = tf.summary.create_file_writer(tensorboard_dir)
    for epoch in range(a.pre_epoch + 1, a.pre_epoch + epochs + 1):
        # Generate a new training set of 10 movies every 50 epochs
        if epoch % 50 == 1:
            data = []
            for _ in range(0, n):
                snr = 1.5 + 4.5 * np.random.rand()
                mov, _ = movie_maker(snr, mov_len, arr_type)
                data.append(mov)
            if arr_type == 'float32':
                data = np.array(data).astype(np.float32)
            elif arr_type == 'uint8':
                data = (np.array(data) / 255).astype(np.float32)
            else:
                raise Exception('Specified Array Type must be float32 or uint8')
        model_loss = []
        for i in range(0, n):
            rnn_out = []
            z_sample = rd.sample(np.linspace(21, 60, 40).tolist(), 40)
            for z in z_sample:
                z = int(z)
                for t in range(0, data.shape[-1]):
                    # Build a 3-slice input volume, zero-padding at the z boundaries
                    if z == 0:
                        input_z = tf.convert_to_tensor(
                            np.reshape(data[i, :, :, z:z + 2, t],
                                       [1, image_size, 2 * image_size, 2, 1]).astype(np.float32))
                        input_z_pad = tf.zeros(shape=[1, image_size, 2 * image_size, 1, 1])
                        input_vol = tf.concat([input_z_pad, input_z], axis=3)
                    elif z == data.shape[3] - 1:
                        input_z = tf.convert_to_tensor(
                            np.reshape(data[i, :, :, z - 1:z + 1, t],
                                       [1, image_size, 2 * image_size, 2, 1]).astype(np.float32))
                        input_z_pad = tf.zeros(shape=[1, image_size, 2 * image_size, 1, 1])
                        input_vol = tf.concat([input_z, input_z_pad], axis=3)
                    else:
                        input_vol = tf.convert_to_tensor(
                            np.reshape(data[i, :, :, z - 1:z + 2, t],
                                       [1, image_size, 2 * image_size, 3, 1]).astype(np.float32))
                    # Zero the recurrent state at the first frame; afterwards feed back the previous output
                    if t == 0:
                        prev_step = tf.zeros(
                            shape=[batch_size, image_size // 2, image_size // 2, 3, 6])
                    else:
                        prev_step = rnn_out[-1]
                    inputs = tf.slice(input_vol, [0, 0, 0, 0, 0],
                                      [batch_size, image_size, image_size, 3, 1])
                    targets = tf.convert_to_tensor(
                        np.reshape(data[i, :, image_size::, z, t],
                                   [1, image_size, image_size, 1, 1]).astype(np.float32))
                    loss, output = train_step(inputs, prev_step, targets)
                    model_loss.append(loss)
                    rnn_out.append(output[1])
        print('Epoch {}/{}, Loss = {}'.format(epoch, epochs, model_loss[-1]))
        if epoch % 1 == 0:
            print('Saving training log...')
            with writer.as_default():
                # epoch already starts at a.pre_epoch + 1, so it is used directly as the summary step
                tf.summary.scalar('summary_loss', model_loss[-1], step=epoch)
        if epoch % 10 == 0:
            print('Saving training checkpoint...')
            if not os.path.exists(a.checkpoint + '/'):
                os.makedirs(a.checkpoint + '/')
            model.save_weights(a.checkpoint + '/')
def test():
    method = 'full'
    if not os.path.exists('./arrays/'):
        os.makedirs('./arrays/')
    snr = [1.5, 2.0, 3.0, 4.0, 5.0, 6.0]
    # Metrics for the mask method (suffix 1) and the peak-local-max method (suffix 2)
    FALSEPOSITIVES1 = []
    FALSENEGATIVES1 = []
    TRUEPOSITIVES1 = []
    PRECISION1 = []
    RECALL1 = []
    F1SCORE1 = []
    PIXELERROR1 = []
    LOCERROR1 = []
    MAXLOCERROR1 = []
    DETECTED1 = []
    FALSEPOSITIVES2 = []
    FALSENEGATIVES2 = []
    TRUEPOSITIVES2 = []
    PRECISION2 = []
    RECALL2 = []
    F1SCORE2 = []
    PIXELERROR2 = []
    LOCERROR2 = []
    MAXLOCERROR2 = []
    DETECTED2 = []
    for SNR in snr:
        print('Testing SNR:', SNR)
        aFP1 = []
        aFN1 = []
        aTP1 = []
        P1 = []
        R1 = []
        F1_1 = []
        aMinDist1 = []
        aMaxDist1 = []
        aPWE1 = []
        aDetect1 = []
        aFP2 = []
        aFN2 = []
        aTP2 = []
        P2 = []
        R2 = []
        F1_2 = []
        aMinDist2 = []
        aMaxDist2 = []
        aPWE2 = []
        aDetect2 = []
        for i in range(0, movies):
            print('Generating test movie {}...'.format(i))
            movie, loc = movie_maker(SNR, 10)
            for t in range(0, movie.shape[-1]):
                print('Running Model...')
                out = np.zeros([image_size, image_size, movie.shape[2]])
                for z in range(0, movie.shape[2]):
                    input_slice = tf.convert_to_tensor(
                        np.reshape(movie[:, 0:image_size, z, t],
                                   [1, image_size, image_size, 1]))
                    out_slice = model(input_slice, training=False)
                    out[:, :, z] = out_slice.numpy()[0, :, :, 0]
                targets = movie[:, image_size::, :, t]

                # Evaluating output
                y = loc[:, :, t].astype(np.uint8)
                detected_targets = len(y[:, 0])

                # Mask method
                FP1 = 0
                x1 = process_1(out)
                detected_targets = len(x1)
                distances = []
                for i in range(0, len(x1)):
                    distance = np.sqrt((x1[i, 0] - y[:, 1])**2
                                       + (x1[i, 1] - y[:, 0])**2
                                       + (x1[i, 2] - y[:, 2])**2)
                    distances.append(min(distance))
                    # A detection more than 2 pixels from every true spot counts as a false positive
                    if min(distance) > 2:
                        FP1 += 1
                aMinDist1.append(np.mean(distances))
                aMaxDist1.append(np.max(distances))
                TP1 = len(x1) - FP1
                FN1 = len(y[:, 0]) - (len(x1) - FP1)
                aFP1.append(FP1)
                aFN1.append(FN1)
                aTP1.append(TP1)
                P1.append(TP1 / (TP1 + FP1 + 1e-10))
                R1.append(TP1 / (TP1 + FN1 + 1e-10))
                F1_1.append(2 * P1[-1] * R1[-1] / (P1[-1] + R1[-1] + 1e-10))
                aDetect1.append(len(x1))
                average_pixel_error = []  # Mean absolute error per pixel in a 5x5x5 box around a true spot
                for i in range(0, len(y[:, 0])):
                    true_box = targets[y[i, 1] - 2:y[i, 1] + 3,
                                       y[i, 0] - 2:y[i, 0] + 3,
                                       y[i, 2] - 2:y[i, 2] + 3]
                    predicted_box = out[y[i, 1] - 2:y[i, 1] + 3,
                                        y[i, 0] - 2:y[i, 0] + 3,
                                        y[i, 2] - 2:y[i, 2] + 3]
                    average_pixel_error.append(np.sum(np.abs(true_box - predicted_box)) / (5**3))
                aPWE1.append(np.mean(average_pixel_error))

                # Peak local max method
                FP2 = 0
                x2 = process_2(out)
                detected_targets = len(x2)
                distances = []
                for i in range(0, len(x2)):
                    distance = np.sqrt((x2[i, 0] - y[:, 1])**2
                                       + (x2[i, 1] - y[:, 0])**2
                                       + (x2[i, 2] - y[:, 2])**2)
                    distances.append(min(distance))
                    if min(distance) > 2:
                        FP2 += 1
                aMinDist2.append(np.mean(distances))
                aMaxDist2.append(np.max(distances))
                TP2 = len(x2) - FP2
                FN2 = len(y[:, 0]) - (len(x2) - FP2)
                aFP2.append(FP2)
                aFN2.append(FN2)
                aTP2.append(TP2)
                P2.append(TP2 / (TP2 + FP2 + 1e-10))
                R2.append(TP2 / (TP2 + FN2 + 1e-10))
                F1_2.append(2 * P2[-1] * R2[-1] / (P2[-1] + R2[-1] + 1e-10))
                aDetect2.append(len(x2))
                average_pixel_error = []  # Mean absolute error per pixel in a 5x5x5 box around a true spot
                for i in range(0, len(y[:, 0])):
                    true_box = targets[y[i, 1] - 2:y[i, 1] + 3,
                                       y[i, 0] - 2:y[i, 0] + 3,
                                       y[i, 2] - 2:y[i, 2] + 3]
                    predicted_box = out[y[i, 1] - 2:y[i, 1] + 3,
                                        y[i, 0] - 2:y[i, 0] + 3,
                                        y[i, 2] - 2:y[i, 2] + 3]
                    average_pixel_error.append(np.sum(np.abs(true_box - predicted_box)) / (5**3))
                aPWE2.append(np.mean(average_pixel_error))
        print('Appending Results...')
        FALSEPOSITIVES1.append(np.mean(aFP1))
        FALSENEGATIVES1.append(np.mean(aFN1))
        TRUEPOSITIVES1.append(np.mean(aTP1))
        PRECISION1.append(np.mean(P1))
        RECALL1.append(np.mean(R1))
        F1SCORE1.append(np.mean(F1_1))
        PIXELERROR1.append(np.mean(aPWE1))
        LOCERROR1.append(np.mean(aMinDist1))
        MAXLOCERROR1.append(np.mean(aMaxDist1))
        DETECTED1.append(np.mean(aDetect1))
        print('Mask Method')
        print('FALSEPOSITIVES', FALSEPOSITIVES1[-1])
        print('FALSENEGATIVES', FALSENEGATIVES1[-1])
        print('TRUEPOSITIVES', TRUEPOSITIVES1[-1])
        print('PRECISION', PRECISION1[-1])
        print('RECALL', RECALL1[-1])
        print('F1SCORE', F1SCORE1[-1])
        print('LOCATION ERROR', LOCERROR1[-1])
        print('MAX. LOCATION ERROR', MAXLOCERROR1[-1])
        print('PIXELERROR', PIXELERROR1[-1])
        print('DETECTED', DETECTED1[-1])
        FALSEPOSITIVES2.append(np.mean(aFP2))
        FALSENEGATIVES2.append(np.mean(aFN2))
        TRUEPOSITIVES2.append(np.mean(aTP2))
        PRECISION2.append(np.mean(P2))
        RECALL2.append(np.mean(R2))
        F1SCORE2.append(np.mean(F1_2))
        PIXELERROR2.append(np.mean(aPWE2))
        LOCERROR2.append(np.mean(aMinDist2))
        MAXLOCERROR2.append(np.mean(aMaxDist2))
        DETECTED2.append(np.mean(aDetect2))
        print('Peak Local Max. Method')
        print('FALSEPOSITIVES', FALSEPOSITIVES2[-1])
        print('FALSENEGATIVES', FALSENEGATIVES2[-1])
        print('TRUEPOSITIVES', TRUEPOSITIVES2[-1])
        print('PRECISION', PRECISION2[-1])
        print('RECALL', RECALL2[-1])
        print('F1SCORE', F1SCORE2[-1])
        print('LOCATION ERROR', LOCERROR2[-1])
        print('MAX. LOCATION ERROR', MAXLOCERROR2[-1])
        print('PIXELERROR', PIXELERROR2[-1])
        print('DETECTED', DETECTED2[-1])
    if a.save_arr == "Yes":
        np.save('./arrays/' + method + '_1-false-positives.npy', np.array(FALSEPOSITIVES1))
        np.save('./arrays/' + method + '_1-false-negatives.npy', np.array(FALSENEGATIVES1))
        np.save('./arrays/' + method + '_1-true-positives.npy', np.array(TRUEPOSITIVES1))
        np.save('./arrays/' + method + '_1-precision.npy', np.array(PRECISION1))
        np.save('./arrays/' + method + '_1-recall.npy', np.array(RECALL1))
        np.save('./arrays/' + method + '_1-f1-score.npy', np.array(F1SCORE1))
        np.save('./arrays/' + method + '_1-pixel-error.npy', np.array(PIXELERROR1))
        np.save('./arrays/' + method + '_1-loc-error.npy', np.array(LOCERROR1))
        np.save('./arrays/' + method + '_1-max-loc-error.npy', np.array(MAXLOCERROR1))
        np.save('./arrays/' + method + '_1-detected.npy', np.array(DETECTED1))
        np.save('./arrays/' + method + '_2-false-positives.npy', np.array(FALSEPOSITIVES2))
        np.save('./arrays/' + method + '_2-false-negatives.npy', np.array(FALSENEGATIVES2))
        np.save('./arrays/' + method + '_2-true-positives.npy', np.array(TRUEPOSITIVES2))
        np.save('./arrays/' + method + '_2-precision.npy', np.array(PRECISION2))
        np.save('./arrays/' + method + '_2-recall.npy', np.array(RECALL2))
        np.save('./arrays/' + method + '_2-f1-score.npy', np.array(F1SCORE2))
        np.save('./arrays/' + method + '_2-pixel-error.npy', np.array(PIXELERROR2))
        np.save('./arrays/' + method + '_2-loc-error.npy', np.array(LOCERROR2))
        np.save('./arrays/' + method + '_2-max-loc-error.npy', np.array(MAXLOCERROR2))
        np.save('./arrays/' + method + '_2-detected.npy', np.array(DETECTED2))
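
# Illustrative sketch only: process_1() ("Mask Method") used in test() is defined
# elsewhere in this repo. A plausible mask-based variant thresholds the predicted
# volume and takes the intensity-weighted centroid of each connected component;
# the helper name and 0.5 threshold are assumptions.
def _process_1_sketch(volume, threshold=0.5):
    from scipy import ndimage
    mask = volume > threshold
    labels, num_features = ndimage.label(mask)
    centroids = ndimage.center_of_mass(volume, labels, range(1, num_features + 1))
    return np.array(centroids)  # (N, 3) array of (row, col, z) centroids
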
def train_plus():
    global tensorboard_dir
    now = datetime.utcnow().strftime("%Y-%m-%d_%Hh-%Mm-%Ss")
    method = 'rnn' + a.sig
    tensorboard_dir = "{}/run_{}_{}/".format(tensorboard_dir, method, now)
    dataset = load_data()
    model_loss = []
    writer = tf.summary.create_file_writer(tensorboard_dir)
    for epoch in range(a.pre_epoch + 1, a.pre_epoch + epochs + 1):
        # Generate a fresh synthetic training movie every 5 epochs
        if epoch % 5 == 1:
            snr = 1.5 + 1.5 * np.random.rand()
            data, _ = movie_maker(snr, 20)
        z_sample = rd.sample(np.linspace(31, 60, 30).tolist(), 20)
        for z in z_sample:
            outputs = []
            c = 1
            image_batch = tf.convert_to_tensor(
                np.reshape(data[:, :, int(z), :],
                           [1, image_size, 2 * image_size, data.shape[-1]]).astype(np.float32))
            for i in range(0, image_batch.shape[-1]):
                # Current frame plus its temporal neighbours; zero-pad at the sequence boundaries
                if i == 0:
                    next_step = tf.slice(image_batch, [0, 0, 0, i + 1],
                                         [batch_size, image_size, image_size, 1])
                    inputs = tf.slice(image_batch, [0, 0, 0, i],
                                      [batch_size, image_size, image_size, 1])
                    prev_step = tf.zeros(shape=[batch_size, image_size, image_size, 1])
                elif i == image_batch.shape[-1] - 1:
                    prev_step = tf.slice(image_batch, [0, 0, 0, i - 1],
                                         [batch_size, image_size, image_size, 1])
                    inputs = tf.slice(image_batch, [0, 0, 0, i],
                                      [batch_size, image_size, image_size, 1])
                    next_step = tf.zeros(shape=[batch_size, image_size, image_size, 1])
                else:
                    next_step = tf.slice(image_batch, [0, 0, 0, i + 1],
                                         [batch_size, image_size, image_size, 1])
                    inputs = tf.slice(image_batch, [0, 0, 0, i],
                                      [batch_size, image_size, image_size, 1])
                    prev_step = tf.slice(image_batch, [0, 0, 0, i - 1],
                                         [batch_size, image_size, image_size, 1])
                targets = tf.slice(image_batch, [0, 0, image_size, i],
                                   [batch_size, image_size, image_size, 1])
                loss, output = train_step(inputs, next_step, prev_step, targets)
                model_loss.append(loss)
                outputs.append(output)
        print('Epoch {}/{}, Loss = {}'.format(epoch, epochs, model_loss[-1]))
        if epoch % 1 == 0:
            print('Saving training log...')
            with writer.as_default():
                tf.summary.scalar('summary_loss', loss, step=epoch)
        if epoch % 10 == 0:
            print('Saving training checkpoint...')
            if not os.path.exists(a.checkpoint + '/'):
                os.makedirs(a.checkpoint + '/')
            model.save_weights(a.checkpoint + '/')
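
# Illustrative sketch only: the script's entry point is not shown in this section.
# Assuming the argparse namespace `a` exposes a `mode` flag, a simple dispatcher
# over the routines above could look like this.
if __name__ == '__main__':
    if a.mode == 'train':
        train()
    elif a.mode == 'train_plus':
        train_plus()
    elif a.mode == 'test':
        test()
    else:
        evaluate()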