# Shared imports for the log-parsing variants collected below.
import glob
import os
import pickle
import sys

import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import medfilt, savgol_filter


def main():
    log_files = ['/hnn/examples/knees_sagittal_fold1/knees_sagittal_fold1.log']
    train_iteration = []
    train_loss = []
    base_train_iter = 0

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
            line_count = 0
            for line in f:
                line_count += 1
                #print('{}: {}'.format(line_count, line))
                # TRAIN NET
                if strstr(line, 'Iteration') and strstr(line, 'lr'):
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)) + base_train_iter)
                elif strstr(line, ', loss'):
                    matched = match_loss(line)
                    print(matched.group(1))
                    train_loss.append(float(matched.group(1)))

    log_base = os.path.splitext(os.path.basename(log_files[0]))[0]
    result = {'TRAIN': (train_iteration, train_loss)}
    with open(log_base + '.pkl', 'wb') as pkl:
        pickle.dump(result, pkl)

    print('read {} lines'.format(line_count))
    print("TRAIN", train_iteration, train_loss,
          np.shape(train_iteration), np.shape(train_loss))

    print("Best TRAIN performance at:")
    min_train_loss = np.min(train_loss)
    best_idx = int(np.argmin(train_loss))  # scalar index instead of an np.where() array
    print(best_idx)
    print("{}: iteration {}, loss {}".format(log_base, train_iteration[best_idx], min_train_loss))

    # loss
    #plt.plot(train_iteration[0:len(train_loss)], train_loss[0:len(train_loss)], 'k', label='Train loss')
    plt.semilogy(train_iteration[0:len(train_loss)], train_loss[0:len(train_loss)],
                 'k', label='Train loss')
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Number of iterations')
    plt.grid(True)
    plt.savefig(log_base + '.png')  # save before show(), otherwise the saved figure may be empty
    plt.show()
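# The parsing helpers used throughout this file (strstr, match_iteration,
# match_loss, match_accuracy) are defined elsewhere in the repository and are
# not part of this section.  The sketch below is only an assumption of what
# they look like, based on the usual Caffe solver output
# ("Iteration 100, loss = 0.123", "... lr = 0.001",
#  "Test net output #0: accuracy = 0.9"); the regexes and the `_text` helper
# are illustrative, not the repository's actual code.
import re


def _text(line):
    # The logs are opened in 'rb' mode above, so decode bytes before matching.
    return line.decode('utf-8', errors='ignore') if isinstance(line, bytes) else line


def strstr(line, pattern):
    """Return True if `pattern` occurs anywhere in `line`."""
    return pattern in _text(line)


def match_iteration(line):
    """Match the iteration number in lines such as 'Iteration 100, loss = 0.5'."""
    return re.search(r'Iteration (\d+)', _text(line))


def match_loss(line):
    """Match a loss value in lines such as '..., loss = 0.5' or 'Train net output #0: loss = 0.5'."""
    return re.search(r'loss = ([0-9.eE+-]+)', _text(line))


def match_accuracy(line):
    """Match an accuracy value in lines such as 'Test net output #0: accuracy = 0.9'."""
    return re.search(r'accuracy = ([0-9.]+)', _text(line))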
def main():
    log_files = process_arguments(sys.argv)
    train_iteration = []
    train_loss = []
    test_iteration = []
    test_loss = []
    test_accuracy = []
    base_test_iter = 0
    base_train_iter = 0

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
                base_test_iter = test_iteration[-1]
            for line in f:
                # TRAIN NET
                if strstr(line, 'Iteration') and strstr(line, 'lr'):
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)) + base_train_iter)
                elif strstr(line, 'Train net output'):
                    matched = match_loss(line)
                    train_loss.append(float(matched.group(1)))
                # TEST NET
                elif strstr(line, 'Testing net'):
                    matched = match_iteration(line)
                    test_iteration.append(int(matched.group(1)) + base_test_iter)
                elif strstr(line, 'Test net output'):
                    matched = match_loss(line)
                    if matched:
                        test_loss.append(float(matched.group(1)))
                    else:
                        matched = match_accuracy(line)
                        test_accuracy.append(float(matched.group(1)))

    print("TRAIN", train_iteration, train_loss)
    print("TEST", test_iteration, test_loss)
    print("ACCURACY", test_iteration, test_accuracy)

    # loss
    plt.plot(train_iteration, train_loss, 'k', label='Train loss')
    plt.plot(test_iteration, test_loss, 'r', label='Test loss')
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Number of iterations')
    plt.savefig('loss.png')
def main():
    log_file = process_arguments(sys.argv)
    train_iteration = []
    train_loss = []
    test_iteration = []
    test_loss = []

    with open(log_file, 'rb') as f:
        for line in f:
            # TRAIN NET
            if strstr(line, 'Iteration') and strstr(line, 'lr'):
                matched = match_iteration(line)
                train_iteration.append(int(matched.group(1)))
            elif strstr(line, 'Train net output'):
                matched = match_loss(line)
                train_loss.append(float(matched.group(1)))
            # TEST NET
            elif strstr(line, 'Testing net'):
                matched = match_iteration(line)
                test_iteration.append(int(matched.group(1)))
            elif strstr(line, 'Test net output'):
                matched = match_loss(line)
                test_loss.append(float(matched.group(1)))

    print("TRAIN", train_iteration, train_loss)
    print("TEST", test_iteration, test_loss)

    plt.plot(train_iteration, train_loss, 'k')
    plt.plot(test_iteration, test_loss, 'r')
    plt.ylabel('loss')
    plt.xlabel('number of iterations')
    plt.savefig('loss.png')
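# `process_arguments` is also defined outside this section and its signature
# varies between the variants (this one expects a single path, most others a
# list, and the weak/strong-label variant also returns an output selector).
# A minimal sketch for the list-returning case; the usage line and file name
# are illustrative only, not the repository's actual interface.
import sys


def process_arguments(argv):
    """Return the Caffe log path(s) given on the command line."""
    if len(argv) < 2:
        sys.exit('Usage: python loss_from_log.py <caffe_log> [<caffe_log> ...]')
    return argv[1:]

# Example invocation (hypothetical log name):
#   python loss_from_log.py log/caffe_train.log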
def main():
    log_files = process_arguments(sys.argv)
    train_iteration = []
    train_loss = []
    test_iteration = []
    test_loss = []
    test_accuracy = []
    pixel_accuracy = []
    mean_accuracy = []
    mean_IU = []
    frequency_weighted_IU = []
    base_test_iter = 0
    base_train_iter = 0

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
                base_test_iter = test_iteration[-1]
            for line in f:
                # TRAIN NET
                if strstr(line, 'Iteration') and strstr(line, 'lr'):
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)) + base_train_iter)
                elif strstr(line, 'Iteration') and strstr(line, 'loss'):
                    matched = match_loss(line)
                    train_loss.append(float(matched.group(1)))
                # TEST NET
                elif strstr(line, 'Testing net'):
                    matched = match_iteration(line)
                    test_iteration.append(int(matched.group(1)) + base_test_iter)
                elif strstr(line, 'Test net output'):
                    matched = match_loss(line)
                    if matched:
                        test_loss.append(float(matched.group(1)))
                    else:
                        matched = match_accuracy(line)
                        test_accuracy.append(float(matched.group(1)))

    #train_loss.sort()
    print("TRAIN", train_iteration, train_loss)
    print("TEST", test_iteration, test_loss)
    print("ACCURACY", test_iteration, test_accuracy)

    # loss
    plt.plot(train_iteration, train_loss, 'k', label='Train loss')
    plt.plot(test_iteration, test_loss, 'r', label='Test loss')
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Number of iterations')
    plt.savefig('loss.png')

    # evaluation
    plt.clf()
    plt.plot(range(len(pixel_accuracy)), pixel_accuracy, 'k', label='pixel accuracy')
    plt.plot(range(len(mean_accuracy)), mean_accuracy, 'r', label='mean accuracy')
    plt.plot(range(len(mean_IU)), mean_IU, 'g', label='mean IU')
    plt.plot(range(len(frequency_weighted_IU)), frequency_weighted_IU, 'b', label='frequency weighted IU')
    plt.legend(loc=0)
    plt.savefig('evaluation.png')
def main():
    log_files = process_arguments(sys.argv)
    train_iteration = []
    train_loss = []
    lr = []
    test_iteration = []
    detection_eval = []
    base_test_iter = 0
    base_train_iter = 0
    base_lr = 0

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
                base_test_iter = test_iteration[-1]
                base_lr = lr[-1]
            for line in f:
                # TRAIN NET
                if strstr(line, 'Iteration') and strstr(line, 'lr'):
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)))
                    matched = match_lr(line)
                    lr.append(float(matched.group(1)))
                elif strstr(line, 'Train net output'):
                    matched = match_loss(line)
                    train_loss.append(float(matched.group(1)))
                # TEST NET
                elif strstr(line, 'Testing net'):
                    matched = match_iteration(line)
                    test_iteration.append(int(matched.group(1)))
                elif strstr(line, 'Test net output'):
                    matched = match_evaluation(line)
                    detection_eval.append(float(matched.group(1)))

    # print("TRAIN", train_iteration, train_loss)
    # print("TEST", test_iteration, detection_eval)
    # print("LEARNING_RATE", train_iteration, lr)

    # loss
    plt.plot(train_iteration, train_loss, 'b', label='Train loss')
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Number of iterations')
    plt.savefig('loss.png')

    # learning rate
    plt.clf()  # start a fresh figure so the loss curve does not end up in learning_rate.png
    plt.plot(train_iteration, lr, 'g', label='Learning rate')
    plt.legend()
    plt.ylabel('Learning rate')
    plt.xlabel('Number of iterations')
    plt.savefig('learning_rate.png')

    # evaluation
    plt.clf()
    plt.plot(test_iteration, detection_eval, 'r', label='Detection evaluation')
    plt.legend(loc='lower right')
    plt.ylabel('Detection_eval')
    plt.xlabel('Number of iterations')
    plt.savefig('evaluation.png')

    # overlays
    # 1 - training loss vs. detection evaluation
    fig, ax1 = plt.subplots()
    ax1.plot(train_iteration, train_loss, 'b', label='Train loss')
    ax1.set_xlabel('Number of iterations')
    ax1.set_ylabel('Loss', color='b')
    ax2 = ax1.twinx()
    ax2.plot(test_iteration, detection_eval, 'r', label='Detection evaluation')
    ax2.set_ylabel('Detection_eval', color='r')
    plt.savefig('loss_eval.png')

    # 2 - training loss vs. learning rate
    fig, ax1 = plt.subplots()
    ax1.plot(train_iteration, lr, 'g', label='Learning rate')
    ax1.set_xlabel('Number of iterations')
    ax1.set_ylabel('Learning rate', color='g')
    ax2 = ax1.twinx()
    ax2.plot(train_iteration, train_loss, 'b', label='Train loss')
    ax2.set_ylabel('Loss', color='b')
    plt.savefig('lr_loss.png')

    # 3 - learning rate vs. detection evaluation
    fig, ax1 = plt.subplots()
    ax1.plot(train_iteration, lr, 'g', label='Learning rate')
    ax1.set_xlabel('Number of iterations')
    ax1.set_ylabel('Learning rate', color='g')
    ax2 = ax1.twinx()
    ax2.plot(test_iteration, detection_eval, 'r', label='Detection evaluation')
    ax2.set_ylabel('Detection_eval', color='r')
    plt.savefig('lr_eval.png')

    f, axarr = plt.subplots(3, sharex=True)
    axarr[0].plot(train_iteration, train_loss)
    axarr[0].set_title('Iters vs. Loss')
    axarr[1].plot(train_iteration, lr, 'r')
    axarr[1].set_title('Iters vs. Learning Rate')
    axarr[2].plot(test_iteration, detection_eval, 'g')
    axarr[2].set_title('Iters vs. Detection Evaluation')
    plt.savefig('tri.png')
    plt.show()
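# The SSD-style variant above additionally reads the learning rate and the
# 'detection_eval' test output.  Hedged sketches of those two matchers,
# assuming solver lines such as 'Iteration 100, lr = 0.001' and
# 'Test net output #0: detection_eval = 0.71', and reusing the illustrative
# `_text` helper sketched earlier:
import re


def match_lr(line):
    """Match the learning rate in a solver line."""
    return re.search(r'lr = ([0-9.eE+-]+)', _text(line))


def match_evaluation(line):
    """Match the detection_eval value reported by the test net."""
    return re.search(r'detection_eval = ([0-9.]+)', _text(line))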
def main():
    output_data, log_files = process_arguments(sys.argv)
    train_iteration = []
    train_loss = []
    train_accuracy0 = []
    train_accuracy1 = []
    train_accuracy2 = []
    train_accuracy3 = []
    train_accuracy4 = []
    train_accuracy5 = []
    base_train_iter = 0

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
            for line in f:
                if strstr(line, 'Iteration') and strstr(line, 'loss'):
                    matched = match_loss(line)
                    train_loss.append(float(matched.group(1)))
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)) + base_train_iter)
                # strong labels
                elif strstr(line, 'Train net output #0: accuracy '):
                    matched = match_net_accuracy(line)
                    train_accuracy0.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #1: accuracy '):
                    matched = match_net_accuracy(line)
                    train_accuracy1.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #2: accuracy '):
                    matched = match_net_accuracy(line)
                    train_accuracy2.append(float(matched.group(1)))
                # weak labels
                elif strstr(line, 'Train net output #0: accuracy_bbox'):
                    matched = match_net_accuracy_bbox(line)
                    train_accuracy0.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #1: accuracy_bbox'):
                    matched = match_net_accuracy_bbox(line)
                    train_accuracy1.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #2: accuracy_bbox'):
                    matched = match_net_accuracy_bbox(line)
                    train_accuracy2.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #3: accuracy_strong'):
                    matched = match_net_accuracy_strong(line)
                    train_accuracy3.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #4: accuracy_strong'):
                    matched = match_net_accuracy_strong(line)
                    train_accuracy4.append(float(matched.group(1)))
                elif strstr(line, 'Train net output #5: accuracy_strong'):
                    matched = match_net_accuracy_strong(line)
                    train_accuracy5.append(float(matched.group(1)))

    if output_data == 'loss':
        for x in train_loss:
            print(x)
    if output_data == 'acc1':
        for x, y, z in zip(train_accuracy0, train_accuracy1, train_accuracy2):
            print(x, y, z)
    if output_data == 'acc2':
        for x, y, z in zip(train_accuracy3, train_accuracy4, train_accuracy5):
            print(x, y, z)

    ## loss
    plt.plot(train_iteration, train_loss, 'k', label='Train loss')
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Number of iterations')
    plt.savefig('loss.png')

    ## evaluation
    plt.clf()
    if len(train_accuracy3) != 0:
        plt.plot(range(len(train_accuracy0)), train_accuracy0, 'k', label='accuracy bbox 0')
        plt.plot(range(len(train_accuracy1)), train_accuracy1, 'r', label='accuracy bbox 1')
        plt.plot(range(len(train_accuracy2)), train_accuracy2, 'g', label='accuracy bbox 2')
        plt.plot(range(len(train_accuracy3)), train_accuracy3, 'b', label='accuracy strong 0')
        plt.plot(range(len(train_accuracy4)), train_accuracy4, 'c', label='accuracy strong 1')
        plt.plot(range(len(train_accuracy5)), train_accuracy5, 'm', label='accuracy strong 2')
    else:
        plt.plot(range(len(train_accuracy0)), train_accuracy0, 'k', label='train accuracy 0')
        plt.plot(range(len(train_accuracy1)), train_accuracy1, 'r', label='train accuracy 1')
        plt.plot(range(len(train_accuracy2)), train_accuracy2, 'g', label='train accuracy 2')
    plt.legend(loc=0)
    plt.savefig('evaluation.png')
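# The weak/strong-label variant above depends on three accuracy matchers that
# are not shown in this section.  A sketch under the assumption that the
# corresponding log lines look like 'Train net output #3: accuracy_strong = 0.87'
# (again reusing the illustrative `_text` helper):
import re


def match_net_accuracy(line):
    return re.search(r'accuracy = ([0-9.]+)', _text(line))


def match_net_accuracy_bbox(line):
    return re.search(r'accuracy_bbox = ([0-9.]+)', _text(line))


def match_net_accuracy_strong(line):
    return re.search(r'accuracy_strong = ([0-9.]+)', _text(line))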
def main():
    #log_files = process_arguments(sys.argv)

    # get newest log
    log_dir = 'log'
    log_files = [max(glob.iglob(os.path.join(log_dir, '*.log')), key=os.path.getctime)]
    print(log_files)

    XLIM = []
    YLIM = []
    #XLIM = (0, 1000)
    #YLIM = (0.06, 0.5)
    LOGSCALE = False
    #LOGSCALE = True
    REMOVE_ZERO_LOSS = True

    train_iteration = []
    train_loss = []
    test_iteration = []
    test_loss = []
    base_test_iter = 0
    base_train_iter = 0
    curr_train_loss = []
    curr_test_loss = []

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
                base_test_iter = test_iteration[-1]
            line_count = 0
            for line in f:
                line_count += 1
                #print('{}: {}'.format(line_count, line))
                if strstr(line, 'Iteration') and strstr(line, ', loss'):
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)) + base_train_iter)
                    TEST_LOSS = False
                    TRAIN_LOSS = True
                elif strstr(line, 'Iteration') and strstr(line, 'loss'):
                    TEST_LOSS = True
                    TRAIN_LOSS = False
                    matched = match_test_iteration(line)
                    test_iteration.append(int(matched.group(1)) + base_test_iter)
                else:
                    TEST_LOSS = False
                    TRAIN_LOSS = False

                # TRAIN LOSS
                if TRAIN_LOSS:
                    matched = match_train_loss(line)
                    if matched:
                        curr_train_loss.append(float(matched.group(1)))
                        #print('#{} train loss: {}'.format(k_train, float(matched.group(2))))
                        train_loss.append(np.sum(curr_train_loss))
                        print('Iter {}: total train loss: {}'.format(
                            train_iteration[-1], np.sum(curr_train_loss)))
                        curr_train_loss = []

                # TEST LOSS
                if TEST_LOSS:
                    matched = match_test_loss(line)
                    if matched:
                        curr_test_loss.append(float(matched.group(1)))
                        #print('#{} test loss: {}'.format(k_test, float(matched.group(2))))
                        test_loss.append(np.sum(curr_test_loss))
                        print('Iter {}: total test loss: {}'.format(
                            test_iteration[-1], np.sum(curr_test_loss)))
                        curr_test_loss = []

    log_base = os.path.splitext(os.path.basename(log_files[0]))[0]
    result = {'TRAIN': (train_iteration, train_loss)}
    with open(log_base + '.pkl', 'wb') as pkl:
        pickle.dump(result, pkl)

    print('read {} lines'.format(line_count))
    print("TRAIN", np.shape(train_iteration), np.shape(train_loss))
    print("TEST", np.shape(test_iteration), np.shape(test_loss))

    if REMOVE_ZERO_LOSS:
        # convert to numpy and drop zero-loss entries
        Ntrain0 = len(train_loss)
        train_loss = np.array(train_loss)
        train_iteration = np.array(train_iteration)
        idx = train_loss > 0.0
        train_iteration = train_iteration[idx]
        train_loss = train_loss[idx]
        print('Removed {} zeros: Size changed from {} to {}'.format(
            Ntrain0 - np.sum(idx), Ntrain0, np.size(train_loss)))

    if len(train_loss) < len(train_iteration):
        Ntrain = len(train_loss)
    else:
        Ntrain = len(train_iteration)
    if len(test_loss) < len(test_iteration):
        Ntest = len(test_loss)
    else:
        Ntest = len(test_iteration)

    if Ntrain > 0:
        print("Best TRAIN performance at index:")
        min_train_loss = np.min(train_loss)
        best_idx = int(np.argmin(train_loss))  # scalar index, works for lists and arrays
        print(best_idx)
        print("{}: iteration {}, loss {}".format(log_base, train_iteration[best_idx], min_train_loss))
    if Ntest > 0:
        print("Best TEST performance at index:")
        min_test_loss = np.min(test_loss)
        best_idx = int(np.argmin(test_loss))
        print(best_idx)
        print("{}: iteration {}, loss {}".format(log_base, test_iteration[best_idx], min_test_loss))

    # Smoothing: positive window -> Savitzky-Golay, negative window -> median filter
    window = int(0.1 * float(Ntrain))
    window = -int(0.1 * float(Ntrain))  # median
    if abs(window) > 100:
        window = int(100 * abs(window) / window)  # clamp to 100, keep sign
    if window % 2 == 0:  # make uneven
        window = window + 1
    testwindow = int(0.1 * float(Ntest))
    testwindow = -int(0.1 * float(Ntest))  # median
    if abs(testwindow) > 100:
        testwindow = int(100 * abs(testwindow) / testwindow)  # clamp to 100, keep sign
    if testwindow % 2 == 0:  # make uneven
        testwindow = testwindow + 1

    if Ntrain > 0 and window < Ntrain:
        print('smoothing {} data points with window of {}'.format(Ntrain, window))
        if window > 0 and window < Ntrain and window > 3:
            train_loss_smooth = savgol_filter(train_loss, abs(window), 3)  # window size, polynomial order
        else:
            train_loss_smooth = medfilt(train_loss, abs(window))  # median filter, odd window size
    else:
        train_loss_smooth = []
    if Ntest > 0 and testwindow < Ntest:
        print('smoothing {} data points with window of {}'.format(Ntest, testwindow))
        if testwindow > 0 and testwindow > 3:
            test_loss_smooth = savgol_filter(test_loss, abs(testwindow), 3)  # window size, polynomial order
        else:
            test_loss_smooth = medfilt(test_loss, abs(testwindow))  # median filter, odd window size
    else:
        test_loss_smooth = []

    ##print("Best smoothed TRAIN performance at index:")
    ##min_train_loss = np.min(train_loss_smooth[window:Ntrain-abs(window/2)])
    ##best_idx = np.where(train_loss_smooth[window:Ntrain-abs(window/2)]==min_train_loss)[0][0] + -abs(window/2)
    ##print(best_idx)
    ##print("{}: iteration {}, loss {}".format(log_base, train_iteration[best_idx], min_train_loss))
    ##print("Best smoothed TEST performance at index:")
    ##min_test_loss = np.min(test_loss_smooth[window:Ntest-abs(window/2)])
    ##best_idx = np.where(test_loss_smooth[window:Ntest-abs(window/2)]==min_test_loss)[0][0] + -abs(window/2)
    ##print(best_idx)
    ##print("{}: iteration {}, loss {}".format(log_base, test_iteration[best_idx], min_test_loss))

    # Visualization
    plot_loss(train_iteration, train_loss, test_iteration, test_loss,
              LOGSCALE, XLIM, YLIM, log_base)
    plot_loss(train_iteration, train_loss_smooth, test_iteration, test_loss_smooth,
              LOGSCALE, XLIM, YLIM, log_base + '_smooth')
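# `plot_loss` (and the match_train_loss / match_test_loss / match_test_iteration
# helpers used above) live outside this section.  Below is a minimal sketch of a
# plot_loss compatible with the two calls above; the signature mirrors those
# calls, but everything inside the body is an assumption.
import matplotlib.pyplot as plt


def plot_loss(train_iter, train_loss, test_iter, test_loss,
              logscale, xlim, ylim, name):
    """Plot train/test loss curves and save them as <name>.png."""
    plt.clf()
    plot = plt.semilogy if logscale else plt.plot
    if len(train_loss):
        plot(train_iter[:len(train_loss)], train_loss, 'k', label='Train loss')
    if len(test_loss):
        plot(test_iter[:len(test_loss)], test_loss, 'r', label='Test loss')
    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    plt.xlabel('Number of iterations')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.legend()
    plt.savefig(name + '.png')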
def main():
    log_files = process_arguments(sys.argv)
    train_iteration = []
    train_loss = []
    lr = []
    test_iteration = []
    test_loss = []
    test_accuracy = []
    top1_accuracy = []
    top5_accuracy = []
    base_test_iter = 0
    base_train_iter = 0

    for log_file in log_files:
        with open(log_file, 'rb') as f:
            if len(train_iteration) != 0:
                base_train_iter = train_iteration[-1]
                base_test_iter = test_iteration[-1]
            for line in f:
                # TRAIN NET
                if strstr(line, 'Iteration') and strstr(line, 'lr'):
                    matched = match_iteration(line)
                    train_iteration.append(int(matched.group(1)))
                    matched = match_lr(line)
                    lr.append(float(matched.group(1)))
                elif strstr(line, 'Train net output'):
                    matched = match_loss(line)
                    train_loss.append(float(matched.group(1)))
                # TEST NET
                elif strstr(line, 'Iteration') and strstr(line, 'Testing net'):
                    matched = match_iteration(line)
                    test_iteration.append(int(matched.group(1)))
                elif strstr(line, 'Test net output #2'):
                    matched = match_loss(line)
                    test_loss.append(float(matched.group(1)))
                elif strstr(line, 'Test net output #0'):
                    matched = match_top1(line)
                    top1_accuracy.append(float(matched.group(1)))
                elif strstr(line, 'Test net output #1'):
                    matched = match_top5(line)
                    top5_accuracy.append(float(matched.group(1)))

    print("TRAIN", train_iteration, train_loss)
    print("TEST", test_iteration, test_loss)
    print("LEARNING RATE", train_iteration, lr)
    print("TOP1_ACCURACY", test_iteration, top1_accuracy)
    print("TOP5_ACCURACY", test_iteration, top5_accuracy)

    # loss
    plt.plot(train_iteration, train_loss, 'k', label='Train loss')
    plt.plot(test_iteration, test_loss, 'r', label='Test loss')
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Number of iterations')
    plt.savefig('loss.png')
    plt.show()

    # learning rate
    plt.clf()
    plt.plot(train_iteration, lr, 'g', label='Learning rate')
    plt.legend()
    plt.ylabel('Learning rate')
    plt.xlabel('Number of iterations')
    plt.savefig('lr.png')
    plt.show()

    # evaluation
    plt.clf()
    plt.plot(test_iteration, top1_accuracy, 'm', label='Top-1 accuracy')
    plt.plot(test_iteration, top5_accuracy, 'c', label='Top-5 accuracy')
    plt.legend(loc=0)
    plt.savefig('evaluation.png')
    plt.show()
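# The classification variant above expects the test net to report top-1 and
# top-5 accuracy.  The exact output names depend on the network prototxt, so
# the patterns below (matching e.g. 'Test net output #0: accuracy/top-1 = 0.68')
# are assumptions, again reusing the illustrative `_text` helper:
import re


def match_top1(line):
    return re.search(r'top-?1 = ([0-9.]+)', _text(line))


def match_top5(line):
    return re.search(r'top-?5 = ([0-9.]+)', _text(line))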