def main():
    # Input photos: four camera-shot test images.
    filename = []
    for i in range(0, 4):
        tmp = "../sample/camera_photo/0317_test/" + str(i + 1) + ".jpg"
        filename.append(tmp)

    ## 5 classes model
    classes = [
        'musical_symbol_bass_clef', 'musical_symbol_half_note',
        'musical_symbol_quarter_note', 'musical_symbol_quarter_rest',
        'musical_symbol_g_clef'
    ]
    model_number = "0401_9"
    model_dir = "../Model/" + model_number + "/"
    model_name = "model_5_classes_" + model_number + ".meta"

    # Ground-truth label CSVs, one per test image.
    true_label_file = []
    for i in range(0, 4):
        tmp = "../sample/true_label/0317_test/" + str(
            i + 1) + "_true_label_5_classes.csv"
        true_label_file.append(tmp)

    t_acc_matrix = init_Conf_mat.InitConfMat(classes)

    #for i in range(0, 1):
    for i in range(0, 4):
        # Locate the five staff lines, cut out candidate symbols, classify
        # them, score the predictions against the true labels, and write the
        # per-image result.
        y_position, y_position_bottom = five.five_lines(filename[i])
        symbol_Info = x_scan.x_cut(y_position, y_position_bottom, filename[i])
        allInfo_symbol = predict.predict_symbol(symbol_Info, filename[i],
                                                classes, model_dir, model_name)
        allInfo_symbol, t_acc_matrix = accuracy.acc(allInfo_symbol,
                                                    true_label_file[i], classes,
                                                    filename[i], t_acc_matrix,
                                                    model_name)
        OutputResult.Output(allInfo_symbol, classes, filename[i], model_name)
        #pitch.Identify(allInfo_symbol, filename[i], y_position, y_position_bottom)

    summary.Summary(t_acc_matrix, model_name)
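# The pipeline above relies on project modules that are not shown here
# (init_Conf_mat, five, x_scan, predict, accuracy, OutputResult, summary).
# As a hedged illustration only: InitConfMat presumably returns an empty
# confusion matrix with one row and column per class, roughly like the sketch
# below. The name init_conf_mat_sketch is hypothetical, not the project's API.
import numpy as np

def init_conf_mat_sketch(classes):
    # All predicted-vs-true counts start at zero.
    n = len(classes)
    return np.zeros((n, n), dtype=int)

if __name__ == "__main__":
    main()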
def one_graph(prob_missingness):
    p = prob_missingness
    without_imp = np.zeros(5)
    grand_mean = np.zeros(5)
    conditional_mean = np.zeros(5)
    closest = np.zeros(5)
    regression = np.zeros(5)
    without_removing = np.zeros(5)
    multiple_closest = np.zeros(5)
    multiple_regression = np.zeros(5)
    training_set_size = ['50', '100', '250', '500', '1000']

    # Intact copy of the data, used as the no-missingness baseline.
    dini = (pandas.read_csv('data_banknote_authentication.csv').to_numpy())
    np.random.shuffle(dini)
    X1i = dini[0:1000, 0:4]
    X2i = dini[1000:, 0:4]
    Y1i = dini[0:1000, 4]
    Y2i = dini[1000:, 4]

    for i in range(nb_tests):
        df = (pandas.read_csv('data_banknote_authentication.csv').to_numpy())
        np.random.shuffle(df)
        # Inject MNAR missingness: zero out features only for class-1 rows,
        # each with probability 2 * p. (df.shape[0] iterates over every row,
        # replacing the original hard-coded row count; the inner loop variable
        # is renamed so it no longer shadows the repetition counter i.)
        for r in range(df.shape[0]):
            for j in range(4):
                if ((np.random.uniform()) < p * 2) and (df[r, 4] == 1.0):
                    df[r, j] = 0
        fullX1 = df[0:1000, 0:4]
        fullY1 = df[0:1000, 4]
        X2 = df[1000:, 0:4]
        Y2 = df[1000:, 4]

        for j in range(5):
            without_removing[j] += acc.acc(X1i[0:used_trs[j]][:],
                                           Y1i[0:used_trs[j]], X2i, Y2i,
                                           'no_imputation')
            without_imp[j] += acc.acc(fullX1[0:used_trs[j], :],
                                      fullY1[0:used_trs[j]], X2, Y2,
                                      'no_imputation')
            grand_mean[j] += acc.acc(fullX1[0:used_trs[j], :],
                                     fullY1[0:used_trs[j]], X2, Y2,
                                     'grand_mean')
            conditional_mean[j] += acc.acc(fullX1[0:used_trs[j], :],
                                           fullY1[0:used_trs[j]], X2, Y2,
                                           'conditional_mean')
            closest[j] += acc.acc(fullX1[0:used_trs[j], :],
                                  fullY1[0:used_trs[j]], X2, Y2, 'closest')
            regression[j] += acc.acc(fullX1[0:used_trs[j], :],
                                     fullY1[0:used_trs[j]], X2, Y2,
                                     'regression')
            multiple_closest[j] += acc.acc(fullX1[0:used_trs[j], :],
                                           fullY1[0:used_trs[j]], X2, Y2,
                                           'multiple_closest')
            multiple_regression[j] += acc.acc(fullX1[0:used_trs[j], :],
                                              fullY1[0:used_trs[j]], X2, Y2,
                                              'multiple_regression')

    # Average the accumulated accuracies over the repetitions.
    without_removing /= nb_tests
    without_imp /= nb_tests
    grand_mean /= nb_tests
    conditional_mean /= nb_tests
    closest /= nb_tests
    regression /= nb_tests
    multiple_closest /= nb_tests
    multiple_regression /= nb_tests

    plt.figure(figsize=(10, 5))
    plt.plot(training_set_size, without_removing, color='purple', linewidth=2)
    plt.plot(training_set_size, without_imp, color='green', linewidth=2)
    plt.plot(training_set_size, grand_mean, color='blue', linewidth=2)
    plt.plot(training_set_size, conditional_mean, color='orange', linewidth=2)
    plt.plot(training_set_size, closest, color='red', linewidth=2)
    plt.plot(training_set_size, regression, color='black', linewidth=2)
    plt.plot(training_set_size, multiple_closest, color='pink', linewidth=2)
    plt.plot(training_set_size, multiple_regression, color='grey', linewidth=2)
    mylabels = [
        'without_removing', 'No imputation', 'Grand Mean', 'Conditional Mean',
        'Closest neighbour', 'Regression', 'multiple_closest',
        'multiple_regression'
    ]
    plt.title('Real_life_data MNAR' + ' Prob_missingness: ' + str(p))
    plt.legend(labels=mylabels)
    plt.ylabel('Accuracy', fontsize=10)
    plt.xlabel('Training size', fontsize=10)
    plt.show()
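# one_graph() above depends on module-level globals (nb_tests, the number of
# repetitions, and used_trs, the list of training-set sizes matching
# training_set_size) and on acc.acc(X_train, y_train, X_test, y_test, method),
# which is assumed to impute the 0-encoded missing entries according to
# `method`, fit a classifier, and return test accuracy. A minimal sketch of
# that assumed interface using scikit-learn, covering only the 'grand_mean'
# and 'no_imputation' branches; an illustration, not the project's acc module:
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

def acc_sketch(X_train, y_train, X_test, y_test, method):
    if method == 'grand_mean':
        # Replace the 0-encoded missing entries with the column (grand) mean.
        imputer = SimpleImputer(missing_values=0, strategy='mean')
        X_train = imputer.fit_transform(X_train)
        X_test = imputer.transform(X_test)
    # 'no_imputation' (and any unhandled method) trains on the data as-is.
    clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    return accuracy_score(y_test, clf.predict(X_test))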
def one_graph(ncov, dim, type_missingness, prob_missingness):
    p = prob_missingness
    without_imp = np.zeros(6)
    grand_mean = np.zeros(6)
    conditional_mean = np.zeros(6)
    closest = np.zeros(6)
    regression = np.zeros(6)
    without_removing = np.zeros(6)
    #multiple_closest = np.zeros(6)
    #multiple_regression = np.zeros(6)
    training_set_size = ['50', '100', '250', '500', '1000', '2000']

    for i in range(nb_tests):
        data = dt.full_gen(ncov, dim, 2000, 1000, type_missingness,
                           prob_missingness)
        fullX1 = data[0]
        fullY1 = data[1]
        X2 = data[2]
        Y2 = data[3]
        X1i = data[4]
        X2i = data[5]

        for j in range(6):
            without_removing[j] += acc.acc(X1i[0:used_trs[j]][:],
                                           fullY1[0:used_trs[j]], X2i, Y2,
                                           'no_imputation')
            without_imp[j] += acc.acc(fullX1[0:used_trs[j], :],
                                      fullY1[0:used_trs[j]], X2, Y2,
                                      'no_imputation')
            grand_mean[j] += acc.acc(fullX1[0:used_trs[j], :],
                                     fullY1[0:used_trs[j]], X2, Y2,
                                     'grand_mean')
            conditional_mean[j] += acc.acc(fullX1[0:used_trs[j], :],
                                           fullY1[0:used_trs[j]], X2, Y2,
                                           'conditional_mean')
            closest[j] += acc.acc(fullX1[0:used_trs[j], :],
                                  fullY1[0:used_trs[j]], X2, Y2, 'closest')
            regression[j] += acc.acc(fullX1[0:used_trs[j], :],
                                     fullY1[0:used_trs[j]], X2, Y2,
                                     'regression')
            #multiple_closest[j] += acc.acc(fullX1[0:used_trs[j], :], fullY1[0:used_trs[j]], X2, Y2, 'multiple_closest')
            #multiple_regression[j] += acc.acc(fullX1[0:used_trs[j], :], fullY1[0:used_trs[j]], X2, Y2, 'multiple_regression')

    without_removing /= nb_tests
    without_imp /= nb_tests
    grand_mean /= nb_tests
    conditional_mean /= nb_tests
    closest /= nb_tests
    regression /= nb_tests
    #multiple_closest /= nb_tests
    #multiple_regression /= nb_tests

    plt.figure(figsize=(10, 5))
    plt.plot(training_set_size, without_removing, color='purple', linewidth=2)
    plt.plot(training_set_size, without_imp, color='green', linewidth=2)
    plt.plot(training_set_size, grand_mean, color='blue', linewidth=2)
    plt.plot(training_set_size, conditional_mean, color='orange', linewidth=2)
    plt.plot(training_set_size, closest, color='red', linewidth=2)
    plt.plot(training_set_size, regression, color='black', linewidth=2)
    #plt.plot(training_set_size, multiple_closest, color='pink', linewidth=2)
    #plt.plot(training_set_size, multiple_regression, color='grey', linewidth=2)
    mylabels = [
        'without_removing', 'No imputation', 'Grand Mean', 'Conditional Mean',
        'Closest neighbour', 'Regression'
    ]  #,'multiple_closest','multiple_regression'
    plt.title('Covariance: ' + ncov + ' Dim: ' + str(dim) +
              ' Type missingness: ' + type_missingness +
              ' Prob_missingness: ' + str(p))
    plt.legend(labels=mylabels)
    plt.ylabel('Accuracy', fontsize=10)
    plt.xlabel('Training size', fontsize=10)
    plt.axis([0, 6, 0.7, 0.95])
    plt.show()
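# The synthetic experiment above assumes dt.full_gen(ncov, dim, n_train,
# n_test, type_missingness, p) returns six arrays: training features with
# missingness, training labels, test features with missingness, test labels,
# and the intact training/test features used by the 'without_removing'
# baseline. A hedged sketch of such a generator with class-shifted Gaussian
# features and MCAR-style zeroing; the details (and the name full_gen_sketch)
# are assumptions, not the project's dt module:
import numpy as np

def full_gen_sketch(ncov, dim, n_train, n_test, type_missingness, p):
    rng = np.random.default_rng()
    n = n_train + n_test
    y = rng.integers(0, 2, size=n)                             # binary labels
    X = rng.normal(loc=y[:, None], scale=1.0, size=(n, dim))   # class-dependent means
    X_miss = X.copy()
    X_miss[rng.random((n, dim)) < p] = 0                       # zero entries with prob. p
    return [X_miss[:n_train], y[:n_train], X_miss[n_train:], y[n_train:],
            X[:n_train], X[n_train:]]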
config['link_conf_thr'] = 0.8
config['min_area'] = 300
config['min_height'] = 10

i = 0
total_time1 = time.time()
for impath in impaths:
    #impath = '/home/blin/Downloads/text_detection/test/1-123152001-OCR-LF-C01.jpg'
    #impath = '/home/blin/Downloads/text_detection/test/1-142434001-OCR-AH-A01.jpg'
    imname = os.path.basename(impath)
    im = cv2.imread(impath)
    print(impath)

    t1 = time.time()
    bboxs = detection(im, sess_d, input_x, segm_logits, link_logits, config)
    t2 = time.time()
    print('detection_time: ', (t2 - t1), 'result', bboxs)
    #bboxs = ['792, 364, 792, 298, 923, 298, 923, 364\n', '972, 375, 972, 303, 1271, 303, 1271, 375\n', '972, 455, 972, 389, 1109, 389, 1109, 455\n']

    predicted = recognition(im, sess_r_h, sess_r_v, bboxs, (240, 32),
                            images_ph_h, images_ph_v, model_out_h, model_out_v,
                            decoded_h, decoded_v)
    #predicted = recognition(im, sess_r_h, bboxs, (240, 32), images_ph_h, model_out_h, decoded_h)
    t3 = time.time()
    print('recognition_time: ', (t3 - t2), 'result', predicted)

    i += 1
    print(i)
    line = imname + ' ' + predicted + '\n'
    res_txt.write(line)

res_txt.close()
total_time2 = time.time()
print('total_time: ', (total_time2 - total_time1))
acc('/home/blin/containernumber_result.txt')
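# acc() at the end of the script is assumed to compare the written
# "<image name> <predicted text>" lines against ground truth and report an
# accuracy. A hedged sketch of such a checker, using a hypothetical
# ground-truth file in the same "<image name> <label>" format; this is an
# illustration, not the project's actual acc implementation:
def acc_sketch(result_path, gt_path='/home/blin/containernumber_gt.txt'):
    def load(path):
        # Map image name -> text, one pair per line.
        pairs = {}
        with open(path) as f:
            for line in f:
                parts = line.strip().split(' ', 1)
                if len(parts) == 2:
                    pairs[parts[0]] = parts[1]
        return pairs

    results, gt = load(result_path), load(gt_path)
    correct = sum(1 for name, label in gt.items() if results.get(name) == label)
    print('accuracy:', correct / max(len(gt), 1))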
def one_graph(ncov, dim, type_missingness, prob_missingness):
    p = prob_missingness
    # One row per training-set size, one column per repetition, so the
    # variance over repetitions can be taken along axis 1.
    without_imp = np.zeros((6, nb_tests))
    grand_mean = np.zeros((6, nb_tests))
    conditional_mean = np.zeros((6, nb_tests))
    closest = np.zeros((6, nb_tests))
    regression = np.zeros((6, nb_tests))
    without_removing = np.zeros((6, nb_tests))
    training_set_size = ['50', '100', '250', '500', '1000', '2000']

    for i in range(nb_tests):
        data = dt.full_gen(ncov, dim, 2000, 1000, type_missingness,
                           prob_missingness)
        fullX1 = data[0]
        fullY1 = data[1]
        X2 = data[2]
        Y2 = data[3]
        X1i = data[4]
        X2i = data[5]

        for j in range(6):
            without_removing[j][i] += acc.acc(X1i[0:used_trs[j]][:],
                                              fullY1[0:used_trs[j]], X2i, Y2,
                                              'no_imputation')
            without_imp[j][i] += acc.acc(fullX1[0:used_trs[j], :],
                                         fullY1[0:used_trs[j]], X2, Y2,
                                         'no_imputation')
            grand_mean[j][i] += acc.acc(fullX1[0:used_trs[j], :],
                                        fullY1[0:used_trs[j]], X2, Y2,
                                        'grand_mean')
            conditional_mean[j][i] += acc.acc(fullX1[0:used_trs[j], :],
                                              fullY1[0:used_trs[j]], X2, Y2,
                                              'conditional_mean')
            closest[j][i] += acc.acc(fullX1[0:used_trs[j], :],
                                     fullY1[0:used_trs[j]], X2, Y2, 'closest')
            regression[j][i] += acc.acc(fullX1[0:used_trs[j], :],
                                        fullY1[0:used_trs[j]], X2, Y2,
                                        'regression')

    # Variance of the accuracy across the nb_tests repetitions.
    v_without_removing = np.var(without_removing, axis=1)
    v_without_imp = np.var(without_imp, axis=1)
    v_grand_mean = np.var(grand_mean, axis=1)
    v_conditional_mean = np.var(conditional_mean, axis=1)
    v_closest = np.var(closest, axis=1)
    v_regression = np.var(regression, axis=1)

    plt.figure(figsize=(10, 5))
    plt.plot(training_set_size, v_without_removing, color='purple', linewidth=2)
    plt.plot(training_set_size, v_without_imp, color='green', linewidth=2)
    plt.plot(training_set_size, v_grand_mean, color='blue', linewidth=2)
    plt.plot(training_set_size, v_conditional_mean, color='orange', linewidth=2)
    plt.plot(training_set_size, v_closest, color='red', linewidth=2)
    plt.plot(training_set_size, v_regression, color='black', linewidth=2)
    mylabels = [
        'without_removing', 'No imputation', 'Grand Mean', 'Conditional Mean',
        'Closest neighbour', 'Regression'
    ]
    plt.title('Accuracy Variance ' + 'Covariance: ' + ncov + ' Dim: ' +
              str(dim) + ' Type missingness: ' + type_missingness +
              ' Prob_missingness: ' + str(p))
    plt.legend(labels=mylabels)
    plt.ylabel('Accuracy variance', fontsize=10)
    plt.xlabel('Training size', fontsize=10)
    plt.savefig('Variance ' + 'Covariance: ' + ncov + ' Dim: ' + str(dim) +
                ' Type missingness: ' + type_missingness +
                ' Prob_missingness: ' + str(p) + '.jpg')
    plt.show()
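# A hedged driving sketch for the variance plot above; the covariance label,
# dimension, missingness types, and probabilities below are illustrative
# assumptions, not the experiment's actual parameter grid.
for t in ('MCAR', 'MNAR'):
    for prob in (0.1, 0.3):
        one_graph('identity', 4, t, prob)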