def find_matches_in_cluster(cluster_and_descriptor):
    """Match input-image descriptors against one cluster's images.

    Parameters
    ----------
    cluster_and_descriptor : tuple
        ``(cluster, input_descriptors)`` where ``cluster`` is a sequence of
        image paths that all live in the same directory (the cluster
        directory) and ``input_descriptors`` are the descriptors computed
        for the query image.

    Returns
    -------
    Whatever ``match.find_match`` returns for the best match within this
    cluster (presumably a (path, score) ranking — confirm against match.py).
    """
    cluster, input_descriptors = cluster_and_descriptor
    img_match = cluster[0]
    # Hoisted: the original computed os.path.dirname(cluster[0]) twice.
    cluster_dir = os.path.dirname(img_match)
    # Converted from a Python 2 print statement (SyntaxError under Python 3
    # and inconsistent with the rest of this file, which uses print()).
    print('Matching ', cluster_dir, '...')
    cluster_files = glob.glob(os.path.join(cluster_dir, '*.png'))
    # The cluster's template image is used only for coarse cluster selection;
    # exclude it from the fine-grained per-image search.
    cluster_files = [f for f in cluster_files if not f.endswith('template.png')]
    cluster_descriptors = match.load_images_descriptors(
        cluster_files, os.path.join(cluster_dir, 'images.db'))
    return match.find_match(input_descriptors, cluster_descriptors)
def find_card_in_clusters(img, resize_factor=0.25):
    """Find the best-matching card for *img* via a two-stage cluster search.

    Stage 1 matches the input image against per-cluster template descriptors
    to pick the top 4 candidate clusters; stage 2 searches inside those
    clusters for the best individual image.

    Parameters
    ----------
    img : input image (format as expected by match.descriptors_for_input_image)
    resize_factor : float, optional
        Downscale factor applied before descriptor extraction (default 0.25).

    Returns
    -------
    The best match entry; ``matches[0][1]`` is treated as its score, so each
    entry is presumably a (image, score) pair — confirm against match.py.
    """
    # All print statements converted from Python 2 syntax (SyntaxError under
    # Python 3 and inconsistent with the rest of this file).
    alls = time()
    start = time()
    print('Loading clusters descriptors...')
    template_descriptors = load_clusters_template_descriptors()
    print('Loaded clusters in:', time() - start, 'seconds')

    start = time()
    print('Loading input image descriptors...')
    input_descriptors = match.descriptors_for_input_image(img, resize_factor)
    print('Loaded input image in:', time() - start, 'seconds')

    # Stage 1: coarse selection of candidate clusters by template descriptors.
    print('Searching for top clusters...')
    start = time()
    cluster_matches = match.find_match(input_descriptors, template_descriptors, top=4)
    print('Found top clusters in:', time() - start, 'seconds')

    # Stage 2: fine-grained search inside the top clusters, parallelized.
    start = time()
    print('Searching for image in top clusters...')
    matches = find_matches_in_clusters(input_descriptors, cluster_matches, num_of_processors=4)
    print('Found match in:', time() - start, 'seconds with score:', matches[0][1])
    print('Total time:', time() - alls, 'seconds')
    return matches[0]
def __init__(self, no):
    """Build the "match found" window, listing the matched profiles.

    :param no: controls how many match entries are displayed (entries
        1 .. no-1 of the result of ``match.find_match(profiles, no - 1)``).
        NOTE(review): indexing matches[1..no-1] against a call made with
        ``no - 1`` looks like it may skip matches[0] or run past the end —
        confirm against match.find_match's contract.
    """
    QtWidgets.QWidget.__init__(self)

    # Green window background.
    self.setAutoFillBackground(True)
    pal = self.palette()
    pal.setColor(self.backgroundRole(), QtGui.QColor(102, 185, 51))
    self.setPalette(pal)

    matches = match.find_match(profiles, no - 1)

    self.setWindowTitle('We found you a match!')
    self.resize(350, 580)
    layout = QtWidgets.QGridLayout()

    # Bold heading at the top of the window.
    heading_font = QtGui.QFont()
    heading_font.setBold(True)
    heading_font.setPointSize(20)
    heading = QtWidgets.QLabel("We found you a match!")
    heading.setAlignment(QtCore.Qt.AlignCenter)
    heading.setFont(heading_font)
    layout.addWidget(heading)

    # One white framed card per match.
    for idx in range(1, no):
        matched = matches[idx]

        card = QtWidgets.QFrame()
        card.setFrameShape(0x3)
        card.setFrameShadow(0x30)
        card.setMaximumSize(350, 150)
        card.setAutoFillBackground(True)
        card_pal = card.palette()
        card_pal.setColor(card.backgroundRole(), QtCore.Qt.white)
        card.setPalette(card_pal)

        card_layout = QtWidgets.QGridLayout()

        bold_font = QtGui.QFont()
        bold_font.setBold(True)
        title = QtWidgets.QLabel('Matched With: \n')
        title.setAlignment(QtCore.Qt.AlignCenter)
        title.setFont(bold_font)

        details = QtWidgets.QLabel(
            f'Name: {matched[0]}\nAge: {matched[1]}\nLanguage: {matched[2]}')
        details.setAlignment(QtCore.Qt.AlignCenter)

        card_layout.addWidget(title)
        card_layout.addWidget(details)
        card.setLayout(card_layout)
        layout.addWidget(card)

    # Bottom row: Reject / Open Chat buttons (both just close the window).
    button_row = QtWidgets.QFrame()
    button_row.setMaximumSize(350, 70)
    button_layout = QtWidgets.QHBoxLayout()
    button_row.setLayout(button_layout)

    reject_button = QtWidgets.QPushButton('Reject')
    reject_button.clicked.connect(self.close)
    chat_button = QtWidgets.QPushButton('Open Chat')
    chat_button.clicked.connect(self.close)

    button_layout.addWidget(reject_button)
    button_layout.addWidget(chat_button)
    layout.addWidget(button_row)

    self.setLayout(layout)
def example():
    """Train a CNN corrector on the training session, then evaluate it on
    each independent prediction session.

    Relies on module-level globals: ``group_similarity_src``,
    ``cnn_build_time_range_set``, ``predict_time_range_set`` and ``results``
    (the latter is mutated via ``results.clear()`` and read for the overall
    result — presumably populated by ``find_match``; confirm upstream).
    """
    # Face count of the group under consideration.
    face_count = get_face_count(group_similarity_src)

    # ---- Training session: collect data from every configured time range ----
    print("\n\n\n############ Training Session ############")
    datas = []
    for start_time, end_time in cnn_build_time_range_set:
        datas.extend(get_data(start_time, end_time))
    results.clear()

    # Baseline accuracy of the imported data, as a benchmark.
    print("\n\n\n------ Original Result ------")
    find_rate(datas, 'person_first_id')

    # Build the automaton and apply the decision model.
    root_c = build_model(datas)
    matched_face_table, unmatched_face_table, unmatched_validate_table, unmatched_face_objects = find_match(
        root_c.next, face_count)

    # Vectorise the data for the neural network.
    unmatched_face_table, unmatched_validate_table, unmatched_face_objects = convert_to_vector(
        matched_face_table, unmatched_face_table, unmatched_validate_table, unmatched_face_objects)

    # 70/30 train/test split.
    division = int(unmatched_face_table.shape[0] * 0.7)
    X = unmatched_face_table[:division]
    y = unmatched_validate_table[:division]
    X_test = unmatched_face_table[division:]
    y_test = unmatched_validate_table[division:]

    # Build the CNN and save the model.
    model = conv_net(X, y, X_test, y_test)

    # Prediction on the construction session itself.
    print("\n\n\n------ Unmatched Face (Corrected by CNN) ------")
    cnn_result = make_prediction(model, unmatched_face_table, unmatched_face_objects)
    print("\n\n\n------ Overall Result (Corrected by CNN) ------")
    find_rate(list(results) + list(cnn_result), 'result_id')

    print(predict_time_range_set)
    for predict_time_range in predict_time_range_set:
        print(predict_time_range)

        # ---- Independent testing session ----
        # ("Indipendent" is the original runtime string, preserved as-is.)
        print("\n\n\n############ Indipendent Testing Session ############")
        datas = get_data(predict_time_range[0], predict_time_range[1])
        results.clear()

        # Baseline accuracy of the imported data, as a benchmark.
        print("\n\n\n------ Original Result ------")
        find_rate(datas, 'person_first_id')

        # Build the automaton and apply the decision model for this session.
        root_c = build_model(datas)
        matched_face_table, unmatched_face_table, unmatched_validate_table, unmatched_face_objects = find_match(
            root_c.next, face_count)
        unmatched_face_table, unmatched_validate_table, unmatched_face_objects = convert_to_vector(
            matched_face_table, unmatched_face_table, unmatched_validate_table, unmatched_face_objects)

        # Evaluate the training-session model on this independent session.
        score = model.evaluate(unmatched_face_table, unmatched_validate_table)
        print("\nCross Session Accuracy: ", score[-1])

        # Prediction on the independent test session.
        print("\n\n\n------ Unmatched Face ------")
        find_rate(unmatched_face_objects, 'person_first_id')
        print("\n\n\n------ Unmatched Face (Corrected by CNN) ------")
        cnn_result = make_prediction(model, unmatched_face_table, unmatched_face_objects)
        print("\n\n\n------ Overall Result (Corrected by CNN) ------")
        find_rate(list(results) + list(cnn_result), 'result_id')