def __init__(self, parent=None):
    super(JHFaceGUI, self).__init__(parent)
    self.data_builder = data_builder.DataBuilder(global_variables.DATA_PATH)
    self.face_cascade = cv2.CascadeClassifier(global_variables.CASCADE_PATH)
    self.num_of_faces = 0
    self.recognizer = recognizer.Recognizer(
        self.data_builder, recognizer.FACE_RECOGNIZER_LBPH)
    self.recognizer_enable = False
    self.setupUi(self)
    self.nameLineEdit.setText('')
    self.capture = cv2.VideoCapture(0)
    self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.cameraLabel.width())
    self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.cameraLabel.height())
    _, self.current_image = self.capture.read()
    self.addDataButton.clicked.connect(self.add_data)
    self.trainButton.clicked.connect(self.train)
    self.recognizeButton.clicked.connect(self.toggle_recognizer)
    self.thresholdLineEdit.textChanged.connect(self.set_threshold)
    self.timer = QtCore.QTimer()
    self.timer.timeout.connect(self.display_video_stream)
    self.timer.start(30)
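# A minimal sketch of what the display_video_stream slot wired up above
# could look like, assuming PyQt5 with QtGui imported alongside QtCore.
# The method name comes from the signal connection; this body is
# illustrative, not the original implementation.
def display_video_stream(self):
    ret, frame = self.capture.read()
    if not ret:
        return
    self.current_image = frame
    # OpenCV delivers BGR; Qt expects RGB.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    height, width, channels = rgb.shape
    image = QtGui.QImage(rgb.data, width, height,
                         channels * width, QtGui.QImage.Format_RGB888)
    self.cameraLabel.setPixmap(QtGui.QPixmap.fromImage(image))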
def __init__(self):
    QMainWindow.__init__(self)
    self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint |
                        QtCore.Qt.FramelessWindowHint |
                        QtCore.Qt.X11BypassWindowManagerHint)
    self.setAttribute(Qt.WA_NoSystemBackground, True)
    self.setAttribute(Qt.WA_TranslucentBackground, True)
    # x, y, width, height
    self.setGeometry(*config.BOARD_POINT, config.BOARD_SIZE, config.BOARD_SIZE)
    self.setWindowOpacity(0.8)
    self.is_white = True
    self.white_checkbox = QAction('White', checkable=True, checked=True)
    self.white_checkbox.triggered.connect(self.set_white)
    self.black_checkbox = QAction('Black', checkable=True)
    self.black_checkbox.triggered.connect(self.set_black)
    self.recognizer = recognizer.Recognizer()
    self.point_from = None
    self.point_to = None
    # init engine
    self.engine  # noqa
def recognizer(self):
    self.progress_bar()
    self.TProgressbar1['value'] = 0
    root.update_idletasks()
    # os.system("C:\\Users\\acer\\AppData\\Local\\Temp\\cnn.py\\recognizer.py")
    obj = recognizer.Recognizer()
    self.TProgressbar1['value'] = 0
def __init__(self, draw_widget, btAddr):
    super(DrawingRecognizer, self).__init__()
    self.draw_widget = draw_widget
    self.connectingWiimote(btAddr)
    self.initUI()
    self.recognition = recognizer.Recognizer()
    self.setSavedTemplates()
    self.transformation = pt()
    # start WiiControler recognition
    self.gameInterface()
def main():
    rospy.init_node("main")
    speaker = speak.Speaker()
    speaker.intro()
    rec = recognizer.Recognizer()
    res = rec.start_recognizer()
    if res[0] == "HELLO ":
        num_of_right_answers = 0
        num_of_wrong_answers = 0
        # Keep playing until the player reaches 15 correct answers
        # or 3 wrong ones, whichever comes first.
        while (num_of_right_answers < 15) and (num_of_wrong_answers < 3):
            answer = speaker.game()
            rec = recognizer.Recognizer()
            res = rec.start_recognizer()
            if answer.lower() == res[:-1].lower():
                speaker.correct_answer()
                num_of_right_answers += 1
            else:
                speaker.wrong_answer()
                num_of_wrong_answers += 1
    else:
        speaker.shutdown()
def __init__(self, flags):
    # flags[0] = quit
    # flags[1] = add user
    # flags[2] = camera on or off
    self.flags = flags
    self.to_scan = 0
    self.db_update = 0
    self.face_thread = None
    self.db_thread = None
    self.db = database.Database('python', 'Hinoob22')
    self.recognizer = recognizer.Recognizer()
    self.faces = set()
    self.faceCascade = cv2.CascadeClassifier(
        "haarcascade_frontalface_default.xml")
def run(args):
    if args['mode'] == 'train':
        ## preprocessing steps
        preprocess.preprocess_train('./cars/cars_train',
                                    './cars/devkit/cars_train_annos.mat')
        preprocess.preprocess_test('./cars/cars_test',
                                   './cars/devkit/cars_test_annos.mat')
        split_test_val.test_with_labels(
            './cars/cars_test',
            './cars/devkit/cars_test_annos_withlabels.mat')
        split_test_val.val_test_split()
        train.start(train_path, val_path)
    elif args['mode'] == 'test':
        car_detector = det.Detector()
        car_recognizer = rec.Recognizer()
        images_dir = os.listdir(args['data'] + "/test/")
        for imagepath in images_dir:
            no_of_cars, car_boxes = car_detector.test_model(
                args['data'] + "/test/" + imagepath)
            print('car_boxes', car_boxes[0])
            if no_of_cars > 0:
                print(imagepath)
                image_ = image.load_img(args['data'] + "/test/" + imagepath)
                image_ = image.img_to_array(image_)
                height, width, ch = image_.shape
                if no_of_cars > 1:
                    for cars in range(0, no_of_cars):
                        print('cars', cars)
                        rec_image = image_[
                            int(car_boxes[cars][0] * height):int(car_boxes[cars][2] * height),
                            int(car_boxes[cars][1] * width):int(car_boxes[cars][3] * width)]
                        result = car_recognizer.load_images_predict(rec_image)
                        print("found {} in the above image".format(result))
                else:
                    rec_image = image_[
                        int(car_boxes[0][0] * height):int(car_boxes[0][2] * height),
                        int(car_boxes[0][1] * width):int(car_boxes[0][3] * width)]
                    result = car_recognizer.load_images_predict(rec_image)
                    print("found {} in the above image".format(result))
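# The crop arithmetic above is duplicated for the one-car and multi-car
# paths. A small illustrative helper (not part of the original code),
# assuming each box is normalized (ymin, xmin, ymax, xmax) as the index
# math implies:
def crop_box(image_, box):
    """Crop a normalized (ymin, xmin, ymax, xmax) box from an HxWxC array."""
    height, width, _ = image_.shape
    ymin, xmin, ymax, xmax = box
    return image_[int(ymin * height):int(ymax * height),
                  int(xmin * width):int(xmax * width)]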
def main():
    mic = r.Recognizer()
    # Here the text is obtained, either from voice or typed input.
    text = "Cuántos departamentos tiene Colombia".lower()
    tokens = text.split(' ')  # Tokenize the text
    pln = p.NLP(text)  # Process the text
    entities = e.get_entities(text)  # Extract the entities
    POS = pln.getPosDoc()  # Extract the parts of speech
    no_stop = pln.getNoStopToken()  # Get the tokens without stop words
    # Collect every word that could name a table or a field.
    if 'ADJ' in POS:
        for lista in POS['ADJ']:
            POS['NOUN'].append(lista)
    if 'NOUN' in POS:  # If we found candidate words, process them
        nouns = POS['NOUN']
        tables = pdb.get_Possible_Table(nouns)  # Possible tables
        fields = pdb.get_Possible_Fields(nouns)  # Possible fields
        # Decide whether this is a special question or not
        special_question = pdb.get_special_question(nouns, entities)
        if not special_question:
            print(fields, entities)
            if fields and entities:
                # After processing the whole sentence, turn the results
                # into a database query.
                db.consulta(entities[0][1], fields[0], tokens)
            else:
                print("No podemos realizar tu consulta")
        else:
            if c.how_many in tokens:
                size = db.depCol()  # Query for special questions
                print("Colombia tiene {} departamentos".format(size))
            else:
                db.depCol()  # Query for special questions
    else:
        print("No hemos podido procesar tu frase")
from flask import Flask
from flask import request

import recognizer

temp_dir = 'temp'

app = Flask(__name__)
r = recognizer.Recognizer(temp_dir=temp_dir)
r.load_models()


@app.route('/upload', methods=['POST'])
def rec():
    if request.method == 'POST':
        data = request.data
        with open(temp_dir + '/test.wav', 'wb') as f:
            f.write(data)
        response = r.recognize()
        return str(response)
    return 'who'


if __name__ == '__main__':
    app.run()
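# A minimal client sketch for the /upload endpoint above (not part of the
# original file), assuming the server runs on Flask's default
# http://127.0.0.1:5000 and that sample.wav is a local recording.
import requests

with open('sample.wav', 'rb') as wav:
    resp = requests.post('http://127.0.0.1:5000/upload', data=wav.read())
print(resp.text)  # the recognizer's response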
def __init__(self, flags):
    self.flags = flags
    self.to_scan = 0
    self.face_thread = None
    self.recognizer = recognizer.Recognizer()
    time.sleep(10)
def __init__(self, keep_run=[False]):
    # keep_run is a shared, mutable flag list, so external code holding a
    # reference to it can stop this object's loop.
    self.keep_running = keep_run
    self.faceCascade = cv2.CascadeClassifier(
        "haarcascade_frontalface_default.xml")
    self.recognizer = recognizer.Recognizer()
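# Illustrative sketch (not part of the original class), showing how a Haar
# cascade like the one above is typically applied; `frame` is assumed to be
# a BGR image, e.g. from cv2.VideoCapture.read().
def detect_faces(self, frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale returns one (x, y, w, h) rectangle per face found.
    return self.faceCascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))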
    t2(1,2,3) - t5(4,2,3),
    t2(1,2,4) - t13(2,3,4),
    t3(1,2,3) - t10(4,1,3),
    t3(1,2,4) - t12(4,2,1),
    t3(1,3,4) - t3(4,2,3),
    t4(1,2,3) - t7(3,1,4),
    t4(1,2,4) - t5(1,2,3),
    t4(1,3,4) - t8(4,2,3),
    t4(2,3,4) - t6(2,3,4),
    t5(1,2,4) - t6(3,1,4),
    t5(1,3,4) - t7(2,3,4),
    t6(1,2,3) - t9(1,2,4),
    t7(1,2,3) - t9(1,3,4),
    t7(1,2,4) - t8(1,2,3),
    t8(1,2,4) - t11(3,4,2),
    t8(1,3,4) - t9(2,4,3),
    t9(1,2,3) - t11(3,1,4),
    t10(1,2,3) - t12(1,3,4),
    t10(1,2,4) - t11(1,2,3),
    t10(2,3,4) - t13(4,2,1),
    t11(1,2,4) - t13(1,3,4),
    t12(1,2,3) - t13(1,2,3)
    end
"""

R = recognizer.Recognizer()
ans1 = R.recognize(data1)
ans2 = R.recognize(data2)
ans3 = R.recognize_snappy('m004(1,1)')
""" This files contains examples and tests of the recognizers """ import cv2 import os import sys dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(dir_path) import recognizer as rc # intuitive implementation of Recognizer recognizer = rc.Recognizer(.6, rc.SMART_RECOGNITION) while True: recognizer.next_frame(data_on_frame=True, show_frame=True) if cv2.waitKey(1) != -1: break recognizer.close_window()
def __init__(self): """Initialize a DemoApp object""" self.init_gui() self.__recognizer = recognizer.Recognizer() self.__recognizer.add_listener(self.final_result)