def __init__(self, size, devices, parent=None):
    super(MiniGameWidget, self).__init__(parent)
    self.width, self.height = size
    self.scores = []
    self.player, self.conductor, self.template = self.init_ui()
    rec_1 = Recognizer()
    rec_2 = Recognizer()
    rec_1.set_callback(self.on_result)
    rec_2.set_callback(self.on_result)
    self.template.template_selected.connect(
        self.player.on_template_selected)
    self.template.template_selected.connect(
        self.conductor.on_template_selected)
    self.player.finished_unistroke.connect(
        lambda points, name, t_name: self.on_rec(rec_1, points, name, t_name))
    self.conductor.finished_unistroke.connect(
        lambda points, name, t_name: self.on_rec(rec_2, points, name, t_name))
    self.devices = None
    if devices is not None and len(devices) > 0:
        self.connect_devices(devices, self.player, self.conductor)
        self.devices = devices
    self.template.show()
    self.player.show()
    self.conductor.show()
    self.show()
    self.setHidden(False)
    self.template.draw()

def recognize_api():
    if 'file' not in request.files:
        return 'No file part'
    file = request.files['file']
    if file.filename == '':
        return 'No filename'
    time = datetime.datetime.now()
    try:
        with tempfile.NamedTemporaryFile() as temp_file:
            recognizer = Recognizer()
            filename = temp_file.name
            file.save(filename)
            text = recognizer.get_text(filename)
            used = datetime.datetime.now() - time
            usage = Usage()
            usage.log()
            return "Recognized text: [{0}] - time used {1}".format(text, used)
    except ValueError:
        return (
            "Not an audio file in PCM WAV, AIFF/AIFF-C or FLAC format",
            400)

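# A minimal client-side sketch for calling recognize_api above, assuming it is
# exposed as a Flask POST route. The URL path, host, port, and sample file name
# are assumptions made for illustration, not taken from the original source.
import requests

with open('sample.wav', 'rb') as audio:
    resp = requests.post('http://localhost:5000/recognize',
                         files={'file': audio})
print(resp.status_code, resp.text)
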
def identify(self, event):
    """On-line face recognition."""
    rgb_img, depth_img, depth_mtx = kinect.get_sample()
    r_bw, r_depth, r_mtx = preprocessor.normalize_sample(
        rgb_img, depth_img, depth_mtx)
    if r_bw is None:
        event.Skip()
        self.Controller_statusbar.SetStatusText(
            "There were no faces detected")
        return
    # 0: eigenfaces (1), 1: fisherfaces (2)
    if self.combo_box_2.GetSelection() == 0:
        model = 1
    else:
        model = 2
    # 0: automatic ("auto"), 1: "nbw", 2: "ndepth"
    if self.combo_box_2_copy.GetSelection() == 0:
        mode = "auto"
    elif self.combo_box_2_copy.GetSelection() == 1:
        mode = "nbw"
    else:
        mode = "ndepth"
    rec = Recognizer(model)
    rec.predict(mode, r_bw, r_depth)
    event.Skip()

def __init__(self, cameras=None, port=9004, users_file="users.json",
             recognition_db="recognition.db"):
    Server.__init__(self, port=port, usessl=False)
    self.recognition_db = recognition_db
    self.last_user_uuid = ""
    self.last_len_persons_detected = -1
    self.last_len_users = -1
    self.camera_clients = []
    self.recognizer = Recognizer(users_file)
    # Avoid a mutable default argument for the camera list.
    self.cameras = cameras if cameras is not None else []
    self.start()
    self.method_handlers = {}
    self.method_handlers["list_users"] = self.list_users
    self.method_handlers["select_camera"] = self.select_camera
    self.method_handlers["list_users_with_level"] = self.list_users_with_level
    self.method_handlers["add_association"] = self.add_association
    self.users_recognized = []
    asyncio.get_event_loop().create_task(self.poll())

def recognize_data(self, group, folder_name):
    classifier_path = f"classifiers/{self.photos_size}/{folder_name}"
    paths_to_recognize = []
    y_true = []
    photos_ids = []
    for photos_path in group:
        paths_to_recognize.append(photos_path)
        student_id = int(os.path.split(photos_path)[-1].split("-")[0])
        photo_id = int(
            os.path.split(photos_path)[-1].split("-")[1].split(".")[0])
        y_true.append(student_id)
        photos_ids.append(photo_id)
    recognizer = Recognizer(paths_to_recognize, classifier_path)

    eigenfaces_y_pred = recognizer.eigenfaces()
    eigenfaces_metrics = MetricsCalculator(y_true, photos_ids, eigenfaces_y_pred)
    eigenfaces_metrics.calculate_metrics()
    eigenfaces_metrics.print_metrics()
    self.eigenfaces_metrics.append(eigenfaces_metrics)

    fisherfaces_y_pred = recognizer.fisherfaces()
    fisherfaces_metrics = MetricsCalculator(y_true, photos_ids, fisherfaces_y_pred)
    fisherfaces_metrics.calculate_metrics()
    fisherfaces_metrics.print_metrics()
    self.fisherfaces_metrics.append(fisherfaces_metrics)

    lbph_y_pred = recognizer.lbph()
    lbph_metrics = MetricsCalculator(y_true, photos_ids, lbph_y_pred)
    lbph_metrics.calculate_metrics()
    lbph_metrics.print_metrics()
    self.lbph_metrics.append(lbph_metrics)

def __init__(self, threshold):
    self.Threshold = threshold
    self.SHORT_NORMALIZE = (1.0 / 32768.0)
    self.chunk = 128
    self.FORMAT = pyaudio.paInt16
    self.CHANNELS = 1
    self.RATE = 16000
    self.swidth = 2
    self.Max_Seconds = 5
    self.seconds_of_record = 0.5
    self.timeout_signal = ((self.RATE / self.chunk * self.Max_Seconds) + 2)
    self.silence = True
    self.filename = 'temp/test.wav'
    self.Time = 0
    self.p = pyaudio.PyAudio()
    self.stream = self.p.open(format=self.FORMAT,
                              channels=self.CHANNELS,
                              rate=self.RATE,
                              input=True,
                              output=True,
                              frames_per_buffer=self.chunk,
                              input_device_index=1)
    self.r = Recognizer()
    self.r.load_models()

def process_folder(source_folder, collection):
    db = DBManager(collection)
    recognizer = Recognizer(db)
    source_folder = os.path.normpath(source_folder)
    assert os.path.isdir(source_folder), 'Folder does not exist'
    labels_folder = os.path.join(source_folder, 'labels')
    first_subfolders = [
        os.path.join(labels_folder, l) for l in os.listdir(labels_folder)
    ]
    for first_subfolder in first_subfolders:
        second_subfolders = [
            os.path.join(first_subfolder, l) for l in os.listdir(first_subfolder)
        ]
        for second_subfolder in second_subfolders:
            json_files = glob.glob(os.path.join(second_subfolder, '*.json'))
            for json_file in json_files:
                json_data = load_json(json_file)
                locations = get_locations(json_data, second_subfolder)
                if locations is None:
                    continue
                locations, img = locations
                ids = []
                for location in locations:
                    _id = recognizer.recognaze_face(img, location)
                    ids.append(_id)
                # face_encodings = get_face_encodings(json_data)
                new_json_data = update_ids(json_data, ids)
                save_json(json_file, new_json_data)

def __init__(self, clarifai_token, telegram_token):
    self.recognizer = Recognizer(clarifai_token)
    self.bot = Bot(telegram_token)
    self.secure = Secure()
    self.users_status = dict()
    self.users_login = dict()
    self.users_message = dict()

def __init__(self):
    """Initialize brain class of Sara."""
    self.log = logging.getLogger()
    self.log.info('initialize brain...')
    self.config = configs.default_obj()
    if os.path.exists('user.json'):
        self.config = configs.update(self.config, 'user.json')
    self.speech = Speech(self.config.speech)
    self.speech.speak("Load language: " +
                      self.config.language.name[self.config.language.code])
    self.phrases = configs.language_obj(self.config.language.languages_dir,
                                        self.config.language.code)
    self.help = Help(self.config.language, self.phrases)
    self.speech.speak(self.phrases.general.start)
    self.generals = {'quit': False, 'text': '', 'arg': None}
    self.bd = Databases(self.config.sqlite)
    self.assist = Assistant(self.speech, self.config, self.phrases, self.bd)
    if self.config.general.setup == "true":
        self.assist.setup()
    self.backups = Backups(self.config.backups, self.bd)
    self.hotkeys = Hotkeys(self.config.hotkeys, self.generals)
    self.recognizer = Recognizer(self.config.recognition,
                                 self.config.language.code, self.generals,
                                 self.speech, self.phrases.recognition)

def __init__(self, new=False):
    self.reg = None
    self.dir_yml = join("yml")
    self.dir_data = join("data")
    self.path = join(self.dir_data, "reg.pkl")
    self.rel2id = yaml.load(open(join(self.dir_yml, "rel2id.yml"), encoding="utf-8"),
                            Loader=yaml.SafeLoader)
    self.int2id = yaml.load(open(join(self.dir_yml, "int2id.yml"), encoding="utf-8"),
                            Loader=yaml.SafeLoader)
    if exists(self.path) and not new:
        with open(self.path, "rb") as f:
            self.reg = pickle.load(f)
    else:
        relations = yaml.load(open(join(self.dir_yml, "relation.yml"), encoding="utf-8"),
                              Loader=yaml.SafeLoader)
        ques_word = yaml.load(open(join(self.dir_yml, "quesword.yml"), encoding="utf-8"),
                              Loader=yaml.SafeLoader)
        wrong_word = yaml.load(open(join(self.dir_yml, "wrong_table.yml"), encoding="utf-8"),
                               Loader=yaml.SafeLoader)
        concepts = relations
        concepts.update(ques_word)
        concepts.update(wrong_word)
        self.reg = Recognizer(concepts)
        # Write the pickle through a context manager so the file is closed.
        with open(self.path, "wb") as f:
            pickle.dump(self.reg, f)

def detect_faces_in_image(file_stream):
    result = {}
    face_recognizer = Recognizer("face_gallery", file_stream)
    locs, names = face_recognizer.startEngine()
    for loc, name in zip(locs, names):
        result[name] = loc
    return jsonify(result)

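# A minimal sketch of exposing detect_faces_in_image above through a Flask
# route. The app object, URL path, and form field name are assumptions made
# for illustration; the exact stream type Recognizer expects is not specified
# in the original source.
from flask import Flask, request

app = Flask(__name__)

@app.route('/detect', methods=['POST'])
def detect():
    uploaded = request.files['image']  # hypothetical field name
    return detect_faces_in_image(uploaded.stream)
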
def decrypt(self):
    if not self.fname:
        return
    self.recognizer_thread = Recognizer()
    self.recognizer_thread.finished.connect(self.do_decrypt)
    self.status_bar.showMessage("Say the password")
    self.recognizer_thread.start()

def __init__(self):
    self.recognizer = Recognizer()
    self.handler = TargetHandler()
    self.camera_input = CameraInput()
    self.preview_output = PreviewOutput()
    self.displacement_estimator = DisplacementEstimator()
    self.horizontal_field_of_view = HORIZONTAL_FIELD_OF_VIEW
    self.drone_control = DroneControl(self.handler)
    self.drone_control.startup_simulation(TAKEOFF_HEIGHT, MAX_FRAMES_PER_SECOND)
    self.simple_guidance = None

def repeat(self, password):
    if not password:
        self.status_bar.showMessage("Error: empty password")
        return
    self.password = password
    self.recognizer_thread = Recognizer()
    self.recognizer_thread.finished.connect(self.do_encrypt)
    self.status_bar.showMessage("Your password is '" + password +
                                "'. Repeat to confirm.")
    self.recognizer_thread.start()

def __init__(self, agent):
    self.posestate = "rest"           # "rest" or "ready"
    self.controlstate = "controlled"  # "controlled" or "indep"
    self.agent = agent
    self.target = None
    self.image = None
    self.__recognizer = Recognizer()
    self.__recognizer.on_keyword = self.on_keyword
    self.__thread = Thread(target=self.__recognizer.run)
    self.__ran = 0

def __init__(self, master=None):
    super().__init__(master)
    self.master = master
    self.speak_engine = SpeakEngine(self.speak_engine_did_finish_utterance)
    self.voices = self.speak_engine.get_voices()
    self.recognizer = Recognizer(self.speak_engine)
    self.pack()
    self.create_widgets()

def adjust(request):
    recognizer = Recognizer()
    PATH = request.GET["canvasURL"]
    text = request.GET["text"]
    imgList = SymbolFinder.find(PATH)
    text = list(text)
    print(text)
    recognizer.adjust(imgList, text)
    return HttpResponse("Adjusting was successful")

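# A minimal sketch of wiring the adjust view above into a Django URLconf;
# the URL pattern is an assumption made for illustration.
from django.urls import path

urlpatterns = [
    path('adjust/', adjust),
]
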
def train_model(self, event):
    """Trains the Face Recognition model."""
    if self.combo_box_2.GetSelection() == 0:
        mode = 1
    else:
        mode = 2
    selection = self.combo_box_2_copy.GetSelection()
    if selection == 0:
        self.Controller_statusbar.SetStatusText(
            "Mode only for on-line recognition")
    elif selection == 1:
        # Black-and-white image
        rec = Recognizer(mode, "nbw")
        rec.tr()
        self.Controller_statusbar.SetStatusText(
            "Model successfully trained")
    elif selection == 2:
        # Depth image
        rec = Recognizer(mode, "ndepth")
        rec.tr()
        self.Controller_statusbar.SetStatusText(
            "Model successfully trained")
    event.Skip()

def test_model(self, event):
    """Tests the Face Recognition model."""
    if self.combo_box_2.GetSelection() == 0:
        mode = 1
    else:
        mode = 2
    selection = self.combo_box_2_copy.GetSelection()
    if selection == 0:
        self.Controller_statusbar.SetStatusText(
            "Mode only for on-line recognition")
    elif selection == 1:
        # Black-and-white image
        rec = Recognizer(mode, "nbw")
        ret = rec.ts()
        print("ERROR:", ret)
        self.Controller_statusbar.SetStatusText(
            "Classification error: %.2f" % (ret))
    elif selection == 2:
        # Depth image
        rec = Recognizer(mode, "ndepth")
        ret = rec.ts()
        print("ERROR:", ret)
        self.Controller_statusbar.SetStatusText(
            "Classification error: %.2f" % (ret))
    event.Skip()

def __init__(self, *args, accounts_title_re: str, accounts_rects: dict, **kwargs):
    super().__init__(*args, **kwargs)
    self.accounts_title_re = accounts_title_re
    self.accounts_rects = accounts_rects
    self.accounts_recognizer = Recognizer(name=self.name + '_accounts',
                                          in_width=16,
                                          in_height=16,
                                          n_units=100,
                                          n_out=128,
                                          filter_mode=self.filter_mode,
                                          chrome=self.chrome)
    self.accounts_handle = None
    self.account = new_account(self.name)

def solve(topLeftCoords, bottomRightCoords, rows, columns, mines):
    # Watch for the user pressing escape to stop the loop.
    # In the loop, escapeListener.is_alive() returns False once escape
    # has been pressed.
    def on_press(key):
        if key == keyboard.Key.esc:
            return False

    escapeListener = keyboard.Listener(on_press=on_press)
    escapeListener.start()
    print(topLeftCoords, bottomRightCoords, rows, columns)
    recognizer = Recognizer(topLeftCoords, bottomRightCoords, rows, columns)
    analyzer = Analyzer(recognizer)
    solver = Solver(recognizer, analyzer, mines)
    solver.solve(shouldContinue=escapeListener.is_alive)

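# Hypothetical call to solve() above for an expert-sized 16x30 board; the
# screen coordinates and mine count are illustrative assumptions only.
solve(topLeftCoords=(100, 200), bottomRightCoords=(900, 640),
      rows=16, columns=30, mines=99)
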
def main(args):
    """Entry point."""
    # TODO Replace with argparse
    if len(args) > 1 and os.path.isfile(args[1]):
        with io.open(args[1], 'r', encoding='utf-8') as f:
            data = f.read()
    else:
        stream = io.open(sys.stdin.fileno(), 'r', encoding='utf-8')
        data = stream.read()
    # TODO add '--verbose' flag to disable logging
    logging.basicConfig(format='%(levelname)s %(name)s: %(message)s',
                        level=logging.DEBUG)
    recognizer = Recognizer()
    result = recognizer.recognize(data)
    print(','.join(result))

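# A minimal sketch of invoking main() above as a script entry point; the
# script file name in the shell examples is an assumption for illustration.
if __name__ == '__main__':
    main(sys.argv)

# From a shell, pass a file path or pipe text on stdin, e.g.:
#   python recognize.py input.txt
#   echo "some text" | python recognize.py
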
def __init__(self, name, bind_address, *,
             title_re: str,
             instruments: List[str] = (),
             capture_rects: List[Dict[str, Tuple]],
             filter_mode: int = 0,
             chrome: bool = False,
             master_address):
    super().__init__(name, bind_address, master_address=master_address)
    self.service = name
    self.title_re = title_re
    self.instruments = instruments or ([''] * len(capture_rects))
    self.capture_rects = capture_rects
    self.last_handle = None
    self.chrome = chrome
    self.recognizer = Recognizer(name=name,
                                 in_width=16,
                                 in_height=16,
                                 n_units=100,
                                 n_out=128,
                                 filter_mode=filter_mode,
                                 chrome=chrome)
    self._lock = Lock()

def adjustInNewThread(self, text):
    try:
        self.recognizer.adjust(self.imgList, text)
        self.ids.text_input.text = "".join(text)
        PATH = SettingsController.canvasImg
        self.ids.text_input.text = "Loading..."
        threading.Thread(target=self.recognizeInNewThread,
                         args=(PATH, ),
                         daemon=True).start()
    except Exception:
        recognizer = Recognizer()
        self.ids.text_input.text = "Error"
        print("EXCEPTION => Adjust exception")
        self.threadWorking = False

def __init__(self, recognizer_path, retina=False, on_gpu=False, emotions=False):
    self.on_gpu = on_gpu
    if retina:
        self.finder = FaceFinder(on_gpu=on_gpu)
    else:
        self.finder = None
    if emotions:
        self.emotions = Emanalisis(on_gpu=on_gpu,
                                   path_to_classifier="net_714.pth",
                                   finder=self.finder)
    else:
        self.emotions = None
    self.recog = Recognizer(finder=self.finder)
    self.recog.load_model(recognizer_path)
    self.clust = Clusterizer(samples=5)

def __init__(self, filename, count, **kwargs):
    super().__init__(**kwargs)
    self.filename = filename
    self.count = count
    self.recog = Recognizer()
    # Recognition label element
    self.label_recognition = Label(text=f'Recognition 1 of {self.count}',
                                   font_size=30)
    self.clear_widgets()
    self.layout = BoxLayout(orientation='vertical')
    # Start the recognition thread
    threading.Thread(target=self._recognition).start()
    self.layout.add_widget(self.label_recognition)
    self.add_widget(self.layout)

def __init__(self, camera=None):
    super().__init__()
    self.camera = camera
    self.central_widget = QWidget()
    self.win = pg.GraphicsLayoutWidget()
    self.view = self.win.addViewBox()
    self.img = pg.ImageItem()
    self.view.addItem(self.img)

    # testPushButton = QPushButton("This is a test")
    # cam_layout = QVBoxLayout(self.central_widget)
    cam_layout = QVBoxLayout()
    # cam_layout.addWidget(self.win)  # Had this uncommented.
    # cam_layout.addWidget(testPushButton)
    cam_layout.addStretch(1)
    # self.setCentralWidget(self.central_widget)

    main_layout = QGridLayout()
    # main_layout.addLayout(cam_layout, 0, 0, 1, 1)
    main_layout.addWidget(self.win, 0, 0)
    main_layout.addLayout(cam_layout, 0, 1, 1, 1)
    # main_layout.setRowStretch(1, 1)
    # main_layout.setRowStretch(2, 1)  # Had these uncommented.
    # main_layout.setColumnStretch(0, 2)
    # main_layout.setColumnStretch(1, 1)
    self.setLayout(main_layout)

    self.update_timer = QTimer()
    self.update_timer.timeout.connect(self.update)
    self.update_timer.start(10)

    self.recognizer = Recognizer()
    self.recognizer.load_models()
    self.setWindowTitle("Recognizer")

def __init__(self, recognizer_path, retina=False, on_gpu=False, emotions=False,
             confidence_threshold=0.02, top_k=5000, nms_threshold=0.4,
             keep_top_k=750, vis_thres=0.6, network='resnet50',
             distance_threshold=0.4, samples=5, eps=0.3):
    self.on_gpu = on_gpu
    if retina:
        self.finder = FaceFinder(on_gpu=on_gpu,
                                 confidence_threshold=confidence_threshold,
                                 top_k=top_k,
                                 nms_threshold=nms_threshold,
                                 keep_top_k=keep_top_k,
                                 vis_thres=vis_thres,
                                 network=network)
    else:
        self.finder = None
    if emotions:
        self.emotions = Emanalisis(on_gpu=on_gpu,
                                   path_to_classifier="net_714.pth",
                                   finder=self.finder)
    else:
        self.emotions = None
    self.recognizer_retrained = True
    self.recog = Recognizer(finder=self.finder,
                            distance_threshold=distance_threshold)
    self.recog.load_model(recognizer_path)
    self.clust = Clusterizer(samples=samples, eps=eps)
    self.em_labels = [
        'ANGRY', 'DISGUST', 'FEAR', 'HAPPY', 'SAD', 'SURPRISE', 'NEUTRAL'
    ]

def __init__(self):
    # lower and upper bound for marker color
    self._lower_hue_0 = numpy.array([80, 90, 100])
    self._lower_hue_1 = numpy.array([120, 255, 255])
    self._upper_hue_0 = numpy.array([80, 90, 100])
    self._upper_hue_1 = numpy.array([120, 255, 255])
    # image processing kernels
    self._kernel_median_blur = 27
    self._kernel_dilate_mask = (9, 9)
    # marker properties
    self._x = -1
    self._y = -1
    self._dx = 0
    self._dy = 0
    self._vx = 0
    self._vy = 0
    self._histdx = []
    self._histdy = []
    self._points = []
    self._max_points = 50
    self._min_change = 10
    self._min_veloxy = 2.0
    self._marker_ctr = None
    self._marker_tip = None
    # frames per second
    self._fps = 20
    # render elements
    self._render_marker = True
    self._render_trails = True
    # recognizer
    self._recognizer = Recognizer()
    # opencv version
    self._opencv_version = int(cv2.__version__.split('.')[0])

def build_aho(path, f_type='yml'):
    """
    Rebuild a matcher (recognizer) object from a concept file.
    ------------------------------------------
    Args:
        path:
        f_type:

    Returns:

    """
    if exists(path):
        d = None
        if f_type == 'yml':
            d = yaml.load(open(path, 'r', encoding='utf-8'),
                          Loader=yaml.SafeLoader)
        elif f_type == 'json':
            d = json.load(open(path, 'r', encoding='utf-8'))
        if d:
            return Recognizer(d)

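# Hypothetical use of build_aho above; the file path is an assumption made for
# illustration. When the file is missing or its contents are empty, build_aho
# falls through and returns None.
matcher = build_aho('yml/relation.yml', f_type='yml')
if matcher is None:
    print('no matcher built')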