def main():
    """Wire up the face-data store, recogniser and Tk UI, then run the loop."""
    face_data = FaceData()  # data structure for faces and names
    recognizer = Recognition()
    root = Tk()
    ui = Ui(root, face_data)

    def task():
        # One polling step: look for faces, then refresh the UI list.
        recognizer.find_faces(face_data)
        ui.updateList()
        ## TODO: Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("q pressed")
        root.after(20, task)  # reschedule this poll in 20 ms

    root.after(20, task)
    ui.startLoop()
    recognizer.close()
def run():
    """List available TF devices, load configuration, prepare data, train."""
    print(device_lib.list_local_devices())
    configuration = Configuration('configuration/configuration.cfg')
    # Make sure the folder layout and feature cache exist before training.
    DataUtils.check_and_create_folders(configuration)
    DataUtils.create_cache_if_not_exists(configuration)
    Recognition(configuration).train()
def main(data: dict):
    """Decode the base64 image carried in *data* into an image file.

    :param data: request payload holding the base64-encoded image
    :type data: dict
    :return: the literal status string ``"ok"``
    """
    Recognition(data).convert_base64_to_img()
    return "ok"
def __init__(self):
    """Build the speech pipeline and the individual voice skills."""
    self.recognizer = Recognition()       # speech recognition
    self.nlu = Nlu()                      # intent / semantic parsing
    self.speaker = Speaker()              # text-to-speech
    self.music_player = Music_Player()    # playback backend
    # Skills — each shares the speaker for voice responses.
    self.weather = Weather(self.speaker)  # weather lookup
    self.chat = Chat(self.speaker)        # small talk
    self.ticket = Ticket(self.speaker)    # train-ticket queries
    self.noun = Noun(self.speaker)        # term explanations
    self.music = Music(self.speaker, self.music_player)  # music skill
def stream(conn, num): faces, names, ids = LoadDB.loadofflineDB() r = Recognition(faces, names, ids) v = video() data = b"" payload_size = struct.calcsize(">L") print("payload_size: {}".format(payload_size)) while True: global isReady while len(data) < payload_size: #print("Recv: {}".format(len(data))) data += conn.recv(4096) '''if len(data) == 0 : print("Breaking Face Recognition") conn.close() break''' #print("Done Recv: {}".format(len(data))) packed_msg_size = data[:payload_size] data = data[payload_size:] msg_size = struct.unpack(">L", packed_msg_size)[0] #print("msg_size: {}".format(msg_size)) while len(data) < msg_size: data += conn.recv(4096) '''if len(data) == 0: print("Breaking Face Recognition") conn.close() break''' frame_data = data[:msg_size] data = data[msg_size:] frame = pickle.loads(frame_data, fix_imports=True, encoding="bytes") frame = cv2.imdecode(frame, cv2.IMREAD_COLOR) frame, name, id = r.startFaceRecognition(frame) #print(id) cv2.waitKey(1) cv2.imwrite('outgoing.jpg', frame) isReady = True v.appendframes(frame) with open(f"{date.today()}.txt", "a") as f: f.write( f"{name} is seen on {date.today()} at {datetime.now().strftime('%I:%M:%S %p')}\n" ) f.close() v.appendframes(frame) '''if len(data) == 0: print("Breaking Face Recognition") conn.close() break''' #yield (b'--frame\r\n' # b'Content-Type: image/jpeg\r\n\r\n' + open('outgoing.jpg', 'rb').read() + b'\r\n') '''conn.close()
class App:
    """Thin application wrapper: loads the JSON config and runs the reader."""

    def __init__(self):
        self.config = self.read_config()
        self.recognition = Recognition(config=self.config)

    def run(self):
        # NOTE(review): the result of this re-read is discarded; the call is
        # kept for behavioural parity with the original.
        self.read_config()
        self.recognition.reader()

    @staticmethod
    def read_config():
        """Load and return ./config.json as a dict."""
        with open('./config.json') as fh:
            return json.load(fh)
class Robot():
    """Voice front-end that forwards recognised speech to Home Assistant."""

    def __init__(self, config):
        self.config = config
        self.recognizer = Recognition()  # speech recognition

    # Home Assistant REST interface
    def hass_api(self, api_url, data):
        """POST *data* as JSON to the HA endpoint named by *api_url*."""
        try:
            print(api_url)
            cfg = self.config
            url = cfg['url'].strip('/') + '/api/' + api_url
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + cfg['token'],
            }
            result = requests.post(url, json=data, headers=headers, timeout=6)
            print(result)
        except Exception as ex:
            # Original message means "request timed out"; any failure lands here.
            print('请求超时:')
            print(ex)

    # Recognise speech and hand the text over for processing
    def process(self, fname):
        """Run speech-to-text on the audio file *fname*, then notify HA."""
        speech = self.recognizer.recognize(fname)
        if speech is None:
            return
        print('识别结果:{0}'.format(speech))
        if speech != '':
            self.hass_api('services/conversation/process', {
                'text': speech,
                'source': 'xunfei'
            })
        return
class Robot():
    """Voice assistant: recognises speech, classifies intent, runs the skill."""

    def __init__(self):
        self.recognizer = Recognition()     # speech recognition
        self.nlu = Nlu()                    # intent classification
        self.speaker = Speaker()            # text-to-speech
        self.music_player = Music_Player()  # playback backend
        self.weather = Weather(self.speaker)   # weather lookup
        self.chat = Chat(self.speaker)         # small talk
        self.ticket = Ticket(self.speaker)     # ticket queries
        self.noun = Noun(self.speaker)         # term explanations
        self.music = Music(self.speaker, self.music_player)  # music skill

    def get_music_player(self):
        """Expose the playback backend to callers."""
        return self.music_player

    # Recognise speech and dispatch to the matching skill
    def process(self, fname):
        """Speech-to-text on *fname*, then route the intent to its skill."""
        speech = self.recognizer.recognize(fname)
        if speech is None:
            return
        skill, response = self.nlu.query(speech)  # intent + payload
        # Table-driven dispatch replaces the original if/elif chain.
        dispatch = {
            'weather': ("命中技能天气", self.weather),
            'chat': ("命中技能闲聊", self.chat),
            'noun_interpretaion': ("命中技能名词解释", self.noun),
            'ticket': ("命中技能订购车票", self.ticket),
            'music': ("命中技能播放音乐", self.music),
        }
        hit = dispatch.get(skill)
        if hit is not None:
            message, handler = hit
            print(message)
            handler.process(response)
def __init__(self):
    """Wire the cache DB, camera/comm config, recogniser and comm channel."""
    self.db = DataBase(data_base='cache.db')
    self.camera, self.communication = self.read_config()
    self.timer_process = TimerProcess()
    self.r = Recognition(config=self.camera, db=self.db)
    self.c = Communication(config=self.communication,
                           code_recognition=self.r,
                           timer_process=self.timer_process,
                           db=self.db)
    self.station = self.camera.station_id
    # Process-shared cart slots (12 entries).
    self.carts = mp.Array('i', self.r.create_list(12))
    self.connection = self.c.data_call()
    self.QRBE1 = self.c.create_digit('QRBE1,{}'.format(self.station))
def __init__(self, camera):
    """Prepare shared state and start communication plus recognition.

    :param camera: key into the camera table ('usb', 'china' or 'intelbras')
    """
    # Process-shared state arrays.
    self.actions = mp.Array('i', [1])
    self.box = mp.Array('i', [0])
    self.battery = mp.Array('i', [0, 0, 0, 0, 0, 0, ])
    self.lat_long_actual = mp.Array('d', [0.0, 0.0])
    # Known video sources (credentials in the RTSP URL are redacted upstream).
    self.cameras = {
        'usb': 0,
        'china': 'rtsp://192.168.1.11:554/live/0/MAIN',
        'intelbras': 'rtsp://*****:*****@@192.168.1.11:554',
    }
    self.c = Communication(port='/dev/SERIAL_PORT')
    self.r = Recognition(camera=self.cameras[camera],
                         camera_rasp=False,
                         show_image=True)
def check(self):
    """Re-verify the recognised face while the countdown thread runs.

    Counts checked frames; stops (self._face = False) as soon as the ID
    changes or the reported distance/accuracy value exceeds 80.
    """
    countdown = Thread(target=self.counter)
    countdown.daemon = True
    countdown.start()
    self._frames_checked = 0
    while countdown.is_alive():
        res = Recognition.recognize()
        if res is None:
            continue
        self._frames_checked += 1
        print("%s, %s" % (res[0], res[1]))
        # Either a different person, or the same ID with a value above 80 —
        # both original branches set the flag and stop, so they are merged.
        if self._id != res[0] or res[1] > 80:
            self._face = False
            print(self._face)
            break
def run(self):
    """Full pitch-recognition pipeline for one instrument variant."""
    # Build or load the MFCC feature sets for training and unknown audio.
    MG_train, MG_unknown, M_midi = self.instrument_variant()
    mfcc_library = MG_train.import_or_make_mfcc()
    mfcc_unknown = MG_unknown.import_or_make_mfcc()
    # Determine the MIDI note range to model.
    midi = M_midi.range_of_midi()
    # Train one GMM per MIDI note.
    gmms = GMM_Model(midi, mfcc_library).model()
    # Recognise pitches in every unknown file from the given folder.
    recognition = Recognition(gmms, mfcc_unknown, midi).rec_every_unknown()
    # Finally map recognised pitches to per-file results.
    end_result = End_Results(recognition, self.SInstrument, self.SPathUnknown,
                             self.CallbackProgress).end_result_for_each()
    if self.Callback is not None:
        self.Callback(end_result)
def run(self):
    """Poll Firebase for settings, watch the camera, unlock on a match."""
    while True:
        data = fb.get_data()  # fresh Firebase snapshot
        # Recognition-accuracy threshold configured remotely.
        threshold = data['doorbell']['facial_recognition']['threshold']
        print(threshold)
        # Keep the camera state in sync with the face-check flag.
        if self.perform_action is True and self.using_camera is False:
            self.capture_camera()
        elif self.perform_action is False and self.using_camera is True:
            self.release_camera()
        if self.using_camera is True:
            res = Recognition.recognize()  # [id, accuracy] or None
            print(res)
            # A hit below the threshold starts the confirmation pass.
            if res is not None and res[1] < threshold:
                self._id = res[0]
                self._face = True
                checker = Thread(target=self.check)
                checker.start()
                checker.join()  # wait for the confirmation pass
                self.release_camera()
                # NOTE(review): nesting reconstructed from a collapsed
                # source — confirm sleep/print placement against the original.
                if self._face is True and self._frames_checked >= 20:
                    # 20 consecutive confirmed frames -> open the lock.
                    self.lock.unlock_door()
                print(self._id)
                print(self._face)
                sleep(10)
def run(self):
    """Open the serial board and run communication + recognition processes."""
    board = self.s.open_connection('/dev/ttyUSB0')
    if board is None:
        # NOTE(review): execution continues even when the port failed to open.
        logger.error('Serial port error')
    receive, send = mp.Pipe()
    actions = mp.Array('i', [1, 1, 0, 0, 1])  # shared action flags
    comm_proc = mp.Process(target=self.communication,
                           args=(board, actions, send))
    recogniser = Recognition()
    recog_proc = mp.Process(target=recogniser.recognition,
                            args=(board, actions, receive, 0, True))
    comm_proc.start()
    recog_proc.start()
    comm_proc.join()
    recog_proc.join()
class App:
    """Station application: wires cache DB, camera, recognition and comms."""

    def __init__(self):
        self.db = DataBase(data_base='cache.db')
        self.camera, self.communication = self.read_config()
        self.timer_process = TimerProcess()
        self.r = Recognition(config=self.camera, db=self.db)
        self.c = Communication(config=self.communication,
                               code_recognition=self.r,
                               timer_process=self.timer_process,
                               db=self.db)
        self.station = self.camera.station_id
        # Process-shared cart slots (12 entries).
        self.carts = mp.Array('i', self.r.create_list(12))
        self.connection = self.c.data_call()
        self.QRBE1 = self.c.create_digit('QRBE1,{}'.format(self.station))

    @staticmethod
    def read_config():
        """Read camera and communication settings from the central config DB."""
        db = DataBase(
            data_base='/home/madruga/developer/projects/config/config.db')
        return (controller.consult_camera(db),
                controller.consult_communication(db))

    def check_database(self):
        """Create the local cache database and table on first run."""
        if not os.path.exists('./cache.db'):
            self.db.create_data_base()
            self.db.create_table()
class Camera(object):
    """Frame-difference gate plus face detection/recognition pipeline."""

    def __init__(self, modelPath, device='cpu'):
        self.TF = TinyFace(modelPath, device=device)  # face detector
        self.REC = Recognition(device=device)         # face recogniser
        self.lastImg = None                           # previous binarised frame

    def detectDiff(self, img):
        """Return the absolute pixel-sum difference versus the previous frame.

        The first call has no reference frame: it stores the frame and
        returns the sentinel 99999999 so the caller treats it as changed.
        """
        tStart = time.time()
        # Binarise and normalise to {0, 1}.
        _, binary_img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
        binary_img = binary_img / 255
        if self.lastImg is None:
            self.lastImg = binary_img
            return 99999999
        # Fix: the original re-checked `lastImg is not None` here, leaving an
        # unreachable else-branch; the guard above already covers that case.
        # Note this is |sum(delta)|, not sum(|delta|), as in the original.
        diff = np.abs(np.sum(binary_img - self.lastImg))
        self.lastImg = binary_img
        log.info("diff: %6d, using %.6f sec" % (diff, time.time() - tStart))
        return diff

    def detectFaces(self, img):
        """Detect faces in *img*.

        :return: (number of boxes, predType, predName); the predictions
                 default to (-1, True) when no face is found.
        """
        imgRegion = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bboxes = self.TF.detect_faces(imgRegion, conf_th=0.9, scales=[1])
        predType, predName = (-1, True), (-1, True)
        if len(bboxes) > 0:
            imgRegion = draw_bboxes(img, bboxes, thickness=1)
            predType, predName = self.REC.detect(imgRegion, True)
        return len(bboxes), predType, predName
# tts.setVolume(0.75)

# Wake the robot up before interacting.
print("Initializing robot.")
motion.wakeUp()

# Build the knowledge-graph model once.
print("Building knowledge graph.")
master = Master()
master.buildModel()

# Answer up to five spoken questions.
for _ in range(5):
    # Listening uses recognition.py, record.py and sftp.py under the hood.
    print("Listening for question.")
    rcg = Recognition()
    qst = rcg.recognize()
    print("Question: " + qst)

    # Query the knowledge graph for an answer and speak it.
    print("Finding answer in graph.")
    ans = master.query(qst)
    print("Answer: " + ans)
    tts.say(ans, {"bodyLanguageMode": "contextual"})
sys.path.insert(0, "server_package")

from server_package import server

# Greet the user, then loop forever: listen, map the phrase to a command,
# and execute it when one was recognised.
Say.speak("Салам алейкум")

while True:
    source_cmd = Recognition.lisen_name()
    cmd = Recognition.callback(source_cmd)
    print(str(cmd) + " " + str(source_cmd))
    # Fix: compare against None with `is not`, not `!=` (PEP 8 idiom).
    if cmd is not None:
        IntefaceCMD.execute_cmd(cmd)
def main():
    """Detect a subtitle region in a video and recognise its characters."""
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
    with open("config.yml", "r") as config_file:
        # Fix: yaml.load without an explicit Loader is unsafe and rejected by
        # PyYAML >= 6; safe_load parses plain config data identically.
        cfg = yaml.safe_load(config_file)
    det_cfg = cfg["detection"]
    rec_cfg = cfg["recognition"]
    logging.basicConfig(
        format="%(asctime)s %(module)-12s %(levelname)-8s %(message)s",
        level=cfg["log_level"])

    logging.info("Starting detection")
    detection = Detection(det_cfg)
    found_frames = detection.detect_subtitle_region(cfg["video"])
    y_start, y_end = detection.get_subtitle_region()
    char_width = detection.get_char_width()
    char_dist = detection.get_char_dist()
    if char_width == 0 or char_dist == 0:
        logging.error("Char width is 0")
        return
    logging.info(
        "Found y pos ({}, {}), character width {}, character distance {}".
        format(y_start, y_end, char_width, char_dist))

    recognition = Recognition(rec_cfg["model"], rec_cfg["weights"],
                              rec_cfg["dictionary"])
    cyk = True
    for index, f in enumerate(FONTS):
        # Load the annotation font at full and half character size.
        font = load_font(f, char_width)
        font2 = load_font(f, char_width // 2)
        if font is None:
            logging.error("No CYK font found")
            cyk = False
        else:
            logging.info("Loaded font {}".format(FONTS[index]))
        # NOTE(review): the frame-rendering pass sits inside the font loop
        # here; the collapsed original is ambiguous — confirm the nesting.
        for frame in found_frames:
            text = []
            img = Image.fromarray(frame)
            draw = ImageDraw.Draw(img)
            # Recognise each detected character region in the subtitle band.
            for char_region, start, stop in detection.detect_char_regions(
                    frame[y_start:y_end, ]):
                res = recognition.recognize_character(char_region)
                text.append((start, stop, res[1], res[2]))
            for start, stop, char, prob in text:
                # Three nested rectangles draw a thicker visible border.
                draw.rectangle([(start, y_start), (stop, y_end)],
                               outline=RECTANGLE_COLOR)
                draw.rectangle([(start + 1, y_start + 1),
                                (stop - 1, y_end - 1)],
                               outline=RECTANGLE_COLOR)
                draw.rectangle([(start + 2, y_start + 2),
                                (stop - 2, y_end - 2)],
                               outline=RECTANGLE_COLOR)
                probability = str(int(prob * 100)) + "%"
                if cyk:
                    # Draw the recognised glyph and its confidence above it.
                    draw.text((start, y_start - (stop - start)), char,
                              fill=FONT_COLOR, font=font)
                    draw.text((start, y_start - 1.5 * (stop - start)),
                              probability, fill=FONT_COLOR, font=font2)
                else:
                    logging.info("Detected character {} ({})".format(
                        char, probability))
            cv2.imshow('image', np.array(img))
            cv2.waitKey(0)
# Close every OpenCV preview window once processing is done.
cv2.destroyAllWindows()
from action import Action
from recognition import Recognition

# Entry point: build the command executor and the recogniser, then listen
# forever, handing recognised commands to the Action object.
act = Action()
rec = Recognition()
while True:
    rec.start(act)
import sys sys.path.insert(0, '/home/pi/Zumi_Content/Data/face-recognition') from recognition import Recognition from zumi.util.camera import Camera from zumi.util.screen import Screen import time import cv2 import IPython.display import PIL.Image import numpy as np import os fd = Recognition() camera = Camera(auto_start=False) screen = Screen() def collectPictures(): camera.start_camera() fd.name = input("input name : ") print("Capture 50 pictures") time.sleep(1) while True: try: image = camera.capture() fd.makeDataset(image) IPython.display.display(PIL.Image.fromarray(image)) print("Progress : " + str(fd.cap) + "/50") screen.draw_image( PIL.Image.fromarray(fd.streaming_image).convert('1'))
def before_server_start(app, loop):
    """Server start-up hook: attach a shared Recognition instance to the app."""
    app.rec = Recognition()  # comparison object reused by request handlers
def __init__(self, config):
    """Keep the runtime *config* and create the speech recogniser."""
    self.config = config
    self.recognizer = Recognition()  # speech recognition
from register import RegisterUser
from recognition import Recognition
import os

# Folder holding the face images used for training.
path = 'training-data'
# Fix: plain os.mkdir raised FileExistsError on every re-run; makedirs with
# exist_ok tolerates an already-present folder.
os.makedirs(path, exist_ok=True)

# Simple console menu: register a user, run recognition, or quit.
while True:
    os.system("clear")
    print("1 - Cadastrar novo usuário")
    print("2 - Iniciar Reconhecimento")
    print("0 - Sair")
    o = input("Opção: ")
    if o == '0':
        break
    elif o == '1':
        RegisterUser(path)
    elif o == '2':
        rec = Recognition(path)
        rec.main_loop()
car_id = input("Insert Car ID :") print("\n") message = '{"type": "credentials", "status": "%s", "username": "******", "password": "******", "customer_id": "%s", "car_id": "%s"}' % ( status, username, password, customer_id, car_id) # sending back the credentials data to Master Pi for data validation # Master Pi will return the message of validation result sock.sendall(str.encode(message)) time.sleep(10) clear() # if the option is 'use face recognition' elif index == 1: recog = Recognition() data = recog.start_recognition(status) sock.sendall(str.encode(data)) time.sleep(10) clear() # if the option is 'Return Car' elif index == 1: status = "returned" title = 'Please select an authorization option: ' options = ['use username and password', 'use face recognition'] option, index = pick(options, title) print(option) print("##########") print("########## \n")
from flask_cors import CORS, cross_origin from flask import Flask, request, redirect from client import TensorflowClient from recognition import Recognition UI_LOCATION = os.getenv('UI_LOCATION', 'http://*****:*****@app.route('/') def index(): return redirect(UI_LOCATION[0]) @app.route('/recognise', methods=['POST']) @cross_origin(origins=UI_LOCATION) def recognise(): log.info('Image recognition request was received') img_raw = request.files['image'] img_binary = img_raw.read() log.debug('Successfully retrieved the file, recognising') digit = recognition.recognise(img_binary)
def __init__(self):
    """Load the configuration and build the recogniser from it."""
    self.config = self.read_config()
    self.recognition = Recognition(config=self.config)
class Action():
    """Executes recognised voice commands (time, shutdown, reboot, run, ...)."""

    def __init__(self):
        self.ans = Answer()       # text-to-speech output
        self.rec = Recognition()  # speech input for confirmations
        locale.setlocale(locale.LC_ALL, "ru")

    def action(self, command, addition=None):
        """Dispatch *command*; *addition* carries the rest of the phrase."""
        if command == 'ctime':
            now = datetime.datetime.now()
            self.ans.say("Сейчас " + now.strftime("%H:%M"))
            print("Сейчас " + now.strftime("%H:%M"))

        if command == 'off':
            # Shutdown requires an explicit spoken confirmation phrase.
            if "точно подтверждаю" in addition or "подтверждаю точно" in addition:
                self.ans.say("Выключаю пк через 10 секунд")
                time.sleep(10)
                print("Shutdown PC")
                # os.system('shutdown -s -t 0')
            else:
                self.ans.say("Нет подтвеждения")

        if command == 'reboot':
            # Ask for confirmation, listening up to three times; once
            # confirmed, give a 5-second voice window to cancel.
            self.ans.say("Точно перезагрузить?")
            for _attempt in range(3):
                heard = self.rec.recognite_simple()
                if heard:
                    if "да" in heard or "точно" in heard or "перезагрузи" in heard:
                        self.ans.say("Перезагружаю пк через 5 секунд")
                        for _second in range(5):
                            heard = self.rec.recognite_simple()
                            if heard:
                                if "стой" in heard or "подожди" in heard or "стоп" in heard:
                                    self.ans.say("Остановка перезагрузки")
                                    return 0
                            time.sleep(1)
                        print("Shutdown PC")
                        # os.system('shutdown -r -t 0')
                        break
                time.sleep(1)
            self.ans.say("Перезагрузка не подтверждена")

        if command == 'here':
            self.ans.say("Я на месте, не кричи")

        if command == 'date':
            now = datetime.datetime.now()
            self.ans.say("Сегодня, " + now.strftime("%A, %d число. %B, %m месяц"))
            print("Сегодня " + now.strftime("%A, %d. %B, %m"))

        if command == 'run':
            if addition:
                print("Запускаю - " + addition)
                # Find the first known program name mentioned in the phrase.
                program = None
                for pr in PROGRAMS:
                    if pr in addition:
                        program = pr
                        break
                if program:
                    for p in PROGRAMS[program]:
                        os.startfile(p)
                    self.ans.say("Запускаю " + pr)
                else:
                    self.ans.say("Не определена программа")
            else:
                self.ans.say("Не передана программа")
        elif command == 'gachi':
            print("gachi - " + addition)
            # Direct sound names win first, then alias mapping via WORDS.
            sound = None
            for word in WORDS_LIST:
                if word in addition:
                    sound = word
            for word in WORDS:
                if word in addition:
                    sound = WORDS[word]
            if sound:
                print("SENDING...")
                resp = send_sound(sound)
                if (resp == 200):
                    print("SUCESS SEND")
                else:
                    print(resp)
            else:
                print('Команда не распознана, повторите!')
def __init__(self, modelPath, device='cpu'):
    """Create the face detector and recogniser on the requested device."""
    self.TF = TinyFace(modelPath, device=device)  # face detector
    self.REC = Recognition(device=device)         # face recogniser
    self.lastImg = None                           # previous-frame cache
def __init__(self):
    """Prepare speech output/input and switch the locale to Russian."""
    self.ans = Answer()       # text-to-speech
    self.rec = Recognition()  # speech recognition
    locale.setlocale(locale.LC_ALL, "ru")