Code Example #1
def detect():
    recognize()
    #cv2.imshow("Face",img)
    #if(cv2.waitKey(1) == ord('q')):
    #    break
    #elif(userId != 0):
    #    cv2.waitKey(1000)
    #    cam.release()
    #    cv2.destroyAllWindows()
    #    return redirect('records/details/'+str(userId))
    return redirect(url_for('index'))
Code Example #2
def getVoiceKeyWord():
    match = False
    while not match:
        key = recognize('keyword')
        t2s('you are want to find: {}'.format(key))
        cf = recognize('YES or OK to confirm and NO to try again')
        if cf in ['yes', 'ok', 'oke', 'đúng', 'phải', 'đồng ý']:
            t2s('ok')
            match = True
        else:
            t2s('oh sorry! please speech keyword again')
    return key
Code Example #3
File: PyTunes.py  Project: dangvansam98/final-pytunes
    def excCommand(self):
        self.Pause()
        command = listen_command()
        if command == 0:
            self.PlayPause()
        elif command == 1:
            self.Next()
        elif command == 2:
            self.Stop()
            t2s("Phát lại")
        elif command == 3:
            self.VoiceSearch()
        elif command == 4:
            self.volUp()
            t2s("đã tăng")
            self.PlayPause()
        elif command == 6:
            self.volDown()
            t2s("đã giảm")
            self.PlayPause()

        elif command == 5:
            t2s('Bạn chắc chắn muốn thoát không')
            t2s('Xác nhận bằng cách nói ok')
            cf_exit = recognize('')
            if cf_exit in ('yes', 'ok', 'oke'):
                t2s("Chào bạn hẹn gặp lại bạn hihi")
                exit()
            else:
                print('no exit')
Code Example #4
File: main.py  Project: megaloss/Licence_plate_reader
def t_recognize(img, c=0.3):  # function to ocr images
    save_ll = False
    #print ('splitting...')
    chars = dumb_split_plate(img)

    if save_ll:  # save pic
        for char in chars:
            cv2.imwrite('./ll/' + str(random.randint(0, 1000000)) + ".png",
                        char)

    if not chars:
        #print ('[main]:no chars')
        return False, 0, 0
    #print('[main]: returned chars:', len(chars))
    if len(chars) > 1:
        #print('processing...')
        nums = process_number(chars)
    else:
        #print ('less than one char')
        return False, 0, 0
    if len(nums) > 1:
        #print('recognizing...')
        try:
            t, conf = recognize(nums, c)

        except:
            #print('something went wrong')
            return False, 0, 0
        return t, conf, chars
    return False, 0, 0  # keep the arity consistent with the other failure returns
Code Example #5
def webcam():

    models = load_models()

    iteration = 0

    cam = cv2.VideoCapture(0)
    cv2.namedWindow('HackDrone', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('HackDrone', cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    while True:

        ret_val, image = cam.read()

        if iteration > 5:
            # RECOGNITION
            print('Recognition')
            result = recognize(image, models)
            if result is not None:
                letter, prob = result
                text = 'Letter %s, confidence: %.2f' % (letter, prob)
                cv2.putText(image, text, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)

        cv2.imshow('HackDrone', image)

        sleep(1)

        iteration += 1

        if cv2.waitKey(1) == 27:
            break  # esc to quit

    cv2.destroyAllWindows()
Code Example #6
def listen_command():

    t2i_en = {
        'play': 0,
        'next': 1,
        'stop': 2,
        'search': 3,
        'select song': 4,
        'exit': 5
    }
    t2i_vn = {
        'phát': 0,
        'tiếp tục': 0,
        'tiếp theo': 1,
        'kế tiếp': 1,
        'dừng lại': 2,
        'tìm kiếm': 3,
        'chọn bài hát': 4,
        'chọn bài': 4,
        'thoát': 5,
        'thoát chương trình': 5
    }

    match = False
    while not match:
        t2s('speech command')
        text = recognize()
        if text in t2i_en:
            command = t2i_en[text]
            match = True
        elif text in t2i_vn:
            command = t2i_vn[text]
            match = True
    return int(command)
Code Example #7
def process_inputframes(frames_path, nth=1, duration=10):
    frame_files = sorted(glob.glob(frames_path + '/*'))
    num_frames = len(frame_files)
    print(
        'Detecting and recognizing text from {} frames for every {}th frame: {}'
        .format(num_frames, nth, str(datetime.now())))

    entries = []
    for f_index, filename in tqdm(enumerate(frame_files), total=num_frames):
        if (f_index % nth == 0):
            boxes, scores = detection.detect(filename)

            if scores.shape[0] != 0:
                texts = recognition.recognize(filename, boxes)
                for index, box in enumerate(boxes):
                    entry = {}
                    entry['f_index'] = f_index
                    entry['time_stamp'] = '{:2.2f}'.format(
                        f_index / num_frames * duration)
                    entry['text'] = texts[index]
                    entry['bbox'] = [
                        box[0], box[1], box[2] - box[0], box[3] - box[1]
                    ]
                    entry['score'] = scores[index].item()
                    entries.append(entry)
    return entries
Code Example #8
def wav_to_text():
    if request.method == 'POST':

        wav_file = request.files['file']

        wav_file.save('temp')

        #Check if we have already translated, based on file hash
        #Use a temp file and then destroy it
        temp_file_obj = open('temp', 'rb')
        file_hash = md5(temp_file_obj)

        #If we don't find it, create it
        if not os.path.exists('./en_text_dir/' + file_hash):

            #Turn the file to text
            output_file = open(file_hash, 'w')
            rec.recognize(open('temp', 'rb'), output_file)

            #Move a saved copy to appropriate folder
            os.rename(('./' + file_hash), ('./en_text_dir/' + file_hash))

            #Delete the temp file
            os.remove('temp')

            return "Created"
        else:

            #Delete the temp file
            os.remove('temp')

            return "Already there"

    elif request.method == 'GET':
        wav_file = request.files['file']

        #Check if we have already translated, based on file hash
        file_hash = md5(wav_file)

        if os.path.exists('./en_text_dir/' + file_hash):

            return send_from_directory('./en_text_dir/',
                                       file_hash,
                                       as_attachment=True)
        else:

            return 'Not found', 404
Code Example #9
def listen():
    time.sleep(2)
    while True:
        text = recognition.recognize().lower()
        #text = input().lower()
        print(text)
        if text != "":  # "is not" tests identity, not string equality
            parse_command(text)
Code Example #10
def read_square():
    while not programEnd:
        if len(tasks) > 0:
            print("do new tasks, still have {} tasks left".format(len(tasks)))
            current_task = tasks.pop()
            print("task info:{}".format(current_task))
            results.append(Results(current_task,
                                   recognition.recognize(current_task.frame,
                                                         current_task.box,
                                                         not usingPiCamera)))
        time.sleep(.1)
Code Example #11
File: PyTunes.py  Project: dangvansam98/final-pytunes
def getVoiceKeyWord():
    match = False
    while True:
        #sleep(1)
        key = recognize('keyword')
        #t2s('you are want to find: {}'.format(key))
        if key != "error":
            t2s("Có phải bạn muốn tìm kiếm {}".format(key))
            cf = recognize('Nói OK hoặc Đồng ý để xác nhận')
        else:
            continue

        if cf in ['yes', 'ok', 'oke', 'đúng', 'phải', 'đồng ý']:
            t2s('Ok tìm kiếm cho {}'.format(key))
            #match = True
            break
        #else:
        #t2s('oh sorry! please speech keyword again')
    return key
Code Example #12
File: recording.py  Project: j12358962/Powersocket
def on_message(client, userdata, msg):
    # print("Data: " + str(msg.payload))
    # print("Topic: " + msg.topic + "\n" +
    #       "Message: " + msg.payload.decode("utf-8"))
    Save_power_Data(msg.payload)  # save raw data samples
    global data_accumulation, data_list
    data_accumulation = data_accumulation + 1
    if data_accumulation >= frame:
        recognize(data_list)  # run recognition function
        data_accumulation = 0
        data_list = []
    else:
        # The line json_data = json.loads(msg.payload) below sometimes fails; the error looks like:
        # raise JSONDecodeError("Expecting value", s, err.value) from None
        # json.decoder.JSONDecodeError: Expecting value: line 1 column 54 (char 53)
        try:
            json_data = json.loads(msg.payload)
            data_list.append([json_data['V'], json_data['A'], json_data['PF'],
                              json_data['W'],  json_data['VA'], json_data['VAR']])
        except Exception as e:
            pass
Code Example #13
def file_upload():
    # convert request body to valid base64 string
    encoded_image = request\
        .data\
        .decode()\
        .replace('data:image/png;base64,', '')\
        .replace(' ', '+')

    # decode image content
    content = base64.b64decode(encoded_image)

    # create file buffer
    tmp_file = io.BytesIO(content)

    # send image to recognition
    recognition_results = recognize(tmp_file)

    # return JSON response
    return jsonify(recognition_results)
Code Example #14
File: PyTunes.py  Project: dangvansam98/final-pytunes
def listen_command():
    t2s('Bạn muốn tôi làm gì hãy ra lệnh cho tôi')
    #t2s('Ví dụ tìm kiếm, kế tiếp, tiếp tục')
    t2i_en = {
        'play': 0,
        'next': 1,
        'replay': 2,
        'search': 3,
        'volume up': 4,
        'volume down': 6,
        'exit': 5
    }
    t2i_vn = {
        'phát': 0,
        'tiếp tục': 0,
        'tiếp theo': 1,
        'kế tiếp': 1,
        'phát lại từ đầu': 2,
        'phát lại': 2,
        'nghe lại': 2,
        'bắt đầu lại': 2,
        'tìm kiếm': 3,
        'tăng âm lượng': 4,
        'tăng âm': 4,
        "giảm âm lượng": 6,
        "giảm âm": 6,
        'thoát': 5,
        'thoát chương trình': 5
    }

    match = False
    while not match:
        t2s('Đọc lệnh bạn muốn')
        text = recognize()
        if text in t2i_en:
            command = t2i_en[text]
            match = True
        elif text in t2i_vn:
            command = t2i_vn[text]
            match = True
    return int(command)
Code Example #15
def drop(event):
    if event.data:
        print('Dropped data:\n', event.data)
        #print_event_info(event)
        if event.widget == listbox:
            files = listbox.tk.splitlist(event.data)
            for f in files:
                if os.path.exists(f):
                    print('Dropped file: "%s"' % f)
                    listbox.insert('end', f)
                    split_characters.Spliting(f)
                    result = recognition.recognize('./temp')
                    for item in result:
                        text.insert('end', item)
                    del_file('./temp/')
                else:
                    print('Not dropping file "%s": file does not exist.' % f)
        else:
            print('Error: reported event.widget not known')
    return event.data
Code Example #16
File: flight.py  Project: dazcona/sign2text
def dronesign():
    drone = tellopy.Tello()
    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        drone.takeoff()
        sleep(5)
        drone.down(50)
        sleep(5)
        drone.start_video()
        container = av.open(drone.get_video_stream())

        for frame in container.decode(video=0):

            image = cv2.cvtColor(numpy.array(frame.to_image()),
                                 cv2.COLOR_RGB2BGR)

            # RECOGNITION
            print('Recognition')
            result = recognize(image)
            if result is not None:
                letter, prob = result
                text = 'Letter %s, confidence: %.2f' % (letter, prob)
                print(text)

            sleep(5)
            stop = raw_input('Stop? [y]')
            if stop.lower() == 'y':
                break

        sleep(5)
        drone.land()
        sleep(5)
    except Exception as ex:
        # exc_type, exc_value, exc_traceback = sys.exc_info()
        # traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Code Example #17
 def excCommand(self):
     self.Pause()
     t2s('OK. Im here')
     t2s('Please speech a command')
     command = listen_command()
     if command == 0:
         self.PlayPause()
     elif command == 1:
         self.voiceNext()
     elif command == 2:
         self.Stop()
     elif command == 3:
         self.VoiceSearch()
     elif command == 4:
         self.voiceSelect()
     elif command == 5:
         t2s('You are sure to exit!')
         t2s('Speech Yes or OK to comfirm!')
         cf_exit = recognize('')
         if cf_exit == 'yes' or cf_exit == 'ok' or cf_exit == 'oke':
             exit()
         else:
             print('no exit')
Code Example #18
def worker():
    is_normal = True
    while True:
        item = q.get()
        time.sleep(1)

        text = recognize(item["file_path"], language="ja-JP")
        add_log(item["file_name"], text)
        if any(x in text for x in start_words):
            i2c_motor_driver.start()
            is_normal = True
        elif any(x in text for x in stop_words):
            i2c_motor_driver.stop()
        elif any(x in text for x in reverse_words):
            if is_normal:
                i2c_motor_driver.reverse()
            else:
                i2c_motor_driver.start()
            is_normal = not is_normal
        elif any(x in text for x in accelerate_words):
            i2c_motor_driver.accelerate()
        elif any(x in text for x in decelerate_words):
            i2c_motor_driver.decelerate()
        q.task_done()
Code Example #19
def infer_save_frames(input_frames_path, out_frames_path, every_nth=1):
    frame_files = sorted(glob.glob(input_frames_path + '/*'))

    num_frames = len(frame_files)
    detect_time = 0
    recognize_time = 0
    print('Detecting and recognizing text from {} frames: {}'.format(
        num_frames, str(datetime.now())))
    wordBB = None
    score = None
    text = None

    for index, filename in tqdm(enumerate(frame_files), total=num_frames):
        out_name = out_frames_path + '/out_{0:04d}.png'.format(index)
        if (index % every_nth == 0):
            wordBB, score = detection.detect(filename)

            if score.shape[0] == 0:
                wordBB = None
                score = None
            else:
                text = recognition.recognize(filename, wordBB)

        utilities.save(filename, wordBB, text, out_name)
Code Example #20
def select_by_speech():
    t2i_en = {
        'one': 1,
        'two': 2,
        'three': 3,
        'four': 4,
        'five': 5,
        'six': 6,
        'seven': 7,
        'eight': 8,
        'nine': 9,
        'ten': 10
    }
    t2i_en2 = {
        'number one': 1,
        'number two': 2,
        'number three': 3,
        'number four': 4,
        'number five': 5,
        'number six': 6,
        'number seven': 7,
        'number eight': 8,
        'number nine': 9,
        'number ten': 10
    }
    t2i_vn = {
        'một': 1,
        'hai': 2,
        'ba': 3,
        'bốn': 4,
        'năm': 5,
        'sáu': 6,
        'bẩy': 7,
        'tám': 8,
        'chín': 9,
        'mười': 10
    }
    t2i_vn2 = {
        'số 1': 1,
        'số 2': 2,
        'số 3': 3,
        'số 4': 4,
        'số 5': 5,
        'số 6': 6,
        'số 7': 7,
        'số 8': 8,
        'số 9': 9,
        'số 10': 10
    }
    t2i = {
        '1': 1,
        '2': 2,
        '3': 3,
        '4': 4,
        '5': 5,
        '6': 6,
        '7': 7,
        '8': 8,
        '9': 9,
        '10': 10
    }
    match = False
    while match != True:
        t2s('select song')
        text = recognize()
        if (text in t2i_en or text in t2i_en2 or text in t2i_vn
                or text in t2i_vn2 or text in t2i):
            if text in t2i_en.keys():
                number = t2i_en[text]
                match = True
            elif text in t2i_en2.keys():
                number = t2i_en2[text]
                match = True
            elif text in t2i_vn.keys():
                number = t2i_vn[text]
                match = True
            elif text in t2i_vn2.keys():
                number = t2i_vn2[text]
                match = True
            elif text in t2i.keys():
                number = t2i[text]
                match = True
    return int(number) - 1
Code Example #21
def worker():
    while True:
        item = q.get()
        text = recognize(item["file_path"], language="ja-JP")
        add_log(item["file_name"], text)
        q.task_done()
Code Example #22
 def recognize(self):
     if self.img_path is None:
         return  # nothing to do without an image path
     r_char = recognize(self.config, self.img_path)
     self.tbRecognitionResult.setText(r_char)
Code Example #23
    voice = browser.find_elements_by_tag_name("iframe")[2]

    browser.switch_to.frame(voice)

    time.sleep(2)

    download = browser.find_element_by_css_selector('.rc-audiochallenge-tdownload-link')

    download.send_keys(Keys.ENTER)

    time.sleep(15)

    convert()

    recognize()

    result = browser.find_element_by_css_selector('#audio-response')
    result.send_keys(recognize.text, Keys.ENTER)

    time.sleep(15)

    while "iframe" in browser.page_source:
        download = browser.find_element_by_css_selector('.rc-audiochallenge-tdownload-link')
        download.send_keys(Keys.ENTER)

        time.sleep(15)

        convert()

        recognize()
Code Example #24
File: segment.py  Project: samirkhanal35/minor-demo
def segFun(heighty, widthx, img):
    import numpy as np
    import image as img1
    import cv2 as cv
    import recognition as rg

    c = []

    i = 0
    flag_dot = 0
    flag_dots = 0
    i_low = 0
    i_high = 0
    j_left = 0
    j_right = 0
    i_nhigh = 0
    i_nlow = 0
    iscount = 0

    while (i <= heighty - 1):
        count_dot = 0
        for j in range(0, widthx - 1):
            avalue = np.mean(img[i, j])
            if (avalue == 0):
                count_dot = count_dot + 1

        if count_dot >= 4:
            if flag_dot == 0:
                flag_dot = 1
                i_high = i - 1
        else:
            if flag_dot == 1:
                flag_dot = 0
                i_low = i
            else:
                flag_dot = 0

        if ((i_low != 0) & (i_high != 0)):
            flag_dots = 0
            for line_j in range(0, widthx - 1):
                lj = line_j
                count_cr = 0
                for line_i in range(i_high, i_low):
                    avalue1 = np.mean(img[line_i, line_j])
                    if (avalue1 == 0):
                        count_cr = count_cr + 1

                if count_cr >= 1:
                    if flag_dots == 0:
                        flag_dots = 1
                        j_left = line_j - 1

                else:
                    if flag_dots == 1:
                        flag_dots = 0
                        j_right = line_j
                    else:
                        flag_dots = 0

                if ((j_left != 0) & (j_right != 0)):
                    flag_dotd = 0
                    for v_i in range(i_high, i_low + 1):
                        count_cv = 0
                        for h_j in range(j_left, j_right + 1):
                            avalue2 = np.mean(img[v_i, h_j])
                            if (avalue2 == 0):
                                count_cv = count_cv + 1
                        if count_cv >= 1:
                            if flag_dotd == 0:
                                flag_dotd = 1
                                i_nhigh = v_i
                                for drawn_j in range(j_left, j_right + 1):
                                    img[v_i - 1, drawn_j, 2] = 255
                                    img[v_i - 1, drawn_j, 1] = 0
                                    img[v_i - 1, drawn_j, 0] = 0

                            else:
                                img[v_i - 1, j_left, 2] = 255
                                img[v_i - 1, j_left, 1] = 0
                                img[v_i - 1, j_left, 0] = 0
                                img[v_i - 1, j_right, 2] = 255
                                img[v_i - 1, j_right, 1] = 0
                                img[v_i - 1, j_right, 0] = 0

                        else:
                            if flag_dotd == 1:
                                flag_dotd = 0
                                i_nlow = v_i
                                for drawn_j in range(j_left, j_right + 1):
                                    img[v_i, drawn_j, 2] = 255
                                    img[v_i, drawn_j, 1] = 0
                                    img[v_i, drawn_j, 0] = 0

                                img[v_i - 1, j_left, 2] = 255
                                img[v_i - 1, j_left, 1] = 0
                                img[v_i - 1, j_left, 0] = 0
                                img[v_i - 1, j_right, 2] = 255
                                img[v_i - 1, j_right, 1] = 0
                                img[v_i - 1, j_right, 0] = 0

                    #newly added

                    hs = i_nlow - i_nhigh
                    ws = j_right - j_left
                    img1 = np.zeros((hs, ws, 3), np.uint8)
                    img1[:] = (255, 255, 255)
                    for i_s in range(1, hs):
                        for j_s in range(1, ws):
                            img1[i_s, j_s] = img[i_nhigh + i_s, j_left + j_s]
                    #cv.imshow("demos(%d)"%iscount,img1)
                    # print(img1.shape)
                    img2 = cv.resize(img1, (32, 32),
                                     interpolation=cv.INTER_CUBIC)
                    c.append(rg.recognize(img2))

                    #********
                    j_right = 0
                    j_left = 0

            i_high = 0
            i_low = 0
        i = i + 1

    return (c)
Code Example #25
def check_party_affiliation():
    json_obj = request.get_json(force=True)
    result = recognition.recognize(json_obj['query'].lower().split())
    obj = { 'response': result }
    return make_my_response(obj)
Code Example #26
digits_dir = argv[1]
gt_filename = argv[2]

digit_templates = []
for i in range(0, 10):
    digit_templates.append(generate_template(digits_dir + '/' + str(i)))

gt = []
filenames = []
for line in open(gt_filename).readlines():
    (filename, label) = line.rstrip().split(' ')
    filenames.append(filename)
    gt.append((int(label[0]), int(label[1]), int(label[2])))

testing_path = dirname(gt_filename)
accuracy = 0
for i, filename in enumerate(filenames):
    img = imread(testing_path + '/' + filename)

    digits = recognize(img, digit_templates)

    if digits == gt[i]:
        accuracy += 1

accuracy /= float(len(filenames))

stdout.write('%1.2f\n' % accuracy)


Code Example #27
captchaFailed = True
loggedIn = False
while captchaFailed:
    print('getting CAPTCHA...')
    f = explorer.open(
        urllib.request.Request(url=baseURL + '/INC/VerifyCode.aspx',
                               headers=generalHeaders))
    captchabin = f.read()
    myfile = open('./tmp.jpg', 'wb')
    myfile.write(captchabin)
    myfile.flush()
    myfile.close()

    print('recognizing CAPTCHA...')
    captcha = r.recognize('./tmp.jpg')
    print('CAPTCHA was recognized: ' + captcha)

    print('trying to login...')
    logindata = {'title': 'login', 'yzm': captcha}
    logindata.update(loginInfo)
    f = explorer.open(
        urllib.request.Request(url=baseURL + '/ashx/ajaxHandler.ashx',
                               headers=generalHeaders,
                               data=bytes(urllib.parse.urlencode(logindata),
                                          encoding='utf-8')))

    rtn = json.loads(str(f.read(), encoding='utf-8'))
    if (rtn['status'] == 'failed'):
        if (rtn['des'] == '验证码输入错误!'):
            print('CAPTCHA is wrong, try again!')
Code Example #28
# Bind the application to the specified port
s.bind((TCP_IP, TCP_PORT))
print("Associacao feita com sucesso")

# Start listening for new connections
s.listen(1)
print("Escutando na porta ", TCP_PORT)

# Accept the connection
conn, addr = s.accept()
print('Endereço da conexão:', addr)

# The single connection stays open indefinitely (or until the device is switched off)
while True:

    # Receive data from the Arduino
    data = conn.recv(BUFFER_SIZE)
    if not data:
        break
    print("Dado recebido:", data)

    # Run content recognition
    ans = recognition.recognize(data)

    # Reply whether the device should switch off or not
    if ans:
        conn.send(False)
    else:
        conn.send(True)

# Close the connection
conn.close()
Code Example #29
import recognition as r
import sys

filename = sys.argv[1]
print(r.recognize(filename))
Code Example #30
import argparse
from os import system

import cv2
import numpy as np

# detection and recognition are this project's own modules (CTPN detector, CRNN recognizer)
import detection
import recognition

parser = argparse.ArgumentParser()
parser.add_argument("echo")
args = parser.parse_args()

img = cv2.imread(args.echo)
net, sess = detection.detectorload()
model, converter = recognition.crnnloader()
cropedimage = detection.ctpn(sess, net, img, 1)
print("\n" * 50)

text = []
print(len(cropedimage))
for image in cropedimage:
    buf = cv2.resize(image.copy(),
                     (int(image.shape[1] * 2), int(image.shape[0] * 2)))
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    text_sim, cost = recognition.recognize(model, converter, image)
    print('Text: ' + text_sim)

    print('                       Recognition Time: ' + str(np.round(cost, 4)))
    print("\n")
    text.append(text_sim)
    cv2.imshow('My window', buf)
    cv2.waitKey(50)
    system('say ' + text_sim)
    cv2.waitKey(2500)
Code Example #31
File: PyTunes.py  Project: dangvansam98/final-pytunes
def select_by_speech():
    t2i_en = {
        'one': 1,
        'two': 2,
        'three': 3,
        'four': 4,
        'five': 5,
        'six': 6,
        'seven': 7,
        'eight': 8,
        'nine': 9,
        'ten': 10
    }
    t2i_en2 = {
        'number one': 1,
        'number two': 2,
        'number three': 3,
        'number four': 4,
        'number five': 5,
        'number six': 6,
        'number seven': 7,
        'number eight': 8,
        'number nine': 9,
        'number ten': 10
    }
    t2i_vn = {
        'một': 1,
        'hai': 2,
        'ba': 3,
        'bốn': 4,
        'năm': 5,
        'sáu': 6,
        'bẩy': 7,
        'tám': 8,
        'chín': 9,
        'mười': 10
    }
    t2i_vn2 = {
        'số 1': 1,
        'số 2': 2,
        'số 3': 3,
        'số 4': 4,
        'số 5': 5,
        'số 6': 6,
        'số 7': 7,
        'số 8': 8,
        'số 9': 9,
        'số 10': 10
    }
    t2i = {
        '1': 1,
        '2': 2,
        '3': 3,
        '4': 4,
        '5': 5,
        '6': 6,
        '7': 7,
        '8': 8,
        '9': 9,
        '10': 10
    }
    match = False
    first_time = True
    while not match:
        if first_time:
            t2s('Mời bạn chọn bài hát bằng cách nói số thứ tự bài')
            first_time = False
        else:
            t2s("Tôi chưa hiểu hãy thử lại")
        text = recognize()
        if (text in t2i_en or text in t2i_en2 or text in t2i_vn
                or text in t2i_vn2 or text in t2i):
            if text in t2i_en.keys():
                number = t2i_en[text]
                match = True
                break
            elif text in t2i_en2.keys():
                number = t2i_en2[text]
                match = True
                break
            elif text in t2i_vn.keys():
                number = t2i_vn[text]
                match = True
                break
            elif text in t2i_vn2.keys():
                number = t2i_vn2[text]
                match = True
                break
            elif text in t2i.keys():
                number = t2i[text]
                match = True
                break
            # Optionally return a default (index 0) when nothing is recognized;
            # without a default, keep prompting until recognition succeeds.
            #else:
            #match = True
            #number = 1
            #break
    return int(number) - 1