Example #1
	def stereo(self,wdata0,wdata1,framerate):
		'''
		Stereo recognition.

		The "Fast" argument speeds up recognition: if the left channel already matched
		an audio file (e.g. "source1.wav"), the search in the other channel only compares
		against that matched file ("source1.wav") instead of searching all reference
		files again.

		In particular, if the accuracy is high enough, we do not need to search the
		other channel at all.
		'''
		chann0 = 0
		chann1 = 1

		#1> Analyze the chann0 first. 
		index_L, accuracy_L, avgdb_L, location_L= recognize(self.catalog,wdata0,framerate,chann0,Fast=None)
		#if accuracy is high enough, directly return
		if accuracy_L>0.7:
			return self.catalog[index_L], accuracy_L, avgdb_L, location_L
		#2> Analyze the chann1. 'Fast' means fast recognition.
		index_R, accuracy_R, avgdb_R, location_R = recognize(self.catalog,wdata1,framerate,chann1,Fast=index_L)

		#handle the result from chann0 and chann1.
		accuracy   = round((accuracy_L+accuracy_R)/2,3)
		average_db = round((avgdb_L+avgdb_R)/2,3)
		if accuracy_L > accuracy_R:
			location = location_L
		else:
			location = location_R

		if (index_L is not None) and (index_L == index_R):
			return self.catalog[index_L], accuracy ,average_db, location
		else:
			return None,0, average_db, 0
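The shortcut described in the docstring is easy to isolate. Below is a minimal, self-contained sketch of the same control flow with a stubbed recognize(); the stub, the catalog list and the 0.7 threshold are illustrative assumptions, not the original implementation.

def _recognize_stub(catalog, wdata, fast=None):
    # Stand-in for the real fingerprint search: only the candidates listed in
    # `fast` (if given) are compared, mirroring the "Fast" shortcut.
    candidates = [fast] if fast is not None else list(range(len(catalog)))
    best = candidates[0]
    accuracy = 0.9 if wdata else 0.0
    return best, accuracy

def stereo_lookup(catalog, left, right, threshold=0.7):
    idx_l, acc_l = _recognize_stub(catalog, left)
    if acc_l > threshold:                       # confident enough: skip channel 1
        return catalog[idx_l], acc_l
    idx_r, acc_r = _recognize_stub(catalog, right, fast=idx_l)
    if idx_l is not None and idx_l == idx_r:    # both channels agree
        return catalog[idx_l], round((acc_l + acc_r) / 2, 3)
    return None, 0.0

print(stereo_lookup(["source1.wav", "source2.wav"], b"left-pcm", b"right-pcm"))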
Example #2
def lt():
    while True:
        try:
            t = datetime.datetime.now()
            if t.minute in range(16, 44):
                # Sleep for 30 minutes during class.
                print("Half Hour Sleep Time")
                time.sleep(1800)
            elif t.minute in range(45, 59):
                # Call the recognizer again for exit attendance.
                print("Calling Recognizer")
                recognize.recognize()
            elif t.minute in range(0, 15):
                # Call the recognize function during the first 15 minutes to take attendance.
                print("Calling Recognizer Function")
                recognize.recognize()
            else:
                print("Waiting.....")
                print(str(t.hour) + ":" + str(t.minute) + ":" + str(t.second))
                return True
        except KeyboardInterrupt:
            print("Exiting Program\n")
            sys.exit(0)
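The minute windows above are easier to check in isolation. Here is a hedged sketch that extracts the dispatch into a pure function; the window boundaries are the ones hard-coded in lt(), and the action names are placeholders.

def action_for_minute(minute):
    # Mirror the windows used in lt(): break, exit attendance, entry attendance.
    if 16 <= minute <= 43:
        return "sleep"        # half-hour break during class
    if 45 <= minute <= 58:
        return "recognize"    # exit-attendance window
    if 0 <= minute <= 14:
        return "recognize"    # first 15 minutes: entry attendance
    return "wait"             # minutes 15, 44 and 59 fall through

print(action_for_minute(10), action_for_minute(30), action_for_minute(50))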
Example #3
def RecognizeSpeech(AUDIO_FILENAME, num_seconds = 5):
    # record audio of specified length in specified audio file
    record_audio(num_seconds, AUDIO_FILENAME)

    # reading audio
    audio = read_audio(AUDIO_FILENAME)

    # send to WIT.AI
    recognize(audio)
Example #4
def login(): 
   if request.method == 'POST': 
      user = request.form['nm'] 
      identity=str(recognize())
      return redirect(url_for('success',name = user+identity)) 
   else: 
      user = request.args.get('nm') 
      identity=str(recognize())
      return redirect(url_for('success',name = user+identity)) 
Example #5
def RecognizeSpeech(AUDIO_FILENAME, num_seconds=5):
    # sleep for n seconds instead of recording a file
    sleep(num_seconds)

    # reading audio
    audio = read_audio(AUDIO_FILENAME)

    # send to WIT.AI
    recognize(audio)
Example #6
def RecognizeSpeechAndRemoveFile(AUDIO_FILENAME):
    #print("Recognize and remove file", AUDIO_FILENAME)

    # reading audio
    audio = read_file(AUDIO_FILENAME)

    delete_file(AUDIO_FILENAME)

    # send to WIT.AI
    recognize(audio)
Example #7
def RecognizeSpeechAndRemoveFile(AUDIO_FILENAME):
    # reading audio
    audio = read_file(AUDIO_FILENAME)

    # delete the file because its contents are already in the "audio" variable
    delete_file(AUDIO_FILENAME)

    # send to WIT.AI
    recognize(audio)

    time.sleep(3)
Example #8
def single(file_path, dst_dir):
    logging.info('start {}'.format(file_path))
    name_bytes, time_bytes, crop_region = crop(file_path)
    time.sleep(1)
    chat_name = recognize(name_bytes)
    time.sleep(1)
    qr_time = recognize(time_bytes)
    exp_time = get_expiration(qr_time)
    ext_name = os.path.splitext(file_path)[1]
    dst_file_name = "{}.{}{}".format(chat_name, exp_time, ext_name)
    dst_file_path = os.path.join(os.path.abspath(dst_dir), dst_file_name)
    logging.info(dst_file_name)
    crop_region.save(dst_file_path)
    logging.info('finish {}'.format(file_path))
Example #9
def cli():

    if len(sys.argv) > 1:
        path = sys.argv[1]
    else:
        path = os.path.join(recognize_dir, 'test_set')

    captcha_list = []
    for fn in os.listdir(path):
        name, ext = os.path.splitext(fn)
        if ext == '.png' and len(name) == 4:  # ensure this is a test image file
            captcha_list.append(os.path.join(path, fn))

    result_list = recognize(captcha_list)
    correct = 0
    for path, result in zip(captcha_list, result_list):
        label = os.path.splitext(os.path.basename(path))[0][:4]
        print('%04s %04s ' % (label, result), end='')
        if label.lower() == result.lower():
            print(True)
            correct += 1
        else:
            print(False)

    print('accuracy: %f' % (correct / len(captcha_list)))
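The accuracy tally at the end of cli() can be expressed as a small reusable helper. A sketch follows; the labels and results are made-up values, not output of the real recognizer.

def captcha_accuracy(labels, results):
    # Case-insensitive comparison, exactly as in the loop above.
    correct = sum(1 for a, b in zip(labels, results) if a.lower() == b.lower())
    return correct / len(labels) if labels else 0.0

print('accuracy: %f' % captcha_accuracy(["AB12", "cd34"], ["ab12", "cd35"]))  # 0.500000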
Example #10
def recognize(cmdline):
    survey = model.survey.Survey.load(cmdline['project'])
    import recognize
    if cmdline['identify']:
        return recognize.identify(survey)
    else:
        return recognize.recognize(survey)
Example #11
def run(ans):
    if ans == 1:
        com = input(
            'Enter folder name to protect or "create NEW_FOLDER_NAME" to create new\n > '
        )

        # if you want to create new folder
        if 'create' in com:
            com = com.split(' ')[1]
            os.system('mkdir {}'.format(com))

        # if directory does not exist
        if not os.path.isdir(com):
            print('Directory "{}" does not exist. EXIT.'.format(com))
            exit(0)

        passwd = ''

        # get image to decode folder later
        if recognize.get_image():
            print('Thank you, continue encoding.')
            # generate password
            passwd = generate_password()

            write_file(passwd)
        else:
            print('Bad image. Can not find face.')
            exit(0)

        # encode folder
        encode(com, passwd)
    elif ans == 2:
        filename = input('Enter .zip file name\n > ')
        if recognize.recognize():
            decode(filename, read_file())
Example #12
    def post(self):
        # param = request.args
        # id = param.get('name')
        user_id = request.form[
            'name']  #need to have UserId from database as a primary key or something

        if user_id and not (db.session.query(UsersDataSet).filter_by(
                UserId=user_id).count() == 0) and not (db.session.query(
                    ProfilePicDataSet).filter_by(UserId=user_id).count() == 0):
            dict1 = recognize.recognize(user_id, db)
            # full_image_path = []
            # for x in dict1.keys():
            #     full_image_path.append(file_manage.file_manage5(x) + "/current.jpg")              # {
            #     # 'katrina': (96.09733319308081, 'C:\\Users\\usama\\Desktop\\Heroku\\App/profile_katrina'),
            #     # 'trisha': (60.28771775012819, 'C:\\Users\\usama\\Desktop\\Heroku\\App/profile_trisha'),
            #     # }

            # dict2 = dict(zip(dict1.keys(), zip(dict1.values(), full_image_path)))
            return make_response(render_template("index.html", data0=dict1))
        else:
            return make_response(
                render_template(
                    "index.html",
                    var0=
                    "ERROR: Please make sure you entered your name and pics to input box and database correctly in sequence given",
                    data0={}))
Example #13
    def verifiThread(self):
        # Put an empty string on the clipboard so the loop below can detect newly copied text.
        text = ""
        win32clipboard.OpenClipboard()
        win32clipboard.SetClipboardData(win32con.CF_UNICODETEXT, text)
        win32clipboard.CloseClipboard()

        while (len(text) == 0):
            # Click the text field several times to select its contents.
            win32api.SetCursorPos([self.x() + 800, self.y() + 370])
            win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, 0, 0)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN |
                                 win32con.MOUSEEVENTF_LEFTUP, 0, 0)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN |
                                 win32con.MOUSEEVENTF_LEFTUP, 0, 0)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN |
                                 win32con.MOUSEEVENTF_LEFTUP, 0, 0)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN |
                                 win32con.MOUSEEVENTF_LEFTUP, 0, 0)

            # Copy the selection with Ctrl+C and read it back from the clipboard.
            win32api.keybd_event(win32con.VK_CONTROL, 0, 0, 0)
            win32api.keybd_event(67, 0, 0, 0)
            win32api.keybd_event(67, 0, win32con.KEYEVENTF_KEYUP, 0)
            win32api.keybd_event(win32con.VK_CONTROL, 0,
                                 win32con.KEYEVENTF_KEYUP, 0)
            win32clipboard.OpenClipboard()
            text = win32clipboard.GetClipboardData(win32con.CF_UNICODETEXT)
            win32clipboard.CloseClipboard()

        self.click(560, 440)

        # Grab the code region, save it, recognize it, and type in the answer.
        x, y = self.x(), self.y()
        img = ImageGrab.grab((x + 502, y + 416, x + 613, y + 447))
        img.save(constents.imgPath)
        self.sendText(recognize.recognize(description=text), 750, 430)
Example #14
def cli():

    if len(sys.argv) > 1:
        path = sys.argv[1]
    else:
        path = os.path.join(recognize_dir, 'test_set')

    captcha_list = []
    for fn in os.listdir(path):
        name, ext = os.path.splitext(fn)
        if ext == '.png' and len(name) == 4:  # ensure this is a test image file
            captcha_list.append(os.path.join(path, fn))

    result_list = recognize(captcha_list)
    correct = 0
    for path, result in zip(captcha_list, result_list):
        label = os.path.splitext(os.path.basename(path))[0][:4]
        print('%04s %04s ' % (label, result), end='')
        if label.lower() == result.lower():
            print(True)
            correct += 1
        else:
            print(False)

    print('accuracy: %f' % (correct/len(captcha_list)))
Example #15
def recognize_video(file):
    if request.method == 'POST':
        os_file_path = os.path.join(app.config['UPLOAD_FOLDER'], file)

        cap = cv2.VideoCapture(os_file_path)
        results = []
        count = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                if count % 10 == 0:
                    if frame is not None:
                        x = int(float(request.form['x']))
                        width = int(float(request.form['width']))
                        y = int(float(request.form['y']))
                        height = int(float(request.form['height']))
                        frame = frame[y:y + height, x:x + width]
                        result = recognize(frame)
                        if len(result) == 0:
                            result = "NOT RECOGNIZED"
                        results.append([len(results) + 1, result])
                        if len(results) >= 10:
                            break
                count += 1
            else:
                break
        return render_template('result_video.html', results=results)
    else:
        os_file_path = os.path.join(app.config['UPLOAD_FOLDER'], file)
        cap = cv2.VideoCapture(os_file_path)
        ret, frame = cap.read()
        os_frame_path = os.path.join(app.config['UPLOAD_FOLDER'], 'frames', file+'.png')
        cv2.imwrite(os_frame_path, frame)
        # TODO replace with os.path.join
        return render_template('crop.html', image_source=f'frames/{file}.png')
Example #16
def tagFile(path, returnFilePath=False):
    """
        Tags single file and renames it in format Artist - Song name.
        Args:
            path: path to the file.
            returnFilePath: if True function will return new fileName instead of -1
        Retruns:
            0 if tagging was successful.
            -1 if tagging failed.
    """
    # print("Tagging: "+path)
    song_id = recognize(path)
    if song_id == -1:
        return -1
    tags = getTags(song_id)
    af = eyed3.load(path)
    if not af.tag:
        af.initTag()
    af.tag.artist = tags['artist'].decode('utf-8')
    af.tag.title = tags['title'].decode('utf-8')
    if tags['album'] is not None:
        af.tag.album = tags['album'].decode('utf-8')
    else:
        af.tag.album = u''
    if tags['track'] is not None:
        af.tag.track_num = tags['track']
    af.tag.save()

    filename, fileExtension = os.path.splitext(path)
    newFilename = settings.rename(path, af.tag.artist, af.tag.title,
                                  af.tag.album, fileExtension)
    if returnFilePath:
        return newFilename
    return 0
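The eyed3 part of tagFile() can stand on its own. Below is a minimal sketch of just that tagging step; the file name and tag values are placeholders, and only the eyed3 calls already used above (load, initTag, tag.save) are relied on.

import eyed3

def write_basic_tags(path, artist, title, album=u''):
    af = eyed3.load(path)
    if af is None:            # not a readable audio file
        return -1
    if not af.tag:
        af.initTag()
    af.tag.artist = artist
    af.tag.title = title
    af.tag.album = album
    af.tag.save()
    return 0

# write_basic_tags("song.mp3", u"Some Artist", u"Some Title")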
Example #17
def RecognizeCaptcha(c):
    global SIpON
    img_base64 = SIpON.execute_script(
        """var ele = arguments[0];var cnv = document.createElement('canvas');cnv.width = 180; cnv.height = 50;cnv.getContext('2d').drawImage(ele, 0, 0);return cnv.toDataURL('image/png').substring(22);""",
        c)
    captcha = recognize(img_base64)
    return captcha
Example #18
def RecognizeCaptcha(c):
    global SIpON
    #code from https://gist.github.com/spirkaa/4c3b8ad8fd34324bd307#gistcomment-3157744
    img_base64 = SIpON.execute_script(
        """var ele = arguments[0];var cnv = document.createElement('canvas');cnv.width = 180; cnv.height = 50;cnv.getContext('2d').drawImage(ele, 0, 0);return cnv.toDataURL('image/png').substring(22);""",
        c)
    captcha = recognize(img_base64)
    return captcha
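Whatever recognize(img_base64) does internally, its first step has to be decoding the canvas payload produced by the JavaScript above. A sketch of that decoding with Pillow follows; it is an assumption about the downstream code, not taken from either project.

import base64
import io

from PIL import Image

def decode_captcha_png(img_base64):
    # The script returns a data URL with the "data:image/png;base64," prefix
    # already stripped (substring(22)), so this is plain base64-encoded PNG data.
    raw = base64.b64decode(img_base64)
    return Image.open(io.BytesIO(raw))   # a 180x50 image, per the canvas size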
Example #19
    def test_recognize_shortcut_consistency(self):
        """The shortcut method should produce the same results as the "long way."""
        expected = set(["banana"])
        result = recognize.recognize("ana", vocabulary=self.vocabulary, distance=self.distance) 

        r = recognize.Recognizer(self.vocabulary, self.distance)
        long_way_result = r.recognize("ana")
        assert result == long_way_result
Example #20
	def mono(self,wdata,channel,framerate):
		'''
		mono recognition

		'''
		#audio recognition
		match_index, accuracy, avgdb, location = recognize(self.catalog,wdata,framerate,channel)

		return self.catalog[match_index],accuracy,avgdb,location
Example #21
def main(body):
    request = json.loads(body)
    if 'DEBUG' in os.environ:
        r = redis.Redis(host='redis')
    else:
        r = redis.Redis()

    em = Emoshape(host=EMOSHAPE_HOST, port=2424, secret=EMOSHAPE_SECRET)

    # Depending on what came in, pass it to Emoshape
    if 'text' in request:
        text = request['text']
        em.sendMessage(text)
        if text == ' ' or text == '':
            r.set(request['csrftoken'],
                  json.dumps({'errorMessage': 'No text found!'}))
            return
    elif 'file' in request:
        text = recognize(storage_uri=request['file'])
        em.sendMessage(text)
        if text == ' ' or text == '':
            r.set(request['csrftoken'],
                  json.dumps({'errorMessage': 'No text found!'}))
            return
            # em.tone_on('/home/vladislav/Музыка/1.mp3', '/home/vladislav/virtmic')
    else:
        return

    # Get the output data from Emoshape
    time.sleep(3)
    timeshtamp = datetime.datetime.now()
    emotionIndex, maxEmotions, maxEmotionsList = checkEmotion(em.getChannels())
    print(f" LOG: {timeshtamp} - [{request['csrftoken']}] [{text}] = " +
          emotionIndex + ' --- ' + str(maxEmotionsList))
    em.closeConnection()

    # Get the output data from Symbl AI
    s = SymblAI(message=text)
    res = s.getTopics()
    topics = []
    for item in res['topics']:
        print(item['text'])
        topics.append(item['text'])

    print(f" LOG: {timeshtamp} - [{request['csrftoken']}] [topics] = " +
          str(topics))

    # Save the output data in Redis, keyed by the user's CSRF token
    r.set(
        request['csrftoken'],
        json.dumps({
            'emotionIndex': int(emotionIndex),
            'maxEmotions': ' '.join(maxEmotions),
            'maxEmotionsList': ', '.join(maxEmotionsList),
            'symblTopics': ', '.join(topics)
        }))
Example #22
def main():
    args = argumentparser()
    if args.t_path:
        trainRecognizer.train(args.t_path,None)
    elif args.r_path:
        recognize(args.r_path)
    elif args.ta_path:
        paths = [x[0] for x in os.walk(args.ta_path)][1:]
        for i in paths:
            trainRecognizer.train(i)
    elif args.ra_path:
        paths = trainRecognizer.listdir_nohidden(args.ra_path)
        for path in paths:
            print(path)
            recognize(path)
    elif args.store_true:
        pass
    else:
        print("Please enter a command!")
Example #23
def api_message():
    with open('./file', 'wb') as f:
        f.write(request.data)

    src = "./file"
    dst = "./test.wav"
    sound = AudioSegment.from_file(src)
    sound.export(dst, format="wav")
    identity = recognize()
    return "Binary message written!"
Example #24
def RecognizeCaptcha(c):
    global EGRN
    try:
        img_base64 = EGRN.execute_script("""var ele = arguments[0];var cnv = document.createElement('canvas');cnv.width = 180; cnv.height = 50;cnv.getContext('2d').drawImage(ele, 0, 0);return cnv.toDataURL('image/png').substring(22);""", c)
        captcha = recognize(img_base64)
    except Exception:
        print("Error while processing the captcha. Most likely a site-side error.")
        captcha = ""
    if captcha == "44444":
        print("Error while processing the captcha (44444).")
        captcha = ""
    return captcha
Example #25
def recognize(cmdline):
    survey = model.survey.Survey.load(cmdline['project'])
    import recognize

    if not cmdline['rerun']:
        filter = lambda : not (survey.sheet.verified or survey.sheet.recognized)
    else:
        filter = lambda: True

    if cmdline['identify']:
        return recognize.identify(survey, filter)
    else:
        return recognize.recognize(survey, filter)
Example #26
def recognize(cmdline):
    survey = model.survey.Survey.load(cmdline['project'])
    import recognize

    if not cmdline['rerun']:
        filter = lambda: not (survey.sheet.verified or survey.sheet.recognized)
    else:
        filter = lambda: True

    if cmdline['identify']:
        return recognize.identify(survey, filter)
    else:
        return recognize.recognize(survey, filter)
Example #27
def read_vector():
    # Get path string from form
    path = request.form.get("draw")

    # Make into image with functions from vector_helpers
    image = make_image(path)

    # Recognize image (after blurring)
    character = recognize(blur(image))

    # Saves image and recognition in session
    session["image"] = image.tolist()
    session["character"] = character

    return render_template("recognize.html", character=character)
Example #28
def recognize_photo(file):
    if request.method == 'POST':
        os_file_path = os.path.join(app.config['UPLOAD_FOLDER'], file)

        img = cv2.imread(os_file_path)
        x = int(float(request.form['x']))
        width = int(float(request.form['width']))
        y = int(float(request.form['y']))
        height = int(float(request.form['height']))
        img = img[y:y+height, x:x+width]
        string = recognize(img)
        if len(string) == 0:
            string = "NOT RECOGNIZED"
        return render_template('result_photo.html', result=string)
    else:
        return render_template('crop.html', image_source=file)
Example #29
def main(input, output, display, detection, tolerance):
    print("[INFO] loading encodings...")
    data = pickle.loads(open('encodings.pickle', "rb").read())
    print("[INFO] LOADING VIDEO...")
    vid = cv2.VideoCapture(input)
    writer = None
    while True:
        names = []
        _, frame = vid.read()
        if frame is None:
            break
        rgb = imutils.resize(frame, width=620)
        r = frame.shape[1] / float(rgb.shape[1])
        boxes = recognize(rgb, names, data, detection, tolerance)
        for ((top, right, bottom, left), name) in zip(boxes, names):
            if name == '???': continue
            top = int(top * r)
            right = int(right * r)
            bottom = int(bottom * r)
            left = int(left * r)
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                        (0, 255, 0), 2)
        # if the video writer is None *AND* we are supposed to write
        # the output video to disk initialize the writer
        if writer is None and output is not None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, 20,
                                     (frame.shape[1], frame.shape[0]), True)
        # if the writer is not None, write the frame with recognized
        # faces to disk
        if writer is not None:
            writer.write(frame)
        # check to see if we are supposed to display the output frame to
        # the screen
        if display:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vid.release()
    # check to see if the video writer needs to be released
    if writer is not None:
        writer.release()
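One detail worth calling out: recognition runs on a frame resized to width 620, so each box has to be scaled back by r = original_width / resized_width before drawing on the full-size frame. A tiny standalone sketch of that arithmetic (the sample values are made up):

def scale_box(box, r):
    # box is (top, right, bottom, left) in resized-frame coordinates.
    return tuple(int(v * r) for v in box)

print(scale_box((10, 100, 60, 20), 1.5))   # (15, 150, 90, 30)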
Example #30
def read_file():
    # Get file from form
    file = request.files["file"]

    # Use scipy function to read image into pixel array
    arr = misc.imread(file)

    # Send array through a bunch of functions in bitmap_helpers
    image = process(arr)

    # Recognize image
    character = recognize(image)

    # Saves image and recognition in session
    session["image"] = image.tolist()
    session["character"] = character

    return render_template("recognize.html", character=character)
Example #31
def hyperspectralHandler(request):
    name = zipSaving(request)

    path = 'image_folder/' + name + "/"
    new_dir = 'cut_images/' + name + "/"
    for file in os.listdir(path):
        new_new_dir = new_dir + file
        cut(path + file, new_new_dir)

    result = recognize("../IA/model.h5", new_dir + "478nm.tiff")

    dir_save = "result_color"
    if not os.path.exists("static/" + dir_save):
        os.makedirs("static/" + dir_save)
    img = color(path + "610nm.tiff", path + "550nm.tiff", path + "466nm.tiff")
    img = img.convert("RGB")

    return (img, result)
Example #32
def classicPictureHandler(request):
    name = classicPictureSaving(request)

    path = 'image_folder/' + name + "/"
    new_dir = 'cut_images/' + name + "/"
    for file in os.listdir(path):
        new_new_dir = new_dir + file
        cut_jpg_png(path + file, new_new_dir)

    result = recognize("../IA/model.h5", new_dir + "/" + name)

    dir_save = "result_color"
    if not os.path.exists("static/" + dir_save):
        os.makedirs("static/" + dir_save)
    img = Image.open(path + name)
    img = img.convert("RGB")

    return (img, result)
Example #33
	def mono(self,wdata,channel,framerate,quick=None):
		'''
		To improve recognition speed we use the "quick" argument.
		If an audio file (e.g. "source1.wav") was matched in channel 0, the search in channel 1
		directly compares against that matched file ("source1.wav") rather than searching all
		reference files again.
		'''
		if len(self.catalog)==0:
			print("Error: No data saved in the pkl file. "
				"Please save the fingerprints of the reference audio first.")
			time.sleep(5)
			os._exit(1)

		#audio recognition
		audio_name, confidence,avgdb=recognize(self.catalog,wdata,framerate,channel,quick)
		#broken frame detection
		broframe=detect_broken_frame(wdata, framerate)

		return {"name":audio_name,"broken_frame":broframe,"confidence":confidence,"average_db":avgdb}
Example #34
def recognition(path):
    audio_mp3 = path
    name = audio_mp3.split('/')[-1]
    name = name.replace('.mp3', '')

    audio_wav = conv_mp3_to_wav(str(audio_mp3))
    result = recognize(str(audio_wav))
    '''
	Save the transcription results to the text/ folder
	'''

    out_r = "text/" + name + "1.txt"
    save_in_file(result, out_r)
    '''
	The code below is only needed to convert to the required sample rate,
	in case the source rate is not suitable for diarization
	'''

    sample_check(audio_wav)

    # The idea is to save the results to a text file that will live in the text folder
    # Save the transcription results to the text/ folder

    out_d = "text/" + name + "2.txt"
    '''
	   -> str(audio_wav) is the path to the recording; if mp3 format is needed, replace it with audio_mp3
	   -> out_d is the path where the result should be saved
	'''
    #direalize(str(audio_wav), out_d)
    '''
	The path below points to a file with random timecodes (to test that the module works end to end).
	Once diarization is connected, it can be removed and the line above uncommented.
	'''
    out_d = "test.txt"

    result_src = merge(out_r, out_d)

    result_dst = "done/" + name + ".txt"
    open(result_dst, 'a')
    shutil.copyfile(result_src, result_dst)
    os.remove(result_src)

    # Remove intermediate results
    os.remove(out_r)
    os.remove(out_d)
Example #35
def test(scan, filename):
    p1 = scan.find('\\')
    p2 = scan.find('.')
    result_folder = 'results\\' + filename[(p1 + 1):p2] + '\\'
    expected_file = 'expected\\' + filename[(p1 + 1):p2] + '.txt'

    imgs_dir = 'results\\' + scan + '\\letters\\'
    img_path = imgs_dir + filename
    print(img_path)
    img = Image.open(img_path)
    ocr_letter = pytesseract.image_to_string(img,
                                             lang='srp',
                                             config='--psm 10')
    print(ocr_letter)

    res = rc.recognize(img_path)

    print(res)
    print('end')
Example #36
def submit_link():
    import urllib.request
    import base64
    import uuid
    from recognize import recognize
    if request.method == 'GET':
        userID = request.args.get('q', '')
        return render_template("submit_link.html", userID=userID)
    elif request.method == 'POST':
        url = request.form.get('url')
        userID = request.form.get('userID')
        # check if the post request has the file part
        if 'file' in request.files:

            file = request.files['file']

            # if user does not select file, browser also
            # submit an empty part without filename
            if file.filename == '':
                flash('No selected file')
                return redirect(request.url)
            if file and allowed_file(file.filename):
                filename = file.filename
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
                full_path = '{0}{1}'.format(UPLOAD_FOLDER, filename)
                path = 'img/{0}'.format(filename)
        elif url:
            uuid = uuid.uuid4()

            uuid = str(uuid)
            path = 'img/{0}.{1}'.format(uuid, 'png')
            full_path = '{0}/src/templates/{1}'.format(os.getcwd(), path)
            resource = urllib.request.urlopen(url)
            output = open(full_path, "wb")
            output.write(resource.read())
            output.close()
        else:
            flash('Please enter valid data')
            return redirect(request.url)
        with open(full_path, 'rb') as image:
            response = recognize(image.read(), path, userID)
            return render_template("result.html", score=response['score'],
                                   detected_labels=response['detected_labels'], imagePath=path, userID=userID)
Example #37
def perform_classification(image, model):
    data = "./fullrun_3"

    gt = "./mitoses_ground_truth"

    #Model_path = "./sift_svm_20181204-095339.pkl"
    Model_path = model
    #Pred_path = "./fullrun_1/TUPAC-TE-200.svs_01_07.png"
    Pred_path = image

    mitoses_model = recognize.recognize(data, gt)
    mitoses_model.sift.MBkmeans = joblib.load('./kmeans_20181204-095339.pkl')
    predict_val = mitoses_model.predict(Pred_path, Model_path)

    print("predict val =", predict_val)
    print("only binary==", predict_val[0])
    if math.isnan(predict_val[0]):
        return 0
    else:
        return predict_val[0]
Example #38
def main_worker():
    """
        Main worker - handles incoming updates
    """
    try:
        update_data = request.get_json()
        update_obj = update.Update(update_data)
        if update_obj.message.voice:
            voice_file = bot.download_file(update_obj.message.voice.file_id)
            if not voice_file:
                return "ok"
            audio_file = audio.AudioFile(voice_file, "ogg", "mp3")
            voice_text = recognize(audio_file)
            bot.send_message(update_obj.message.chat.id,
                             voice_text,
                             reply_to_message_id=update_obj.message.message_id)
    # Always return "ok" so that we do not receive the same update again
    except Exception as exc:
        return "ok"
    return "ok"
Example #39
    def watch_photos(self):  # method handling the control stream for the service parameter
        photos_input = self.get_input("photosInput")  # input interface object
        user_output = self.get_output("userOutput")
        photos_recognized_output = self.get_output("photosRecognizedOutput")
        while self.running():  # main loop of the thread handling the control stream
            self.photos_count = self.get_parameter("photosCount")  # read the "photosCount" parameter value
            if len(self.photos) < self.photos_count:
                photo = np.loads(photos_input.read())
                label, confidence, name = recognize(photo, "eigenModel.xml", 12000)
                self.photos.append(name)
                print("send output with count", str(len(self.photos)), "label: ", str(label), "confidence: ", str(confidence))
                photos_recognized_output.send(len(self.photos))
#                photos_output.send(photo.dumps())
            else:
                counter = Counter(self.photos)
                most_common = [el for el, count in counter.most_common(1)][0]
                ratio = self.photos.count(most_common) / self.photos_count
                if ratio > 0.6:
                    print("sending " + most_common)
                    user_output.send(most_common)
                else:
                    user_output.send("UNKNOWN")

            print(str(len(self.photos)), str(self.photos_count))
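The decision rule at the end of watch_photos() is a simple majority vote with a 60% acceptance threshold. Here is a standalone sketch of just that step; the labels are placeholders, not output of the real recognizer.

from collections import Counter

def majority_label(labels, min_ratio=0.6):
    if not labels:
        return "UNKNOWN"
    label, count = Counter(labels).most_common(1)[0]
    return label if count / len(labels) > min_ratio else "UNKNOWN"

print(majority_label(["alice", "alice", "bob", "alice"]))   # alice
print(majority_label(["alice", "bob", "carol"]))            # UNKNOWN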
Example #40
    def test_recognize_shortcut_correctness(self):
        """The shortcut method should produce correct results."""
        # Test that the shortcut result is correct
        expected = set(["banana"])
        result = recognize.recognize("ana", vocabulary=self.vocabulary, distance=self.distance)
        assert result == expected