Code Example #1
import os
import shutil
# GEI_Builder and recognizer are assumed to be project-local modules

def recognize(input_video):
    # input_video = "videos/Asiri1.mp4"
    poses_folder = 'poses_over_gait_cycle'
    gei_folder = 'geis'
    gei_name = 'gei.png'

    def clean_folder(folder):
        for filename in os.listdir(folder):
            file_path = os.path.join(folder, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (file_path, e))

    # clear poses frames folder
    if os.path.exists(poses_folder):
        clean_folder(poses_folder)

    #generate pose images
    cmd_generate_pose_images = "03_keypoints_from_image.exe " + input_video + " " + poses_folder
    os.system(cmd_generate_pose_images)

    # generate the Gait Energy Image (GEI)
    if not os.path.exists(gei_folder):
        os.makedirs(gei_folder)
    gei_out = gei_folder + '/' + gei_name
    GEI_Builder.build_gei(poses_folder, gei_out)
    return recognizer.recognize(gei_out)
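
A minimal driver for the function above, as a sketch: the video path is a placeholder, and it assumes the pose-extraction binary, GEI_Builder, and recognizer are all available.

if __name__ == "__main__":
    # "videos/sample_walk.mp4" is a hypothetical input path
    subject = recognize("videos/sample_walk.mp4")
    print("Recognized subject:", subject)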
Code Example #2
File: curveship.py Project: jesstess/curveship
def handle_input(user_input, world, discourse, in_stream, out_streams):
    """Deal with input obtained, sending it to the appropriate module.

    The commanded character's concept is used when trying to recognize
    commands."""
    c_concept = world.concept[discourse.spin['commanded']]
    user_input = recognizer.recognize(user_input, discourse, c_concept)
    if user_input.unrecognized:
        user_input = clarifier.clarify(user_input, c_concept, discourse,
                                       in_stream, out_streams)
    if user_input.command:
        user_input, id_list, world = simulator(user_input, world,
                                               discourse.spin['commanded'])
        if hasattr(world.item['@cosmos'], 'update_spin'):
            discourse.spin = world.item['@cosmos'].update_spin(
                world, discourse)
        spin = discourse.spin
        if hasattr(world.item['@cosmos'], 'use_spin'):
            spin = world.item['@cosmos'].use_spin(world, discourse.spin)
        f_concept = world.concept[spin['focalizer']]
        tale, discourse = teller(id_list, f_concept, discourse)
        presenter.present(tale, out_streams)
    elif user_input.directive:
        texts, world, discourse = joker.joke(user_input.normal, world,
                                             discourse)
        for text in texts:
            if text is not None:
                presenter.present(text, out_streams)
    discourse.input_list.update(user_input)
    return (user_input, world, discourse)
Code Example #3
File: run.py Project: mintyque/hackathon
def service():
    if request.method == 'POST':
        file = request.files['file']
        file.save('image_test.jpg')  # persist the upload so predict() can read it

        # Car model classification
        # format: LADA_PRIORA_B
        brand, model, veh_type = predict('image_test.jpg')

        # Car plate detection
        plate_image = detect('image_test.jpg')

        # Car plate recognition
        car_plate = recognize(plate_image)

        response = {  # several of the fields below are hard-coded demo values
            "brand": brand,
            "model": model,
            "probability": "72.5",
            "veh_type": veh_type,
            "coord": "[(398,292),(573,360)]",
            "id": "0001",
            "plate": "x000xxx111"
        }
        response = json.dumps(response)

        return Response(response=response,
                        status=200,
                        mimetype="application/json")
    return render_template("service.html")
Code Example #4
def service():
    if request.method == 'POST':
        file = request.files['file']
        file.save('image_test.jpg')

        # Car model classification
        brand, model, veh_type = predict('image_test.jpg')

        # Car plate detection
        detect('image_test.jpg')

        # Car plate recognition
        # detect() is assumed to write the cropped plate image to 'X000XX000.jpg'
        text, prob = recognize('X000XX000.jpg')
        response = {
            "brand": brand,
            "model": model,
            "probability": prob,
            "veh_type": veh_type,
            "coord": "[(398,292),(573,360)]",
            "id": "0001",
            "plate": text
        }
        response = json.dumps(response, ensure_ascii=False)

        return Response(response=response,
                        status=200,
                        mimetype="application/json")
    return render_template("service.html")
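
A client-side sketch for exercising this endpoint; the host, port, and /service route are assumptions, since the excerpt does not show the route decorator.

import requests

# hypothetical URL; point it at wherever the Flask app actually runs
with open("car.jpg", "rb") as fh:
    resp = requests.post("http://localhost:5000/service", files={"file": fh})

print(resp.status_code)
print(resp.json())  # {"brand": ..., "model": ..., "plate": ..., ...}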
Code Example #5
def ipcHandler(conn, data):
    global result, player
    if data == LINK:
        r = recognizer.init_arduino()
        if r == -1:
            conn.send(CONN_FAILED)
        else:
            conn.send(RUN_SUCCESS)
        return

    if data == SET_READY:
        if recognizer.set_ready():
            conn.send(RUN_SUCCESS)
            recognizer.capture(recognizer.rawCapture)
            result = recognizer.recognize(recognizer.rawCapture)
        else:
            conn.send(ARDUINO_FAILED)
        return

    if data == SET_SIMPLE_DEAL:
        recognizer.set_deal()
        recognizer.set_ready()
        conn.send(RUN_SUCCESS)
        recognizer.capture(recognizer.rawCapture)
        result = recognizer.recognize(recognizer.rawCapture)
        return

    if data == SET_DEAL:
        recognizer.set_deal()
        action = player.if_slap(result)
        if action == SET_HIT:
            recognizer.hit()
        elif action == SET_FAKE_HIT:
            recognizer.fake_hit()

        player.increment()

        #recognizer.set_ready()
        conn.send(str(result))
        #recognizer.capture(recognizer.rawCapture)
        #result = recognizer.recognize(recognizer.rawCapture)
        return
Code Example #6
def genre_stat(filename):
    song = sound.Sound(filename)
    song.load_and_gen_obj()
    stats = make_stats(song)
    prediction = recognizer.recognize(filename)
    genre = index_to_genre(prediction)
    return jsonify({
        'genre': genre,
        'duration': song.duration,
        'tempo': song.tempo[0],
        'tuning': song.tuning
    }), 200
Code Example #7
    def RecognizeFaces(self, request, context):

        faces = recognizer.recognize(request.url)
        proto_faces = []
        for face in faces:
            area = face['area']
            encoding = face['encoding']
            proto_face = proto.Face(x=area['x'],
                                    y=area['y'],
                                    width=area['width'],
                                    height=area['height'],
                                    encoding=encoding)
            proto_faces.append(proto_face)

        metrics.request_counter.labels('recognize_faces').inc()

        return proto.RegognizeResponse(faces=proto_faces)
Code Example #8
def upload():
    """
    Handles /recognize endpoint
    Checks that we got proper file, process it
    and return json result to client
    :return:
    """
    if 'file' not in request.files:
        return make_error("Cannot find file")
    file = request.files['file']
    if file.filename == '':
        return make_error("No selected file")

    if file and allowed_file_extension(file.filename):
        filename = secure_filename(file.filename)
        file.save(path.join(app.config['UPLOAD_FOLDER'], filename))
        text, result = recognize(filename)
        return make_success(text) if result else make_error(text)
    return make_error("File type not allowed")
Code Example #9
File: main.py Project: evanyui/Home-Assistant
def main():
    # Turn off hotword detector
    detector.terminate()

    # Ask
    replies = ["是,主人", "有什麼吩咐嗎", "是"]
    tts.speak(random.choice(replies))
    play_file()

    # Listen
    print("Listening...")
    sentence = recognizer.recognize()

    # Processing
    print("Processing...")
    success = sentence[0]
    userSays = " ".join(sentence[1])
    if success:
        intention = classifier.classify(userSays)
        confidence = classifier.getProbability(userSays, intention)
        print(confidence)

        # FEATURES
        if intention in features and confidence > 0.70:  # confidence boundary
            answer = features[intention]()
        # CONVERSATION
        elif confidence > 0.70:
            response = classifier.response(userSays)
            answer = response
        # NO ANSWER
        else:
            answer = "對不起,我聽不太懂"

        if tts.speak(answer):
            play_file()
    else:
        if tts.speak(userSays):
            play_file()

    # Turn back on hotword detector
    listen()
Code Example #10
def gen_livestream():
    global last_frame
    while True:
        if app.queue.qsize():
            frame = base64.b64decode(app.queue.get().split('base64')[-1])
            last_frame = frame
        else:
            if last_frame is None:
                # d is the app's base directory, defined elsewhere in the source module
                with open(d + "/static/black.jpg", "rb") as fh:
                    frame = fh.read()
            else:
                frame = last_frame
        if last_frame:
            img_np = np.array(Image.open(io.BytesIO(frame)))
            img_np = recognize(img_np)
            frame = cv2.imencode('.jpg',
                                 cv2.cvtColor(img_np,
                                              cv2.COLOR_BGR2RGB))[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
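
A minimal route that could serve this generator as an MJPEG stream, following the standard Flask pattern; the route name is an assumption.

from flask import Response

@app.route('/video_feed')  # hypothetical route name
def video_feed():
    # multipart/x-mixed-replace makes the browser replace each JPEG part
    # emitted by gen_livestream() in place, producing a live view
    return Response(gen_livestream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')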
Code Example #11
def draw_subtitles(clip):
    """
    Main subtitles drawing function
    Responds for sound division, recognizing and drawing
    :param clip: VideoFile instance containing initial video
    :return: the final version of VideoClip
    """
    # Creates temp directory for storing temporary files
    if not os.path.exists('temp'):
        os.mkdir('temp')
    else:
        cleanup()
    # Arial is used for drawing the subtitles; any other font would work
    font = ImageFont.truetype('fonts/arial.ttf', size=clip.size[1]//30)

    # Gets a sequence of durations after finding close to optimal sound division
    durations = get_durations(clip)

    # Writes audio files for previously specified durations to be then
    # processed via speech_recognition package
    audio_filenames = extract_audio(clip, durations)
    subtitles = recognize(audio_filenames)  # text for corresponding duration
    cleanup()

    clips = []
    start = 0
    # Creates distinct clips for each duration and concatenates them
    for duration, text in zip(durations, subtitles):
        end = start + duration if start + duration < clip.duration else clip.duration

        subclip = clip.subclip(start, end)
        clips.append(add_overlay(subclip, text, font))

        start += duration

    clip = mp.concatenate_videoclips(clips)
    cleanup('text.png')
    if os.path.exists('temp'):
        os.rmdir('temp')
    return clip
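
A usage sketch, assuming moviepy is imported as mp (which the mp.concatenate_videoclips call above suggests); the file names are placeholders.

import moviepy.editor as mp

clip = mp.VideoFileClip("input.mp4")          # hypothetical input path
subtitled = draw_subtitles(clip)
subtitled.write_videofile("subtitled.mp4")    # hypothetical output path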
Code Example #12
def main():
    # Turn off hotword detector
    detector.terminate()

    # Ask
    tts.speak("네! 주인님")
    play_file()

    # Listen
    print("Listening...")
    sentence = recognizer.recognize()

    # Processing
    print("Processing...")
    if sentence[0]:
        # answer = dialogflow.ask(sentence[1])
        confidence = classifier.getProbability(
            sentence[1], classifier.classify(sentence[1]))
        intention = classifier.classify(sentence[1])

        # FEATURES
        if intention in features:
            answer = features[intention]()
        # CONVERSATION
        elif confidence > 0.21:  # confidence boundary
            response = classifier.response(sentence[1])
            answer = response
        # NO ANSWER
        else:
            answer = "잘 모르겠어요"

        if tts.speak(answer):
            play_file()
    else:
        if tts.speak(sentence[1]):
            play_file()

    # Turn back on hotword detector
    listen()
Code Example #13
File: rest.py Project: amatig/jDjango
            __model = getattr(__module, model)

    result = []
    if __model:
        dict_args = {}
        try:
            dict_args = dict(zip(args[:-1:2], args[1::2]))
        except Exception, e:
            print e

        if not request.GET and hasattr(__model, "objects") and hasattr(__model.objects, method):
            try:
                result = getattr(__model.objects, method)(**dict_args)
            except Exception, e:
                print e
        elif request.GET and hasattr(__model, method):
            # if GET data is present, it is used to fetch an
            # instance and run the operation on that instance
            try:
                filter_args = {}
                for k, v in request.GET.items():
                    filter_args.update({str(k): str(v)})

                obj = __model.objects.get(**filter_args)  # fetch the instance
                result = getattr(obj, method)(**dict_args)
            except Exception, e:
                print e

    data = recognize(result, app_label, model, method)
    return HttpResponse(data, mimetype="application/json")
Code Example #14
import sys

import cv2
# face_extraction, mask_detect and recognize come from the project's own modules

img_path = ''
if len(sys.argv) > 1:
    img_path = sys.argv[1]
else:
    sys.exit(0)  # no image path supplied

frame = cv2.imread(img_path)
faces, locs = face_extraction(frame)
for face, loc in zip(faces, locs):
    pred = mask_detect(face)
    (mask, withoutMask) = pred[0]
    (startX, startY, endX, endY) = loc
    if mask < withoutMask:
        face_id = recognize(face)  # 0 means no known face was matched
        name = 'Unknown' if face_id == 0 else face_id
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
        cv2.putText(frame, f'Not Safe: {name}', (startX + 1, startY - 2),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    else:
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
        cv2.putText(frame, 'Safe', (startX + 1, startY - 2),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

cv2.imshow('Name', frame)
key = cv2.waitKey(5000)
Code Example #15
def upload():
    imagefile = Image.open(request.files['file'])
    imagefile.save("pic.jpg")
    r.recognize()  # assumed to write its annotated output to 'result.jpg'
    filename = 'result.jpg'
    return send_file(filename, mimetype='image/jpg')
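
A client sketch for this view; the host, port, and /upload route are assumptions. It saves the returned annotated JPEG to disk.

import requests

with open("photo.jpg", "rb") as fh:  # hypothetical input image
    resp = requests.post("http://localhost:5000/upload", files={"file": fh})

with open("result_local.jpg", "wb") as out:
    out.write(resp.content)  # the endpoint returns the annotated JPEG bytes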
Code Example #16
File: yirendai.py Project: zhangchaolts/Captcha
def sign(username, password):

	# get a CookieJar object (holds this machine's cookie state)
	cj = cookielib.CookieJar()
	#httpHandler = urllib2.HTTPHandler(debuglevel=1)
	#httpsHandler = urllib2.HTTPSHandler(debuglevel=1)
	# build a custom opener bound to the CookieJar
	opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj), RedirectHandler)
	# install the opener so all later urlopen() calls use it
	urllib2.install_opener(opener)

	# Step 1: fetch the login page to obtain the session cookies
	url = 'https://www.yirendai.com/auth/login/home'
	html = urllib2.urlopen(url).read()

	content = urllib2.urlopen('https://p.yixin.com/randomCode?t=').read()
	fw = open('temp.jpg', 'wb')  # binary mode for image data
	fw.write(content)
	fw.close()

	dir_train_pics = '/search/zhangchao/captcha/pics/yirendai/train_pics'
	authcode = recognizer.recognize('temp.jpg', dir_train_pics)

	print "recognize captcha done!"

	# Step 2: log in
	login_url = "https://p.yixin.com/dologin.jhtml"

	login_data = {	"fromSite" : "YRD", \
					"username": username, \
					"password": password, \
					"authcode": authcode, \
					"rememberMe": "0" \
				}

	login_post_data = urllib.urlencode(login_data) 

	login_headers = {	"Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", \
						"Accept-Encoding" : "gzip, deflate", \
						"Accept-Language" : "zh-CN,zh;q=0.8", \
						"Cache-Control" : "max-age=0", \
						"Connection" : "keep-alive", \
						"Content-Length" : "82", \
						"Content-Type" : "application/x-www-form-urlencoded", \
						#"X-Requested-With" : "XMLHttpRequest", \
						"Host" : "p.yixin.com", \
						"HTTPS" : "1", \
						"Origin" : "https://www.yirendai.com", \
						"Referer" : "https://www.yirendai.com/auth/login/home", \
						"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.69 Safari/537.36 QQBrowser/9.0.3100.400" \
					}

	login_request = urllib2.Request(login_url, login_post_data, login_headers)
	#login_request.get_method = lambda: 'HEAD'

	location = ""

	try:
		login_response = opener.open(login_request).read()
	except urllib2.URLError, e:
		location = e.hdrs['Location']
		print location
Code Example #17
File: g.py Project: psyclone20/Agent-GPY
myVar = 0
tres1 = 160
tres_gap = 10

minLineLength = 40
maxLineGap = 5
threshold = 1

while True:
    myVar = myVar + 1
    screen = cv2.cvtColor(
        np.array(grabscreen.grab_screen(region=(0, 30, 800, 540))),
        cv2.COLOR_BGR2RGB)

    startX, startY, endX, endY = recognizer.recognize(screen, args["team"])

    if startX != -1 and startY != -1:
        # for objectCoord in objectArray:
        # print(objectCoord[1], objectCoord[2], objectCoord[3], objectCoord[4])
        # square_in(objectCoord[1], objectCoord[2], objectCoord[3], objectCoord[4], WINDOW_START_X+(WINDOW_WIDTH/2), WINDOW_START_Y+(WINDOW_HEIGHT/2))
        # flickMovementThread = threading.Thread(target=flick_movement, args=[startX, startY, endX, endY])
        # flickMovementThread.start()
        print("Shoot him!!!!!!!!!!")
        flick_movement(startX, startY, endX, endY)
        # flick_movement(objectCoord[1], objectCoord[2], objectCoord[3], objectCoord[4])
        # break
    # print('-------------------------------------------------- ', myVar)

    else:
        pass  # (else branch not included in this excerpt)
Code Example #18
File: main.py Project: zhangchaolts/Captcha
	for pic_ptr in xrange(deal_number):

		pic_ptr_str = str('%04d' % pic_ptr)
		image_path = dir_path_base + pic_ptr_str + '.jpg'

		pic = Image.open(image_path)
		pic_preprocessed = preprocessor.preprocess(pic)

		output_path = dir_path_step + str(pic_step1) + '/' + pic_ptr_str + '_' + str(pic_step1) + '.jpg'
		print output_path
		pic_preprocessed.save(output_path)

		block_array = []
		spliter.split(pic_preprocessed, block_array)
		for i in xrange(len(block_array)):
			output_path = dir_path_step + str(pic_step2) + '/' + pic_ptr_str + '_' + str(pic_step2) + '_' + str(i) + '.jpg'
			print output_path
			block_array[i].save(output_path)

	for pic_ptr in xrange(deal_number):

		pic_ptr_str = str('%04d' % pic_ptr)
		image_path = dir_path_base + pic_ptr_str + '.jpg'

		captcha = recognizer.recognize(image_path, dir_path_train)
		if captcha != "":
			pic = Image.open(image_path)
			output_path = dir_path_step + str(pic_step3) + '/' + pic_ptr_str + '_' + str(pic_step3) + '_' + captcha + '.jpg'
			pic.save(output_path)

Code Example #19
File: main.py Project: karamvol/orsocr
def orsocr_core(filename):
    """
    This function will handle the core OCR processing of images.
    """
    return recognize(filename)
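
A sketch of a call site; the image path is a placeholder, and recognize is the project's own OCR helper.

text = orsocr_core("scanned_page.png")  # hypothetical input image
print(text)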
Code Example #20
def add_labels(image):
    labels = recognizer.recognize(image)
    recognized_image = recognizer.add_labels(image, labels)
    return recognized_image
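
A usage sketch, assuming the recognizer accepts the NumPy arrays that cv2.imread returns; the paths are placeholders.

import cv2

image = cv2.imread("street.jpg")              # hypothetical input path
labeled = add_labels(image)
cv2.imwrite("street_labeled.jpg", labeled)    # hypothetical output path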
Code Example #21
File: main2.py Project: gujiafan/IV-C-raspberrypi
        f = open('file_recvd.wav', 'wb')
        while True:
            try:
                data = connect.recv(bufsize)
            except:
                break
            if not data:
                break
            f.write(data)
        f.close()
        print("File received.")
        connect.close()
        recog = 0
        print("Start recognizing...")
        try:
            result = recognizer.recognize("file_recvd.wav")
            recog = 1
            print("Found")
        except:
            print("Not found")

        print("Waiting for connection...")
        connect, addr = s.accept()
        print("Connected from:", addr)

        if recog == 1:
            songName = result
            # encode() returns new bytes rather than mutating the string,
            # so send the encoded result
            connect.send(songName.encode("GB2312"))
            connect.close()
            continue
Code Example #22
    def face_detect(self, image, recognizer):
        results = recognizer.recognize(image)
        print('face detected:' + str(results))
        if results and 'obama' in results:
            print('obama detected')