def webcam():
    if request.method == 'GET':
        return render_template('upload.html')
    elif request.method == 'POST':
        image_b64 = request.form['img']
        print(type(image_b64))
        robo_talker.talker(image_b64)

        # Wait for the detection result to come back
        time.sleep(1)

        result = ''
        is_fatigue = ''
        mar = ''
        ear = ''

        if robo_talker.f_img is not None:
            fat_result_path = robo_talker.f_img
            is_fatigue, mar, ear = fat_result_path.split(',')
            robo_talker.f_img = None

        if robo_talker.drive_state is not None:
            result = robo_talker.drive_state
            robo_talker.drive_state = None

        return 'state: {0}, is_fatigue: {1}, mar: {2}, ear: {3}'.format(
            result, is_fatigue, mar, ear)
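These view functions assume a Flask application object and route registration that the excerpt does not show. Below is a minimal sketch of that assumed wiring; only `robo_talker` and the handler name come from the snippets, while the route path, port and threaded flag are illustrative guesses rather than the original setup.

from flask import Flask, request, render_template  # used inside the handler bodies

import robo_talker  # bridge object/module the handlers poll for results

app = Flask(__name__)

@app.route('/webcam', methods=['GET', 'POST'])  # path is an assumption
def webcam():
    ...  # body as in the example above

if __name__ == '__main__':
    # Run threaded so the result fields on robo_talker can be filled by a
    # background callback while a request handler is sleeping (assumed behaviour).
    app.run(host='0.0.0.0', port=5000, threaded=True)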
Example No. 2
def upload():
    print('getting data from web.')
    if request.method == 'POST':
        image_b64 = request.form['img']
        robo_talker.talker(image_b64)
        imgdata = base64.b64decode(image_b64)
        with open('org_img.jpg', 'wb') as f:
            f.write(imgdata)
        imgdata = cv2.imread('org_img.jpg')

        imgdata = cv2.cvtColor(imgdata, cv2.COLOR_BGR2GRAY)
        face_cascade = cv2.CascadeClassifier(
            r'./haarcascade_frontalface_default.xml')
        faces = face_cascade.detectMultiScale(
            imgdata,
            scaleFactor=1.15,
            minNeighbors=5,
            minSize=(5, 5)  # ,
            # flags=cv2.CV_HAAR_SCALE_IMAGE
        )
        for i, (x, y, w, h) in enumerate(faces):
            cv2.rectangle(imgdata, (x, y), (x + w, y + h), (255, 0, 0), 2)
            gray_face = imgdata[y:y + h, x:x + w]
            gray_face_vector = extract_face_features(gray_face)
            gray_face = gray_face_vector.reshape((96, 96))
            cv2.imwrite('./gray_face.jpg', gray_face)

        time.sleep(3)
        if robo_talker.MSG is not None:
            result = robo_talker.MSG
            robo_talker.MSG = None
            return "result:" + result
        else:
            return "Unable to recognize"
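Example No. 2 calls `extract_face_features`, which is not defined in the excerpt. Judging from the `reshape((96, 96))` applied to its return value, it presumably resizes the grayscale face crop to 96x96 and flattens it. The following is only a hypothetical sketch of such a helper, not the original implementation.

import cv2

def extract_face_features(gray_face, size=(96, 96)):
    # Hypothetical helper (not in the excerpt): resize the grayscale face
    # crop to 96x96 and return it as a flat vector, so the caller can
    # reshape it back to (96, 96) before saving it or feeding a model.
    resized = cv2.resize(gray_face, size, interpolation=cv2.INTER_AREA)
    return resized.reshape(-1)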
Example No. 3
def upload():
    print('getting data from web.')
    if request.method == 'POST':
        image_b64 = request.form['img']
        robo_talker.talker(image_b64)
        imgdata = base64.b64decode(image_b64)
        with open('org_img.jpg', 'wb') as f:
            f.write(imgdata)

        time.sleep(3)
        if robo_talker.MSG is not None:
            result = robo_talker.MSG
            robo_talker.MSG = None
            return "result:" + result
        else:
            return "Unable to recognize"
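All of the upload handlers expect the frame as a base64 string in the `img` form field. One quick way to exercise such an endpoint from Python is sketched below; the URL, port and file name are assumptions for illustration only.

import base64
import requests

# Hypothetical test client: post one JPEG frame to the upload endpoint.
with open('frame.jpg', 'rb') as f:
    image_b64 = base64.b64encode(f.read()).decode('ascii')

resp = requests.post('http://localhost:5000/upload',  # URL is an assumption
                     data={'img': image_b64})
print(resp.text)  # e.g. "result:..." or "Unable to recognize"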
Example No. 4
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release everything once the job is finished
cap.release()
out.release()
cv2.destroyAllWindows()

# Start waiting for the result
print("waiting...")
message = String()
message.data = "video detection start"
robo_talker.talker(message)


# Then check the result
if robo_talker.MSG is not None:
    result = robo_talker.MSG
    robo_talker.MSG = None
    print("result:" + result)
else:
    print("Unable to recognize")

'''
videoCapture = cv2.VideoCapture()
videoCapture.open('output.avi')

fps = videoCapture.get(cv2.CAP_PROP_FPS)
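Every example hands data to `robo_talker.talker()` and then polls attributes such as `robo_talker.MSG` for the answer. The module itself is not shown; given the name and the `String()` message used above, it looks like a ROS publish/subscribe bridge. The sketch below is purely hypothetical: the topic names, node name and message types are assumptions, not the original code.

import rospy
from std_msgs.msg import String

# Result slots the web handlers poll (filled by the subscriber callback).
MSG = None
f_img = None
drive_state = None

rospy.init_node('web_bridge', anonymous=True, disable_signals=True)
_pub = rospy.Publisher('image_b64', String, queue_size=1)  # topic is assumed

def _on_result(msg):
    global MSG
    MSG = msg.data

rospy.Subscriber('detect_result', String, _on_result)  # topic is assumed

def talker(data):
    # Accept either a plain string or a std_msgs String, as in the examples.
    msg = data if isinstance(data, String) else String(data=data)
    _pub.publish(msg)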
Example No. 5
def upload():
    print('getting data from web.')
    if request.method == 'POST':
        image_b64 = request.form['img']
        robo_talker.talker(image_b64)
        imgdata = base64.b64decode(image_b64)
        with open('img/org_img.jpg', 'wb') as f:
            f.write(imgdata)
        # imgdata = cv2.imread('img/org_img.jpg')

        # imgdata = cv2.cvtColor(imgdata, cv2.COLOR_BGR2GRAY)
        # face_cascade = cv2.CascadeClassifier(r'./haarcascade_frontalface_default.xml')
        # faces = face_cascade.detectMultiScale(
        #                                     imgdata,
        #                                     scaleFactor=1.15,
        #                                     minNeighbors=5,
        #                                     minSize=(5, 5)  # ,
        #                                             # flags=cv2.CV_HAAR_SCALE_IMAGE
        #                                                 )
        # for i, (x, y, w, h) in enumerate(faces):
        #     cv2.rectangle(imgdata, (x, y), (x + w, y + w), (255, 0, 0), 2)
        #     gray_face = imgdata[y:y+h, x:x+w]
        #     gray_face_vector = extract_face_features(gray_face)
        #     gray_face = gray_face_vector.reshape((96, 96))
        #     cv2.imwrite('./gray_face.jpg', gray_face)

        global db_conn
        time.sleep(5)
        if robo_talker.MSG is not None:
            result = robo_talker.MSG
            robo_talker.MSG = None

            # DB handler: check whether this licence plate is already in the Cars table
            sql = "select * from Cars where licence = %s"
            car = None
            with db_conn.cursor() as cursor:
                cursor.execute(sql, (result,))
                car = cursor.fetchone()
                db_conn.commit()
            print(car)
            if car is None:
                cur_time = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')
                print(cur_time)
                sql = "insert into Cars (licence, time) values(%s, %s);"
                with db_conn.cursor() as cursor:
                    cursor.execute(sql, (result, cur_time))
                    db_conn.commit()
                return "Welcome~ " + result + "\nEnter at " + cur_time
            else:
                park_time = datetime.datetime.strptime(car['time'],
                                                       '%Y-%m-%d %H:%M:%S')
                cur_time = datetime.datetime.now()
                sql = "delete from Cars where licence = %s"
                with db_conn.cursor() as cursor:
                    cursor.execute(sql, (result,))
                    db_conn.commit()
                return result + " has parked for " + str(
                    round((cur_time - park_time).total_seconds() / 60, 2)) + " minutes"
        else:
            return "Can't recognize"
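Example No. 5 relies on a global `db_conn` whose setup is not shown. The handler indexes the fetched row by column name (`car['time']`), which points at a driver with a dict cursor such as pymysql; the connection parameters, database name and exact Cars schema below are assumptions, not the original configuration.

import pymysql

# Hypothetical connection matching the dict-style row access in the handler.
db_conn = pymysql.connect(host='localhost',
                          user='root',
                          password='password',       # placeholder credentials
                          database='parking',        # assumed database name
                          charset='utf8mb4',
                          cursorclass=pymysql.cursors.DictCursor)

# Assumed table layout: the handler parses car['time'] with strptime, so the
# time column is stored as text here rather than as a DATETIME.
# CREATE TABLE Cars (
#     licence VARCHAR(32) PRIMARY KEY,
#     time    VARCHAR(19) NOT NULL
# );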