Example #1
0
File: server.py  Project: webVJ/models
def main(input_img):
    """Detect faces and estimate gazes in a base64-style image payload.

    Returns the project's JSON output (faces, features, gazes) built by
    lib.to_output_json.
    """
    # Drop everything up to and including the first comma — presumably a
    # data-URL prefix such as "data:image/png;base64," (TODO confirm).
    encoded_payload = input_img[input_img.find(",") + 1:]
    decoded = stringToImage(encoded_payload)
    frame = np.array(decoded)

    annotated, detected_faces, detected_features = (
        features.extract_image_features(frame))
    gazes = gaze.test_faces(annotated, detected_faces, detected_features)

    return lib.to_output_json(detected_faces, detected_features, gazes)
Example #2
0
def main(input_img):
    """Detect faces and estimate gazes in a base64-style image payload.

    Parameters
    ----------
    input_img : str
        Image payload; everything before the first comma is stripped
        (presumably a data-URL prefix — TODO confirm against caller).

    Returns
    -------
    dict
        {"estimated_gazes": [...]} where each gaze is a plain Python list.
    """
    pil_image = stringToImage(input_img[input_img.find(",") + 1:])
    bgr_image = np.array(pil_image)
    img, faces, face_features = features.extract_image_features(bgr_image)
    estimated_gazes = gaze.test_faces(img, faces, face_features)

    # Comprehension instead of the manual append loop (ruff PERF401);
    # .tolist() makes each gaze JSON-serializable.
    results = [gaze_detected.tolist() for gaze_detected in estimated_gazes]

    return {"estimated_gazes": results}
    def __init__(self, src=0):
        """Open the webcam stream *src* and analyze one initial frame."""
        # Spin up the threaded camera reader and grab a first frame so the
        # instance has data before the update loop starts.
        self.webcam_stream = WebcamVideoStream(src).start()

        first_frame, first_time = self.webcam_stream.read()
        self.frame_time = first_time

        # The camera may not have produced a frame yet; only run feature
        # extraction when it did.
        if first_frame is not None:
            (self.img,
             self.faces,
             self.face_features) = extract_image_features(first_frame)

        # Flag used to signal the reader loop that it should stop.
        self.stopped = False
    def update(self):
        """Continuously read frames and refresh face data until stopped."""
        # Guard-clause loop: re-check the stop flag before every read and
        # fall out of the loop (returning None) once it is set.
        while not self.stopped:
            frame, frame_time = self.webcam_stream.read()
            self.frame_time = frame_time

            # Skip feature extraction whenever the camera yields no frame.
            if frame is not None:
                (self.img,
                 self.faces,
                 self.face_features) = extract_image_features(frame)
Example #5
0
    def __next__(self):
        """Advance one frame; every `multiplier`-th frame, re-estimate gaze.

        Returns (frame, last_gaze, detected_faces, detected_features).
        """
        self.frameID += 1
        outputs = []
        ret, frame = self.cap.read()
        img, dfaces, dface_features = extract_image_features(frame)

        # Gaze estimation is throttled: it only runs on frames whose index
        # is a multiple of self.multiplier.
        if self.frameID % self.multiplier == 0:
            outputs = show_gaze(frame)
            print(self.frameID)
            current_gaze = detect_gaze(frame, outputs, 1280, 720, 12, 720)

            # Count how long the same gaze target has persisted; reset the
            # counter as soon as the target changes.
            if self.g == current_gaze:
                self.timer += 1
            else:
                self.timer = 0
            print('last for: %i seconds' % self.timer)

            self.g = current_gaze

        return frame, self.g, dfaces, dface_features
Example #6
0
def show_gaze(img):
    """Extract face features from *img*, then run the gaze image test."""
    annotated, detected_faces, detected_features = extract_image_features(img)
    return test_imgs(annotated, detected_faces, detected_features)
Example #7
0
def extract_features_and_detect_gazes(img):
    """Extract face features from *img* and return per-face gaze results."""
    annotated, detected_faces, detected_features = extract_image_features(img)
    return test_faces(annotated, detected_faces, detected_features)
Example #8
0
from WebcamVideoStream import WebcamVideoStream
from features import extract_image_features, draw_detected_features
from lib import current_time
import numpy as np

import cv2

# Open the default camera and request 1280x720 frames
# (property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT).
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)

try:
    # Grab up to 99 frames, saving each raw frame and an annotated copy.
    for frame_idx in range(1, 100):
        _, img = cap.read()
        if img is None:
            # Camera produced nothing this iteration — try the next one.
            continue

        print('writing frame', frame_idx)
        cv2.imwrite('output/frame-' + str(frame_idx) + '.png', img)

        img, faces, face_features = extract_image_features(img)
        images_with_features = np.copy(img)
        draw_detected_features(images_with_features, faces, face_features)
        cv2.imwrite('output/frame-' + str(frame_idx) + '-features.png',
                    images_with_features)
finally:
    # Release the camera even if a read/write above raises.
    cap.release()
Example #9
0
def ex(img):
    """Extract face features from *img*, then run the gaze image test."""
    annotated, detected_faces, detected_features = extract_image_features(img)
    return test_imgs(annotated, detected_faces, detected_features)