class GazeExtractor:
    def __init__(self):
        self.cv_bridge = CvBridge()
        self.gaze = GazeTracking()
        self.publish_annotated_frame = rospy.get_param(
            "~publish_annotated_frame", True)
        if self.publish_annotated_frame:
            self.annotated_frame_publisher = rospy.Publisher(
                'image_annotated_raw', Image, queue_size=10)
        self.gaze_publisher = rospy.Publisher('gaze_state',
                                              GazeState,
                                              queue_size=10)

    def extract_from_image(self, img_msg):
        # Convert the ROS image message to an OpenCV image
        try:
            cv_image = self.cv_bridge.imgmsg_to_cv2(img_msg, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        # Run gaze detection
        self.gaze.refresh(cv_image)

        # If desired, publish the annotated frame
        if self.publish_annotated_frame:
            annotated_image_msg = self.cv_bridge.cv2_to_imgmsg(
                self.gaze.annotated_frame(), "bgr8")
            self.annotated_frame_publisher.publish(annotated_image_msg)

        # If no pupils were detected, stop here
        if not self.gaze.pupils_located:
            return

        # Pack the gaze tracking result into a GazeState message and publish
        result_msg = GazeState()
        result_msg.header = img_msg.header
        result_msg.is_left = self.gaze.is_left()
        result_msg.is_right = self.gaze.is_right()
        result_msg.is_center = self.gaze.is_center()
        result_msg.is_blinking = self.gaze.is_blinking()
        left_pupil = self.gaze.pupil_left_coords()
        right_pupil = self.gaze.pupil_right_coords()
        result_msg.pupil_left_coords.x = left_pupil[0]
        result_msg.pupil_left_coords.y = left_pupil[1]
        result_msg.pupil_right_coords.x = right_pupil[0]
        result_msg.pupil_right_coords.y = right_pupil[1]
        result_msg.horizontal_ratio = self.gaze.horizontal_ratio()
        result_msg.vertical_ratio = self.gaze.vertical_ratio()

        self.gaze_publisher.publish(result_msg)
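
The class above is only the callback side; a minimal node entrypoint that wires extract_from_image to an incoming image topic might look like the sketch below. The node name ('gaze_extractor') and input topic ('image_raw') are assumptions for illustration, not part of the original snippet.

# Hypothetical wiring sketch; node and topic names are assumptions.
if __name__ == '__main__':
    rospy.init_node('gaze_extractor')
    extractor = GazeExtractor()
    # Feed incoming camera frames into the extractor's callback.
    rospy.Subscriber('image_raw', Image, extractor.extract_from_image,
                     queue_size=1, buff_size=2 ** 24)
    rospy.spin()
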
Example #2
                if x1 >= 10:

                    theframe = frame[int(bbox_head[1]):int(bbox_head[3]),
                                     int(bbox_head[0]):int(bbox_head[2])]

                    gaze.refresh(theframe)
                    theframe = gaze.annotated_frame()
                    text = ""

                    if gaze.is_blinking():
                        text = "Blinking"
                        cv2.putText(image, text, (x1, y2 + dy + 45),
                                    cv2.FONT_HERSHEY_DUPLEX, 0.5,
                                    (147, 58, 31), 2)

                    h_ratio = gaze.horizontal_ratio()
                    v_ratio = gaze.vertical_ratio()

                    if h_ratio is None or v_ratio is None:
                        continue
                    mygaze = np.asarray((h_ratio, v_ratio)).astype(float)

                    bbox, eyes = identity_next[0]
                    eyes = np.asarray(eyes).astype(float)
                    eyes[0], eyes[1] = eyes[0] / float(
                        frame.shape[1]), eyes[1] / float(frame.shape[0])

                    pt1 = (int(eyes[0] * WIDTH), int(eyes[1] * HEIGHT))
                    arrow_size = 200
                    pt2 = (int(eyes[0] * WIDTH + arrow_size *
                               (mygaze[0] - 0.7)),
Example #3
def main():
    m = get_monitors()[0]
    h, w = m.height, m.width
    h = ((h - (h % 3)) // 3) - 45  # adjust for different screen sizes
    w = ((w - (w % 3)) // 3) - 35

    sg.theme('DarkGrey3')
    layout = [[
        sg.Frame('', [[
            sg.Image(
                'images/test.png', size=(w, h), key='img' + str(fr * 3 + fc))
        ]],
                 pad=(5, 5),
                 background_color='yellow',
                 key='frm' + str(fr * 3 + fc)) for fc in range(3)
    ] for fr in range(3)]
    layout.extend([[sg.Button('Exit', size=(10, 1), font='Helvetica 14')]])
    # update an image with update(filename='')
    # enable events for images so they're clickable
    window = sg.Window('GazePOD', layout, finalize=True, resizable=True)
    #window.Maximize()

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    calibrate(gaze, webcam, window)

    h = []
    v = []
    d = []
    i = 0
    while True:
        #get event or timeout
        e, val = window.read(timeout=TO)
        if e == 'Exit' or e == sg.WIN_CLOSED:
            break
        elif e == 'Calibrate':
            calibrate(gaze, webcam, window)
            continue

        #get frame and gaze
        _, frame = webcam.read()
        gaze.refresh(frame)
        sh = gaze.horizontal_ratio()
        sv = gaze.vertical_ratio()

        #add to queue
        if sh is not None:
            h.append(sh)
            if len(h) > QLEN:
                h.pop(0)
        else:
            continue
        if sv is not None:
            v.append(sv)
            if len(v) > QLEN:
                v.pop(0)
        else:
            continue

        #get average gaze direction
        if i == QLEN:
            i = -QLEN
            s = get_dir(mean(h), mean(v))
            key = 'img' + str(s)
            window[key].update(visible=False)
            d.append(s)
            if len(d) > DLEN:
                d.pop(0)
        else:
            i += 1
            continue

        #select image
        if d.count(d[0]) == len(d):
            #select direction
            key = 'img' + str(d[0])
            window[key].update(visible=True)
        else:
            #set background color of d[0]
            pass

    window.close()
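
get_dir, QLEN, DLEN and TO are defined elsewhere in this project. A plausible sketch of get_dir, assuming GazeTracking's convention that horizontal_ratio runs from roughly 1.0 (far left) to 0.0 (far right) and vertical_ratio from 0.0 (top) to 1.0 (bottom), could map the mean ratios onto the 3x3 grid keys used above:

# Plausible sketch only; the original get_dir is not shown in this excerpt.
def get_dir(h_ratio, v_ratio):
    # Column: horizontal_ratio grows toward the left, so invert it.
    col = 2 - min(int(h_ratio * 3), 2)
    # Row: vertical_ratio grows toward the bottom.
    row = min(int(v_ratio * 3), 2)
    return row * 3 + col  # matches the 'img' + str(fr * 3 + fc) keys
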
Example #4
    else:
        smooth_attentiveness = np.round((1.0 - beta) * attentiveness + beta * smooth_attentiveness, 1)
        average_attentiveness = np.round((1. / frame_count) * attentiveness + (1. - 1. / frame_count) * average_attentiveness, 1)



    update_dt = datetime.now()

    print("attentiveness is {}, smooth attentiveness is {} and average attentiveness is {}"\
          .format(attentiveness, smooth_attentiveness, average_attentivenss))

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    if gaze.pupils_located:
        v_ratio = np.round(gaze.vertical_ratio(), 2)
        h_ratio = np.round(gaze.horizontal_ratio(), 2)
    else:
        v_ratio, h_ratio = 0, 0


    image_landmarks, lip_distance = mouth_open(frame, landmarks)
    cv2.imshow('Live Landmarks', image_landmarks)


    cv2.putText(frame, "Attentiveness: " + str(attentiveness), (15, 95), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    # cv2.putText(frame, "Left pupil:  " + str(left_pupil), (30, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    # cv2.putText(frame, "Right pupil: " + str(right_pupil), (30, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Gaze direction: " + str(gaze_text), (15, 130), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, "Head orientation: " + str(direction), (15, 165), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    #cv2.putText(frame, "v & h ratios: " + str(v_ratio) + " "+ str(h_ratio), (30, 200), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    #cv2.putText(frame, "Sleep counter: " + str(sleep_frames_counter), (15, 200), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
Example #5
def gaze():

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    distraction_point = 0
    photo_block = []
    # State used by the distraction measure below
    distance = []
    curr_location = [0, 0]
    prev_second2 = -1

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        # frame = gaze.annotated_frame()
        tm = time.localtime()  # refresh the clock every iteration
        text = ""

        if gaze.is_blinking():
            text = "Blinking"
            # print("blinking")

        if gaze.is_right():
            text = "Looking right"
            # print("right")

        elif gaze.is_left():
            text = "Looking left"
            # print("left")

        elif gaze.is_center():
            text = "Looking center"
            # print("center")

        ## Start of modifications

        hori_ratio = gaze.horizontal_ratio()
        verti_ratio = gaze.vertical_ratio()

        try:

            if curr_location == [0, 0]:
                curr_location = [hori_ratio, verti_ratio]
                print(curr_location)
            else:
                prev_location = curr_location
                curr_location = [hori_ratio, verti_ratio]
                hori_diff = curr_location[0] - prev_location[0]
                verti_diff = curr_location[1] - prev_location[1]

                if prev_second2 == -1:
                    prev_second2 = tm.tm_sec
                    print(prev_second2)
                else:
                    curr_second2 = tm.tm_sec
                    if curr_second2 - prev_second2 == 1 or curr_second2 - prev_second2 < 0:
                        distance.append((hori_diff**2) + (verti_diff**2))
                        prev_second2 = curr_second2

                        if len(photo_block) < 3:
                            photo_block.append((hori_diff**2))

                # The window length and threshold below are arbitrary values
                if len(distance) > 59:
                    if sum(distance) > 1:
                        print('Distracted')
                        distraction_point += 1
                        distance = distance[1:]

        except TypeError:
            # The ratios are None when no pupils are detected
            curr_location = [0.5, 0.5]
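
Pulled out of the loop, the distraction measure above is just a rolling sum of squared per-second gaze displacement. A standalone sketch, reusing the snippet's own (arbitrary) window length and threshold:

def distraction_score(samples, window=60, threshold=1.0):
    # samples: list of (h_ratio, v_ratio) pairs taken once per second
    diffs = [(h2 - h1) ** 2 + (v2 - v1) ** 2
             for (h1, v1), (h2, v2) in zip(samples, samples[1:])]
    return sum(diffs[-window:]) > threshold
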
Example #6
import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    # We get a new frame from the webcam
    _, frame = webcam.read()

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""
    print("Horizontal is")
    print(gaze.horizontal_ratio())
    print("Vertical is")
    print(gaze.vertical_ratio())
    '''if gaze.is_right() and gaze.is_up():
        text = "Looking right and up"
    elif gaze.is_left() and gaze.is_up():
        text = "Looking left and up"
    elif gaze.is_right() and gaze.is_down():
        text = "Looking right and down"
    elif gaze.is_left() and gaze.is_down():
        text = "Looking left and down"'''
    if gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_up():
Example #7
File: test.py Project: rafitj/gaze
while True:
    _, frame = webcam.read()
    
    gaze.refresh(frame)
    frame = face_detector(frame)
    
    frame = gaze.annotated_frame()
    overlay = frame.copy()
    
    frame_count += 1
    frame_refresh = (frame_count % 10 == 0)

    if gaze.horizontal_ratio() is not None and gaze.vertical_ratio() is not None:
        temp_x = int(1280*(1-gaze.horizontal_ratio()))
        temp_y = int(720*gaze.vertical_ratio())
        x_pts.append(temp_x)
        y_pts.append(temp_y)
        if len(x_pts) > 10 or len(y_pts) > 10:
            del x_pts[0]
            del y_pts[0]
    
    avg_x = int(statistics.mean(x_pts)) if x_pts else None
    avg_y = int(statistics.mean(y_pts)) if y_pts else None

    if frame_refresh:
        if avg_x is not None and avg_y is not None:
            hor_pts.append(avg_x)
            vert_pts.append(avg_y)
Example #8
    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""

    if gaze.is_blinking():
        text = "Blinking"
    elif gaze.is_right():
        text = "Looking right"
    elif gaze.is_left():
        text = "Looking left"
    elif gaze.is_center():
        text = "Looking center"

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                (147, 58, 31), 2)

    h_ratio = gaze.horizontal_ratio()
    v_ratio = gaze.vertical_ratio()
    cv2.putText(frame, "Horizontal ratio: " + str(h_ratio), (90, 130),
                cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Vertical ratio:   " + str(v_ratio), (90, 165),
                cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    cv2.imshow("Demo", frame)

    if cv2.waitKey(1) == 27:
        break
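
Both ratios come back as None whenever pupils_located is False, so the overlay above can end up printing "None" on the frame. A small formatting helper (an addition for illustration, not part of the original) keeps it readable:

def fmt_ratio(value):
    # horizontal_ratio()/vertical_ratio() return None when no pupil is found
    return "n/a" if value is None else "{:.2f}".format(value)

It would be used in place of str(), e.g. cv2.putText(frame, "Horizontal ratio: " + fmt_ratio(h_ratio), ...).
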
Example #9
def calculate_cog_load():
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)

    pupil_position = Pupil_position('center', 0)

    t = dt.datetime.now()
    start_time = dt.datetime.now()
    blink_count = 0
    saccades = 0
    pupil_dilation_x = []
    pupil_dilation_y = []
    fixations = [0]
    minute = 0
    blink_rate = 0
    saccades_rate = 0
    pup_dil_x = 0
    pup_dil_y = 0
    fixation_avg = 0
    cogload = 0

    while True:
        # We get a new frame from the webcam
        _, frame = webcam.read()

        # We send this frame to GazeTracking to analyze it
        gaze.refresh(frame)

        response = requests.get(
            "https://api.fitbit.com/1/user/7QCRW3/activities/heart/date/today/today.json",
            headers=header).json()

        frame = gaze.annotated_frame()
        text = ""

        horizontal_ratio = gaze.horizontal_ratio()
        vertical_ratio = gaze.vertical_ratio()

        if horizontal_ratio is not None:
            pupil_dilation_x.append(horizontal_ratio)

        if vertical_ratio is not None:
            pupil_dilation_y.append(vertical_ratio)

        if gaze.is_blinking():
            text = "Blinking"
            blink_count = blink_count + 1

        elif gaze.is_right():
            delta = dt.datetime.now() - t
            position = Pupil_position('right', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking right"

        elif gaze.is_left():
            delta = dt.datetime.now() - t
            position = Pupil_position('left', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking left"

        elif gaze.is_center():
            delta = dt.datetime.now() - t
            position = Pupil_position('center', delta.seconds)
            if position.position != pupil_position.position:
                diff = delta.seconds - pupil_position.time
                fixations.append(diff)
                pupil_position = position
                saccades = saccades + 1

            text = "Looking center"

        cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,
                    (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()

        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Blink Rate: " + str(blink_rate), (90, 195),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Saccades Rate: " + str(saccades_rate), (90, 225),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Pupil dilation x: " + str(pup_dil_x), (90, 255),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Pupil dilation y: " + str(pup_dil_y), (90, 285),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Fixation: " + str(fixation_avg), (90, 315),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.putText(frame, "Cognitive Load: " + str(cogload), (90, 345),
                    cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        delta = dt.datetime.now() - t

        elapsed_time = dt.datetime.now() - start_time
        elapsed_time_second = elapsed_time.seconds

        cv2.putText(frame, "Elapsed Time: " + str(elapsed_time_second),
                    (90, 375), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        cv2.imshow("Demo", frame)

        if delta.seconds >= 10:
            minute = minute + 1
            blink_rate = blink_count / 10
            saccades_rate = saccades / 10

            pupil_dilation_x = [v for v in pupil_dilation_x if v is not None]
            pupil_dilation_y = [v for v in pupil_dilation_y if v is not None]

            # Guard against an empty window (e.g. no pupils detected at all)
            pup_dil_x = sum(pupil_dilation_x) / max(len(pupil_dilation_x), 1)
            pup_dil_y = sum(pupil_dilation_y) / max(len(pupil_dilation_y), 1)

            fixation_avg = sum(fixations) / len(fixations)

            blink_count = 0
            saccades = 0

            pupil_position = Pupil_position('center', 0)

            t = dt.datetime.now()

            pupil_dilation_x = []
            pupil_dilation_y = []
            fixations = [0]

            print(
                response['activities-heart-intraday']['dataset'][-1]['value'])

            cogload = blink_rate   \
                + math.sqrt(pup_dil_x * pup_dil_x + pup_dil_y * pup_dil_y) \
                + saccades_rate \
                - fixation_avg

            print(blink_rate)
            print(pup_dil_x)
            print(pup_dil_y)
            print(saccades_rate)
            print(fixation_avg)
            print(cogload)
            write_csv('data.csv', minute, blink_rate, pup_dil_x, pup_dil_y,
                      fixation_avg, saccades_rate, cogload)

        if cv2.waitKey(33) == 27:
            break
        time.sleep(0.25)
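
As a sanity check on the cogload formula above, here is a small worked sketch with made-up numbers (illustrative values only, not data from the original project):

import math

# Illustrative only: 3 blinks and 5 saccades in a 10 s window,
# mean ratios near the center, 2 s average fixation.
blink_rate = 3 / 10        # 0.3
saccades_rate = 5 / 10     # 0.5
pup_dil_x = pup_dil_y = 0.5
fixation_avg = 2.0

cogload = (blink_rate
           + math.sqrt(pup_dil_x ** 2 + pup_dil_y ** 2)
           + saccades_rate
           - fixation_avg)
print(round(cogload, 2))   # -0.49: longer fixations pull the score down
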
Example #10
def getAttention(temp_photo):
    beta = 0.9
    frame_count = 0
    blink_counter = 0
    sleep_frames_counter = 0
    blink_th = 3

    gaze = GazeTracking()
    yawns = 0
    yawn_status = False

    smooth_attentiveness = -1

    # Process the incoming image
    frame = temp_photo

    # Detect faces
    face = detector(frame)
    if face is None or len(face) == 0:
        return "no face"

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    landmarks = np.matrix([[p.x, p.y] for p in gaze.landmarks.parts()])

    # print(np.matrix([[p.x, p.y] for p in landmarks.parts()]))

    frame = gaze.annotated_frame()
    text = ""
    attentiveness = 100
    if gaze.is_blinking():
        blink_counter += 1
    elif is_right(gaze) or is_left(gaze):  # or is_up(gaze) or is_down(gaze): base attentiveness is 0 when not looking at the center
        attentiveness = 0
        blink_counter = np.maximum(blink_counter - 1, 0)
    elif is_center(gaze):
        attentiveness = 100
        blink_counter = np.maximum(blink_counter - 1, 0)

    # if blink_counter >= blink_th:
    #     attentiveness = 0
    # Pupil localization check, used for the on-screen text
    if gaze.pupils_located:
        if gaze.is_blinking():
            gaze_text = "blinking"
        elif is_right(gaze):
            gaze_text = "looking right"
        elif is_left(gaze):
            gaze_text = "looking left"
        elif is_center(gaze):
            gaze_text = "looking center"
        elif is_down(gaze):
            gaze_text = "looking down"
        elif is_up(gaze):
            gaze_text = "looking up"
    else:
        gaze_text = "unsure"

    # Head orientation check
    direction = check_face_direction(landmarks)
    if direction == 'Center':
        attentiveness = 90
    else:
        print("人脸朝向")
        print(direction)
        attentiveness = 0.5 * attentiveness  # 0.5 * attentiveness
        print("降低后的专注度")
        print(attentiveness)
        print("瞳孔方向")
        print(gaze_text)


    sleep_chance = check_for_sleep(landmarks)
    if sleep_chance:
        if direction == 'Down':
            sleep_frames_counter += 1
        else:
            sleep_frames_counter = np.maximum(sleep_frames_counter - 1, 0)
    # If fatigue is detected for more than 60 frames while attention is 0, treat it as sleeping; this threshold needs revisiting
    if sleep_frames_counter > 60:
        attentiveness = 0
        cv2.putText(frame, "sleep detected", (30, 300), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)

    # calculate smooth attentiveness score
    if smooth_attentiveness == -1:
        smooth_attentiveness = attentiveness
        average_attentiveness = attentiveness
    else:
        smooth_attentiveness = np.round((1.0 - beta) * attentiveness + beta * smooth_attentiveness, 1)
        # average_attentiveness = np.round(
        #     (1. / frame_count) * attentiveness + (1. - 1. / frame_count) * average_attentiveness, 1)

    update_dt = datetime.now()

    # print("attentiveness is {}, smooth attentiveness is {} and average attentiveness is {}"
    #       .format(attentiveness, smooth_attentiveness, average_attentiveness))
    print(attentiveness)
    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    if gaze.pupils_located:
        v_ratio = np.round(gaze.vertical_ratio(), 2)
        h_ratio = np.round(gaze.horizontal_ratio(), 2)
    else:
        v_ratio, h_ratio = 0, 0

    image_landmarks, lip_distance = mouth_open(frame, landmarks)
    #cv2.imshow('Live Landmarks', image_landmarks)

    cv2.putText(frame, "Attentiveness: " + str(attentiveness), (15, 95), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    # cv2.putText(frame, "Left pupil:  " + str(left_pupil), (30, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    # cv2.putText(frame, "Right pupil: " + str(right_pupil), (30, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Gaze direction: " + str(gaze_text), (15, 130), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, "Head orientation: " + str(direction), (15, 165), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    # cv2.putText(frame, "v & h ratios: " + str(v_ratio) + " "+ str(h_ratio), (30, 200), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    # cv2.putText(frame, "Sleep counter: " + str(sleep_frames_counter), (15, 200), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 235), 2)

    prev_yawn_status = yawn_status

    if lip_distance > 20:
        yawn_status = True
        # A time-based check could be added here
        cv2.putText(frame, "Subject is Yawning", (50, 450),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)

        # output_text = " Yawn Count: " + str(yawns + 1)
        # cv2.putText(frame, output_text, (50,50), cv2.FONT_HERSHEY_COMPLEX, 1,(0,255,127),2)

    else:
        yawn_status = False

    if prev_yawn_status == True and yawn_status == False:
        yawns += 1

    overlay = frame.copy()

    if smooth_attentiveness < 30:
        text_tpl = '   Low - {:04.1f}%'
        bkg_color = (0, 0, 255)  # Red
    elif smooth_attentiveness < 60:
        text_tpl = 'Medium - {:04.1f}%'
        bkg_color = (0, 255, 255)  # Yellow
    else:
        text_tpl = '  High - {:04.1f}%'
        bkg_color = (0, 255, 0)  # Green

    # noise = frame_count % 10 / 10.
    # print(noise)
    # if smooth_attentiveness + noise >= 100:
    #     noise -= 1
    # text = text_tpl.format(smooth_attentiveness + noise)

    # text += ' - {:04.1f}%'.format(attentiveness)

    #cv2.rectangle(overlay, rect_org, rect_end, bkg_color, -1);
    # cv2.putText(overlay,text,
    #   org=text_org,
    #   fontFace=font,
    #   fontScale=fontScale,
    #   color=(0,0,0),
    #   thickness=thickness,
    #   lineType=cv2.LINE_AA)

    #cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
    #out.write(frame)

    # cv2.imshow('Yawn Detection', frame )

    #cv2.imshow("Demo", frame)
    #cv2.waitKey(0)
    #return attentiveness
    #print(str(direction))
    #print(yawn_status)
    #print(sleep_chance)
    result = str(attentiveness) + "\n" + str(direction) + "\n" + str(yawn_status) + "\n" + str(sleep_chance)
    print(result)
    return result
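
Note that getAttention re-initializes smooth_attentiveness to -1 on every call, so the else branch of the smoothing step never runs across frames. Carrying the previous value between calls, as in this sketch, restores the exponential moving average (beta = 0.9 as in the snippet; the helper name is an assumption):

def smooth(value, previous, beta=0.9):
    # Exponential moving average: each new sample gets weight (1 - beta)
    if previous is None:
        return value
    return round((1.0 - beta) * value + beta * previous, 1)

The caller would keep previous in whatever state outlives the per-frame call.
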
Example #11
xArr = np.ones(N)
yArr = np.ones(N)
pts = deque(maxlen=64)
i = 0
while True:
    # We get a new frame from the webcam
    _, frame = webcam.read()

    frame = cv2.flip(frame, 1)

    # We send this frame to GazeTracking to analyze it
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ''
    if gaze.horizontal_ratio() is not None:
        if calib:
            now = time.time() - start
            if now < 3:
                text = 'Look Top Right'
                xRight.append(gaze.horizontal_ratio())
                yTop.append(gaze.vertical_ratio())
                frame = cv2.circle(frame, (frame.shape[1], 0), 20, (0, 0, 255),
                                   -1)
            elif 3 <= now < 6:
                text = 'Look Top Left'
                xLeft.append(gaze.horizontal_ratio())
                yTop.append(gaze.vertical_ratio())
                frame = cv2.circle(frame, (0, 0), 20, (0, 0, 255), -1)
            elif 6 <= now < 9:
                text = 'Look Bottom Left'
Example #12
        if 15 < ld < 30:
            example2 = "banana"
            img = addText(img, 890, 220, example2, 40, (147, 58, 31), 2)
    if gaze.is_right():
        ru += 1
        ld, lu, rd = 0, 0, 0
        if 15 < ru < 30:
            example3 = "waxberry"
            img = addText(img, 1800, 220, example3, 40, (147, 58, 31), 2)

    cv.putText(frame, text, (90, 60), cv.FONT_HERSHEY_DUPLEX, 1.6,
               (147, 58, 31), 2)

    left_pupil = gaze.pupil_left_coords()
    right_pupil = gaze.pupil_right_coords()
    hr = gaze.horizontal_ratio()
    vr = gaze.vertical_ratio()
    cv.putText(frame, "Left pupil:  " + str(left_pupil), (90, 130),
               cv.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv.putText(frame, "Right pupil: " + str(right_pupil), (90, 165),
               cv.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv.putText(frame, "Horizontal ratio: " + str(hr), (90, 200),
               cv.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv.putText(frame, "Vertical ratio: " + str(vr), (90, 235),
               cv.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv.putText(frame, "Direction " + gaze.getDirection(), (90, 300),
               cv.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv.putText(img, "apple", (90, 220), cv.FONT_HERSHEY_DUPLEX, 0.9,
               (147, 58, 31), 1)
    cv.putText(img, "banana", (890, 220), cv.FONT_HERSHEY_DUPLEX, 0.9,
               (147, 58, 31), 1)
Example #13
class Calibrate(object):
    def __init__(self):
        self.gaze = GazeTracking()
        self.webcam = cv2.VideoCapture(0)
        self.hori = []
        self.verti = []
        self.circle = []
        self.i = 0
        self.window1 = Tk()

    def close_camera(self):
        self.webcam.release()

    def get_screen_size(self):
        self.hori = list(filter(None, self.hori))
        self.verti = list(filter(None, self.verti))
        return [min(self.hori), min(self.verti), max(self.hori), max(self.verti)]


    def calibrate(self):
        self.window1.attributes('-fullscreen', True)
        self.window1.update_idletasks()
        self.w = self.window1.winfo_screenwidth()
        self.h = self.window1.winfo_screenheight()
        self.coords = [(10, 10, 30, 30),
                       (self.w - 10, self.h - 10, self.w - 30, self.h - 30),
                       (self.w / 2 + 10, 10, self.w / 2 - 10, 30),
                       (self.w / 2 + 10, self.h - 10, self.w / 2 - 10, self.h - 30),
                       (10, self.h - 10, 30, self.h - 30),
                       (self.w - 10, 10, self.w - 30, 30),
                       (10, self.h / 2 - 10, 30, self.h / 2 + 10),
                       (self.w - 10, self.h / 2 - 10, self.w - 30, self.h / 2 + 10)]
        self.canvas = Canvas(self.window1, bg='black', width=self.w, height=self.h)
        self.canvas.pack()
        self.display = self.canvas.create_text(self.w/2, self.h/2, fill="white", text="Press enter to start. Look at the white ball.")
        self._initialise_balls()
        self.window1.bind("<Escape>", lambda e: e.widget.quit())
        self.window1.bind("<Return>", self._callabirate)
        self.window1.mainloop()

    def _initialise_balls(self):
        for i in range(len(self.coords)):
            self.circle.append(self.canvas.create_oval(0, 0, 0, 0, outline='white', fill='white'))

    def _calibrate(self, event=None):
        self.canvas.coords(self.circle[self.i], self.coords[self.i])
        _, frame = self.webcam.read()
        self.window1.after(2000)
        self.gaze.refresh(frame)
        frame = self.gaze.annotated_frame()
        self.hori.append(self.gaze.horizontal_ratio())
        self.verti.append(self.gaze.vertical_ratio())
        self.window1.after(3000, self._success)

    def _deleteBall(self):
        self.canvas.move(self.circle[self.i], -10000, -10000)

    def _success(self):
        self.canvas.itemconfigure(self.display, text="Amazing! Press enter for the next ball please :)")
        self._deleteBall()
        if self.i == len(self.coords) - 1:
            self.canvas.itemconfigure(self.display, text="Finished! We will now start the application.")
            self.window1.after(1000, lambda: self.window1.destroy())
        self.i = self.i + 1
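
get_screen_size returns the calibrated ratio bounds [min_h, min_v, max_h, max_v]. A possible follow-on (an illustrative addition, not part of the original class) maps live ratios to screen pixels, inverting the horizontal axis because GazeTracking's horizontal ratio grows toward the left:

def ratio_to_pixel(h_ratio, v_ratio, bounds, width, height):
    # bounds = [min_h, min_v, max_h, max_v] from Calibrate.get_screen_size()
    min_h, min_v, max_h, max_v = bounds
    nh = (h_ratio - min_h) / (max_h - min_h)   # normalize into [0, 1]
    nv = (v_ratio - min_v) / (max_v - min_v)
    x = int((1.0 - nh) * width)                # left gaze -> left of screen
    y = int(nv * height)
    # Clamp to the screen in case a live ratio falls outside the calibration
    return max(0, min(x, width - 1)), max(0, min(y, height - 1))
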
Example #14

Thread_run()

while True:
    _, frame = webcam.read()
    new_frame = np.zeros((500, 500, 3), np.uint8)

    gaze.refresh(frame)
    frame = gaze.annotated_frame()

    text = ""

    if test_count < 50:
        cv2.circle(frame, (25, 25), 25, (0, 0, 255), -1)
        if gaze.horizontal_ratio() is not None and gaze.vertical_ratio() is not None:
            total_left_hor_gaze += gaze.horizontal_ratio()
            total_top_ver_gaze += gaze.vertical_ratio()
            test_count += 1

    elif 50 <= test_count < 100:
        cv2.circle(frame, (610, 25), 25, (0, 0, 255), -1)
        if gaze.horizontal_ratio() is not None and gaze.vertical_ratio() is not None:
            total_right_hor_gaze += gaze.horizontal_ratio()
            total_top_ver_gaze += gaze.vertical_ratio()
            test_count += 1

    elif 100 <= test_count < 150:
        cv2.circle(frame, (25, 450), 25, (0, 0, 255), -1)
        if gaze.horizontal_ratio() is not None and gaze.vertical_ratio() is not None:
            total_left_hor_gaze += gaze.horizontal_ratio()