def main():
    """Forward Tobii Glasses sensor data to LSL outlets, throttled to ~50 Hz."""
    # Configure LSL streams
    lsl_streams = StreamsObj()

    # Create Tobii glasses Controller
    tobiiglasses = TobiiGlassesController("192.168.71.50")

    # Start Streaming
    tobiiglasses.start_streaming()

    print("Please wait ...")
    time.sleep(1.0)

    input("Press any key to start streaming")

    last_sent = time.time()
    try:
        while True:
            data = tobiiglasses.get_data()

            # Forward one sample every 20 ms (50 Hz).
            if time.time() - last_sent > 0.020:
                for channel in ('mems', 'left_eye', 'right_eye', 'gp', 'gp3'):
                    lsl_streams.sendData(channel, data[channel])
                last_sent = time.time()

    except Exception:
        # Top-level boundary: report the full traceback before cleanup.
        print(traceback.format_exc())

    finally:
        tobiiglasses.stop_streaming()
        tobiiglasses.close()
예제 #2
0
def main():
    """
    How to connect to the Tobii glasses.

    0. Automatic discovery of the device:

       TobiiGlassesController()

    1. If you know the IPv6 address of the Tobii glasses:

       TobiiGlassesController("fe80::76fe:48ff:ff00:ff00")

    2. If you know the IPv6 address of the Tobii glasses and the net
       interface of your host system (in case of multiple interfaces):

       TobiiGlassesController("fe80::76fe:48ff:ff00:ff00%eth0")

    3. If you know the IPv4 address of the Tobii glasses (WLAN or LAN
       connections):

       TobiiGlassesController("192.168.71.50")
    """

    # Connect via automatic discovery (option 0 above).
    TobiiGlassesController()
예제 #3
0
def main():
	"""Print 1000 live sensor samples from automatically-discovered glasses."""
	tobiiglasses = TobiiGlassesController()

	tobiiglasses.start_streaming()

	input("Press a key to start streaming (1000 samples will be shown)")

	for i in range(1000):
		# Take one snapshot per iteration so every printed field belongs
		# to the same sample; the original called get_data() once per
		# field, which could interleave data from different moments.
		data = tobiiglasses.get_data()
		print("Head unit: %s" % data['mems'])
		print("Left Eye: %s " % data['left_eye'])
		print("Right Eye: %s " % data['right_eye'])
		print("Gaze Position: %s " % data['gp'])
		print("Gaze Position 3D: %s " % data['gp3'])

	tobiiglasses.stop_streaming()
예제 #4
0
def demo(opt):
    """Gaze-target demo for the Tobii glasses scene camera.

    Detects screens/tablets with CenterNet and the robot face as an
    ellipse, decides whether the wearer's gaze falls on the robot, the
    screen or the tablet, and (optionally, gated by ``wait_signal``)
    synchronises the session over UDP, PATCHes the result to an HTTP
    endpoint and appends a JSON log to disk.
    """

    def find_ellipses_parallel(img):
        # Threshold the grayscale frame and fit an ellipse to every
        # contour found; size filtering happens at the call sites.
        gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        retval, gray_frame = cv2.threshold(gray_frame, 170, 235, 0)
        contours, hier = cv2.findContours(gray_frame, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
        elps = []
        for cnt in contours:
            try:
                elp = cv2.fitEllipse(cnt)
                elps.append(elp)
            except cv2.error:
                # fitEllipse requires at least 5 contour points.
                pass
        return elps

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)
    Detector = detector_factory[opt.task]
    detector = Detector(opt)
    detector.pause = False
    wait_signal = False  # master switch for UDP/HTTP session handling

    # Wait for the UDP message to start
    wait_start_signal = wait_signal
    if wait_start_signal:
        import socket
        from socket import gethostbyname
        UDP_PORT_NO = 9013
        UDP_IP_ADDRESS = gethostbyname("0.0.0.0")
        serverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        serverSock.bind((UDP_IP_ADDRESS, UDP_PORT_NO))
        while True:
            print("Waiting for the start message...")
            data, addr = serverSock.recvfrom(1024)

            print("Message: " + data.decode())
            if data.decode().startswith("START:"):
                # Expected datagram format: "START:<game_id>:<session>"
                _, game_id, session = data.decode().split(':')
                game_id = int(game_id)
                #session = int (session)
                print("Received game id %d." % (game_id))
                break

    # Decide the camera source
    if opt.demo == 'tobii':
        ipv4_address = "192.168.0.109"
        cap = cv2.VideoCapture("rtsp://%s:8554/live/scene" % ipv4_address)
        tobiiglasses = TobiiGlassesController(ipv4_address, video_scene=True)
        video_freq = tobiiglasses.get_video_freq()
        frame_duration = 1000.0 / float(video_freq)  # in milliseconds
        tobiiglasses.start_streaming()
    elif opt.demo == 'webcam' or \
      opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
        cap = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Set up the threading mechanism for ellipse (robot face) recognition
    # and stop-signal receiving.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as pool:
        # Wait for the UDP message to end
        wait_stopp_signal = wait_signal
        if wait_start_signal and wait_stopp_signal:

            def wait_stopp():
                # Blocks until a "STOPP:" datagram arrives.
                while True:
                    print("Waiting for the end message...")
                    data, addr = serverSock.recvfrom(1024)

                    print("Message: " + data.decode())
                    if data.decode().startswith("STOPP:"):
                        break
                return

            udp_end_received = pool.submit(wait_stopp)

        ret, img = cap.read()
        future = pool.submit(find_ellipses_parallel, img)

        # Setup the HTTP patch method
        send_back = wait_signal
        if wait_start_signal and send_back:
            import requests
            import time
            ip = '192.168.0.58'
            port = ':3000'
            address = 'http://' + ip + port + '/game/' + str(game_id) + '/'

        save_log = True
        if wait_start_signal and save_log:
            infolog = open("log_" + str(game_id) + ".txt", "w+")
            infolog.write(str(datetime.datetime.now()))
            infolog.write("\n")
            infolog.write("START: ")
            infolog.write("\n")

        # Set up the face recognition state
        detection_cnt = 0
        MA = 0  # ellipse major axis (pixels)
        ma = 0  # ellipse minor axis (pixels)
        elps = []

        # -1.0 means "no detection yet"
        screen_probability = -1.0
        tablet_probability = -1.0

        while (cap.isOpened()):
            data_update_ready = False

            # Reset the per-frame report before re-detection.
            info["focus"]["robot"]["gaze_focused"] = False
            info["focus"]["screen"]["gaze_focused"] = False
            info["focus"]["tablet"]["gaze_focused"] = False

            info["focus"]["robot"]["probability"] = -1.0
            info["focus"]["screen"]["probability"] = -1.0
            info["focus"]["tablet"]["probability"] = -1.0

            # Detect all the things in the scene (every 8th frame only).
            detection_cnt = detection_cnt + 1
            ret, img = cap.read()
            if detection_cnt % 8 == 0:
                height, width = img.shape[:2]
                if ret:

                    # Detect everything
                    centernet_results = detector.run(img)

                    detect_robot = True
                    if detect_robot:
                        # Ellipse fitting runs asynchronously; pick up the
                        # previous result and kick off the next frame.
                        if future.done():
                            elps = future.result()
                            future = pool.submit(find_ellipses_parallel, img)

                    # Draw everything
                    draw_robot = True
                    if draw_robot:
                        margin = 2.6
                        for elp in elps:
                            (x, y), (MA, ma), angle = elp

                            # Size gate: plausible robot-face ellipses only.
                            if 100 < MA < 440 and 100 < ma < 330 and 80264 < np.pi * ma * MA < 270000:
                                cv2.ellipse(
                                    img, ((x, y),
                                          (MA * margin, ma * margin), angle),
                                    (0, 255, 0), 3)

                    detect_gaze = True
                    if opt.demo == 'tobii' and detect_gaze:
                        data_gp = tobiiglasses.get_data()['gp']
                        data_pts = tobiiglasses.get_data()['pts']
                        data_gy = tobiiglasses.get_data()['mems']['gy']
                        data_ac = tobiiglasses.get_data()['mems']['ac']
                        data_lpd = tobiiglasses.get_data()['left_eye']['pd']
                        data_rpd = tobiiglasses.get_data()['right_eye']['pd']

                        # Gaze sample is valid only if it falls within the
                        # duration of the current video frame.
                        offset = data_gp['ts'] / 1000000.0 - data_pts[
                            'ts'] / 1000000.0
                        gaze_detected = offset > 0.0 and offset <= frame_duration
                        if gaze_detected:
                            cv2.circle(img, (int(data_gp['gp'][0] * width),
                                             int(data_gp['gp'][1] * height)),
                                       30, (0, 0, 255), 2)
                            gaze_x = int(data_gp['gp'][0] * width)
                            gaze_y = int(data_gp['gp'][1] * height)

                    # COCO classes: 63 = tv, 64 = laptop. A small box is
                    # labelled "tablet", a large one "screen".
                    results = centernet_results["results"]
                    for j in [63, 64]:
                        for bbox in results[j]:
                            if bbox[4] > detector.opt.vis_thresh:
                                confidence = bbox[4]
                                name = coco_class_name[j - 1]
                                if name == "laptop" or name == "tv":
                                    if (bbox[2] - bbox[0])**2 + (
                                            bbox[3] -
                                            bbox[1])**2 < screen_criteria:
                                        gaze_on_tablet = True
                                        add_coco_bbox(bbox,
                                                      j,
                                                      "tablet",
                                                      img,
                                                      conf=confidence,
                                                      show_txt=True)
                                    else:
                                        gaze_on_screen = True
                                        add_coco_bbox(bbox,
                                                      j,
                                                      "screen",
                                                      img,
                                                      conf=confidence,
                                                      show_txt=True)

            # Forming the sending information
            send_numbers = True
            detect_gaze_on_robot = True
            if detection_cnt % 8 == 0:
                gaze_on_robot = False
                # NOTE: original guard used "elps is not []", which is
                # always True (identity test); plain truthiness is the
                # intended check and behaves identically here.
                if opt.demo == 'tobii' and detect_gaze and gaze_detected and detect_robot and draw_robot and detect_gaze_on_robot and elps:
                    for elp in elps:
                        (x, y), (MA, ma), angle = elp
                        try:
                            if 100 < MA < 440 and 100 < ma < 330 and 80264 < np.pi * ma * MA < 270000:
                                # Point-in-ellipse test with the same
                                # margin used for drawing.
                                gaze_on_robot = ((gaze_y - y)**2 /
                                                 (ma * margin)**2 +
                                                 (gaze_x - x)**2 /
                                                 (MA * margin)**2 <= 1)
                        except ZeroDivisionError:
                            print(
                                "Zero Division happened, probabolly no face detected or detected wrong area."
                            )
                if gaze_on_robot:
                    print("Gaze on robot")

                gaze_on_screen = False
                gaze_on_tablet = False
                detect_gaze_on_object = True

                if opt.demo == 'tobii' and detect_gaze and gaze_detected and not gaze_on_robot:
                    results = centernet_results["results"]
                    # tv 63 and laptop 64
                    for j in [63, 64]:
                        for bbox in results[j]:
                            if bbox[4] > detector.opt.vis_thresh:
                                confidence = bbox[4]
                                name = coco_class_name[j - 1]
                                if name == "laptop" or name == "tv":
                                    x_in_range = bbox[0] <= gaze_x <= bbox[
                                        2] if bbox[2] >= bbox[0] else bbox[
                                            2] <= gaze_x <= bbox[0]
                                    # BUG FIX: the original compared gaze_x
                                    # against the box's y-extent here.
                                    y_in_range = bbox[1] <= gaze_y <= bbox[
                                        3] if bbox[3] >= bbox[1] else bbox[
                                            3] <= gaze_y <= bbox[1]
                                    gaze_on_object = x_in_range and y_in_range
                                    if gaze_on_object:
                                        if (bbox[2] - bbox[0])**2 + (
                                                bbox[3] -
                                                bbox[1])**2 < screen_criteria:
                                            gaze_on_tablet = True
                                            tablet_probability = confidence
                                        else:
                                            gaze_on_screen = True
                                            screen_probability = confidence

                    if gaze_on_tablet:
                        print("Gaze on tablet")
                    if gaze_on_screen:
                        print("Gaze on screen")

                cv2.imshow("demo", img)

                if not gaze_on_robot and not gaze_on_screen and not gaze_on_tablet:
                    print("Gaze on no where")

                if screen_probability is not None and tablet_probability is not None:
                    data_update_ready = True
                print("data upgrade", data_update_ready)
                if send_numbers and data_update_ready:
                    # Publish the per-frame focus decision.
                    info["focus"]["robot"]["gaze_focused"] = bool(
                        gaze_on_robot)
                    info["focus"]["screen"]["gaze_focused"] = bool(
                        gaze_on_screen)
                    info["focus"]["tablet"]["gaze_focused"] = bool(
                        gaze_on_tablet)

                    info["focus"]["robot"]["probability"] = -1.0
                    info["focus"]["screen"]["probability"] = float(
                        screen_probability)
                    info["focus"]["tablet"]["probability"] = float(
                        tablet_probability)

                if wait_start_signal and save_log:
                    # One timestamped JSON record per processed frame.
                    infolog.write(str(datetime.datetime.now()))
                    infolog.write("\n")
                    infolog.write(json.dumps(info))
                    infolog.write("\n")
                    infolog.write(json.dumps(data_gp))
                    infolog.write("\n")
                    infolog.write(json.dumps(data_gy))
                    infolog.write("\n")
                    infolog.write(json.dumps(data_ac))
                    infolog.write("\n")
                    infolog.write(json.dumps(data_lpd))
                    infolog.write("\n")
                    infolog.write(json.dumps(data_rpd))
                    infolog.write("\n")

                if send_back:
                    if not data_update_ready:
                        print("Not updated")
                    print(info)
                    r = requests.patch(address, data=json.dumps(info))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                return

            if wait_stopp_signal:
                # Shut down cleanly once the STOPP datagram was observed.
                if udp_end_received.done():
                    if save_log:
                        infolog.write(str(datetime.datetime.now()))
                        infolog.write("\n")
                        infolog.write("END: ")
                        infolog.write("\n")
                        infolog.close()
                    cap.release()
                    cv2.destroyAllWindows()
                    return
예제 #5
0
                bytes += self.stream.raw.read(1024)
                a = bytes.find('\xff\xd8')
                b = bytes.find('\xff\xd9')
                if a != -1 and b != -1:
                    jpg = bytes[a:b + 2]
                    bytes = bytes[b + 2:]
                    img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),
                                       cv2.IMREAD_COLOR)
                    cv2.imshow('cam', img)
                    if cv2.waitKey(1) == 27:
                        exit(0)
            except ThreadError:
                self.thread_cancelled = True

    def is_running(self):
        """Return True while the capture thread is still alive."""
        alive = self.thread.isAlive()
        return alive

    def shut_down(self):
        """Signal cancellation and block until the worker thread exits."""
        self.thread_cancelled = True
        # Poll (1 s granularity) until the thread observes the flag and stops.
        worker = self.thread
        while worker.isAlive():
            time.sleep(1)
        return True


if __name__ == "__main__":
    # Auto-discover the glasses and show their scene-camera RTSP stream.
    glasses = TobiiGlassesController()
    stream_url = "rtsp://" + glasses.address + ":8554/live/scene"

    camera = Cam(stream_url)
    camera.start()
예제 #6
0
def demo(opt):
    """Offline gaze-target evaluation demo.

    Like the live demo, but gaze coordinates are read from a CSV file
    ('./gaze_labels.cvs') instead of the glasses' live stream. For every
    frame it decides whether the gaze falls on the robot (ellipse), the
    screen or the tablet (CenterNet tv/laptop detections), appends the
    label to ``system_labels`` and writes it out at the end.
    """

    def find_ellipses_parallel(img):
        # Threshold the grayscale frame and fit an ellipse to every
        # contour found; size filtering happens at the call sites.
        gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        retval, gray_frame = cv2.threshold(gray_frame, 170, 235, 0)
        contours, hier = cv2.findContours(gray_frame, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
        elps = []
        for cnt in contours:
            try:
                elp = cv2.fitEllipse(cnt)
                elps.append(elp)
            except cv2.error:
                # fitEllipse requires at least 5 contour points.
                pass
        return elps

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)
    Detector = detector_factory[opt.task]
    detector = Detector(opt)
    detector.pause = False

    # Decide the camera source
    if opt.demo == 'tobii':
        ipv4_address = "192.168.0.109"
        cap = cv2.VideoCapture("rtsp://%s:8554/live/scene" % ipv4_address)
        tobiiglasses = TobiiGlassesController(ipv4_address, video_scene=True)
        video_freq = tobiiglasses.get_video_freq()
        frame_duration = 1000.0 / float(video_freq)  # in milliseconds
        tobiiglasses.start_streaming()
    elif opt.demo == 'webcam' or \
      opt.demo[opt.demo.rfind('.') + 1:].lower() in config.video_ext:
        cap = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    ret, img = cap.read()
    elps_candidates = find_ellipses_parallel(img)

    # Set up the face recognition state
    detection_cnt = 0
    MA = 0  # ellipse major axis (pixels)
    ma = 0  # ellipse minor axis (pixels)
    elps = []

    # Read gaze from file.
    # NOTE(review): '.cvs' extension looks like a typo for '.csv', but the
    # companion output file below uses the same spelling -- confirm against
    # the actual data files before renaming.
    gaze_df = pd.read_csv('./gaze_labels.cvs', ',')
    system_labels = []
    print(gaze_df)

    # Set up the recognition probability; -1.0 means "no detection yet".
    screen_probability = -1.0
    tablet_probability = -1.0

    while (cap.isOpened()):
        data_update_ready = False

        # Reset the per-frame report before re-detection.
        info["focus"]["robot"]["gaze_focused"] = False
        info["focus"]["screen"]["gaze_focused"] = False
        info["focus"]["tablet"]["gaze_focused"] = False

        info["focus"]["robot"]["probability"] = -1.0
        info["focus"]["screen"]["probability"] = -1.0
        info["focus"]["tablet"]["probability"] = -1.0

        # Detect all the things in the scene
        detection_cnt = detection_cnt + 1
        ret, img = cap.read()
        if not ret:
            break
        height, width = img.shape[:2]
        if detection_cnt % 1 == 0:

            # Detect everything
            centernet_results = detector.run(img)

            detect_robot = True
            if detect_robot:
                elps = find_ellipses_parallel(img)

            # Get gaze from the pre-recorded file.
            # The 10268th frame is when the game starts, so frame i of
            # this capture maps to row (i + 10268 - 1) of the CSV.
            gaze_x = gaze_df['gaze_x'][detection_cnt + 10268 - 1]
            gaze_y = gaze_df['gaze_y'][detection_cnt + 10268 - 1]

            # Draw everything
            draw_robot = True
            if draw_robot:
                margin = 2.6
                for elp in elps:
                    (x, y), (MA, ma), angle = elp

                    # Size gate: plausible robot-face ellipses only.
                    if 120 < MA < 440 and 120 < ma < 330 and 80264 < np.pi * ma * MA < 270000:
                        cv2.ellipse(img, ((x, y),
                                          (MA * margin, ma * margin), angle),
                                    (0, 255, 0), 3)

            # Gaze always comes from the file, so it is always "detected".
            gaze_detected = True
            if gaze_detected:
                cv2.circle(img, (gaze_x, gaze_y), 30, (0, 0, 255), 2)

            # COCO classes: 63 = tv, 64 = laptop. A small box is labelled
            # "tablet", a large one "screen".
            results = centernet_results["results"]
            for j in [63, 64]:
                for bbox in results[j]:
                    if bbox[4] > detector.opt.vis_thresh:
                        confidence = bbox[4]
                        name = config.coco_class_name[j - 1]
                        if name == "laptop" or name == "tv":
                            if (bbox[2] - bbox[0])**2 + (
                                    bbox[3] - bbox[1])**2 < screen_criteria:
                                gaze_on_tablet = True
                                add_coco_bbox(bbox,
                                              j,
                                              "tablet",
                                              img,
                                              conf=confidence,
                                              show_txt=True)
                            else:
                                gaze_on_screen = True
                                add_coco_bbox(bbox,
                                              j,
                                              "screen",
                                              img,
                                              conf=confidence,
                                              show_txt=True)

        # Forming the sending information
        detect_gaze_on_robot = True
        if detection_cnt % 1 == 0:

            gaze_on_screen = False
            gaze_on_tablet = False
            detect_gaze_on_object = True

            results = centernet_results["results"]
            # tv 63 and laptop 64
            for j in [63, 64]:
                for bbox in results[j]:
                    if bbox[4] > detector.opt.vis_thresh:
                        confidence = bbox[4]
                        name = config.coco_class_name[j - 1]
                        if name == "laptop" or name == "tv":
                            x_in_range = bbox[0] <= gaze_x <= bbox[2] if bbox[
                                2] >= bbox[0] else bbox[2] <= gaze_x <= bbox[0]
                            # BUG FIX: the original compared gaze_x against
                            # the box's y-extent here.
                            y_in_range = bbox[1] <= gaze_y <= bbox[3] if bbox[
                                3] >= bbox[1] else bbox[3] <= gaze_y <= bbox[1]
                            gaze_on_object = x_in_range and y_in_range
                            if gaze_on_object:
                                if (bbox[2] - bbox[0])**2 + (
                                        bbox[3] -
                                        bbox[1])**2 < screen_criteria:
                                    gaze_on_tablet = True
                                    tablet_probability = confidence
                                else:
                                    gaze_on_screen = True
                                    screen_probability = confidence
            # Detect screen and tablet first and then the robot.

            gaze_on_robot = False
            # NOTE: original guard used "elps is not []", which is always
            # True (identity test); plain truthiness is the intended check
            # and behaves identically here.
            if not gaze_on_screen and not gaze_on_tablet and gaze_detected and detect_robot and draw_robot and detect_gaze_on_robot and elps:
                for elp in elps:
                    (x, y), (MA, ma), angle = elp
                    try:
                        if 100 < MA < 440 and 100 < ma < 330 and 80264 < np.pi * ma * MA < 270000:
                            # Point-in-ellipse test with the drawing margin.
                            gaze_on_robot = ((gaze_y - y)**2 /
                                             (ma * margin)**2 +
                                             (gaze_x - x)**2 /
                                             (MA * margin)**2 <= 1)
                    except ZeroDivisionError:
                        print(
                            "Zero Division happened, probabolly no face detected or detected wrong area."
                        )

            if gaze_on_robot:
                print("Gaze on robot")
                system_labels.append("Robot")

            if gaze_on_tablet:
                print("Gaze on tablet")
                system_labels.append("Tablet")
            elif gaze_on_screen:
                print("Gaze on screen")
                system_labels.append("Screen")

            cv2.imshow("demo", img)
            out.write(img)

            if not gaze_on_robot and not gaze_on_screen and not gaze_on_tablet:
                print("Gaze on no where")
                system_labels.append("No where")

            if screen_probability is not None and tablet_probability is not None:
                data_update_ready = True
            print("data upgrade", data_update_ready)
            if data_update_ready:
                # Publish the per-frame focus decision.
                info["focus"]["robot"]["gaze_focused"] = bool(gaze_on_robot)
                info["focus"]["screen"]["gaze_focused"] = bool(gaze_on_screen)
                info["focus"]["tablet"]["gaze_focused"] = bool(gaze_on_tablet)

                info["focus"]["robot"]["probability"] = -1.0
                info["focus"]["screen"]["probability"] = float(
                    screen_probability)
                info["focus"]["tablet"]["probability"] = float(
                    tablet_probability)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            return

    out.release()

    # Persist the per-frame labels for later comparison with ground truth.
    df = pd.DataFrame(system_labels)
    df.to_csv('./system_labels.cvs')
예제 #7
0
def main():

	TobiiGlassesController()

	"""
예제 #8
0
def main():
    """Beep when the wearer's 2D gaze point leaves a user-defined box.

    Reads the box bounds and a run duration from stdin, then samples the
    gaze position once per second and prints/beeps on out-of-bounds.
    """
    tobiiglasses = TobiiGlassesController("192.168.71.50")
    print(tobiiglasses.get_battery_status())

    # Get the bounds of the viewable range
    print("Enter x upperbound:")
    x_upper = float(input())

    print("Enter x lowerbound:")
    x_lower = float(input())

    print("Enter y upperbound:")
    y_upper = float(input())

    print("Enter y lowerbound:")
    y_lower = float(input())

    print("Enter time to run in seconds:")
    tt = int(input())

    tobiiglasses.start_streaming()
    print("Please wait ...")
    time.sleep(3.0)

    for i in range(tt):
        time.sleep(1.0)

        # Snapshot the data once per iteration so x_pos, y_pos and the 3D
        # gaze printed below all come from the same sample; the original
        # called get_data() separately for each and could mix samples.
        data = tobiiglasses.get_data()
        x_pos = data['gp']['gp'][0]
        y_pos = data['gp']['gp'][1]

        print("Test number %s " % i)
        print("X Position: %s " % x_pos)
        print("Y Position: %s " % y_pos)

        # if out of bounds in the x axis
        if (x_pos > x_upper or x_pos < x_lower):
            print('X out of bounds')
            print('\a')  # makes a beeping noise
        elif (y_pos > y_upper or y_pos < y_lower):
            print('Y out of bounds')
            print('\a')  # makes a beeping noise
        else:
            print('In bounds')

        print("Gaze Position 3D: %s " % data['gp3'])

    tobiiglasses.stop_streaming()
    tobiiglasses.close()
def main():

    tobiiglasses = TobiiGlassesController()
    print tobiiglasses.get_battery_info()
    print tobiiglasses.get_storage_info()

    if tobiiglasses.is_recording():
        rec_id = tobiiglasses.get_current_recording_id()
        tobiiglasses.stop_recording(rec_id)

    project_name = raw_input("Please insert the project's name: ")
    project_id = tobiiglasses.create_project(project_name)

    participant_name = raw_input("Please insert the participant's name: ")
    participant_id = tobiiglasses.create_participant(project_id,
                                                     participant_name)

    calibration_id = tobiiglasses.create_calibration(project_id,
                                                     participant_id)
    raw_input(
        "Put the calibration marker in front of the user, then press enter to calibrate"
    )
    tobiiglasses.start_calibration(calibration_id)

    res = tobiiglasses.wait_until_calibration_is_done(calibration_id)

    if res is False:
        print("Calibration failed!")
        exit(1)

    recording_id = tobiiglasses.create_recording(participant_id)
    print "Important! The recording will be stored in the SD folder projects/%s/recordings/%s" % (
        project_id, recording_id)
    raw_input("Press enter to start recording")
    tobiiglasses.start_recording(recording_id)
    tobiiglasses.send_event("start_recording", "Start of the recording ")
    raw_input("Press enter to stop recording")
    tobiiglasses.send_event("stop_recording",
                            "Stop of the recording " + str(recording_id))
    tobiiglasses.stop_recording(recording_id)

    if res is False:
        print("Recording failed!")
        exit(1)
예제 #10
0
from tobiiglassesctrl import TobiiGlassesController

import cv2 as cv
import numpy as np

import sys
import time
import json

# Connect to the glasses at a known IPv4 address.
address = "192.168.71.50"
tobiiglasses = TobiiGlassesController(address)

# getting livestream video with opencv
address_rtsp = "rtsp://" + address + ":8554/live/scene"
vcap = cv.VideoCapture(address_rtsp)

# save video output at 20 fps, MJPG codec, same size as the capture.
# NOTE(review): cv.cv.CV_FOURCC is the OpenCV 1.x/2.x API (removed in
# OpenCV 3+, where the equivalent is cv.VideoWriter_fourcc) -- confirm
# the installed OpenCV version before running.
out = cv.VideoWriter('output.avi', cv.cv.CV_FOURCC(*'MJPG'), 20.0,
                     (int(vcap.get(3)), int(vcap.get(4))))

# calibration
# Stop any recording left over from a previous session first.
if tobiiglasses.is_recording():
    rec_id = tobiiglasses.get_current_recording_id()
    tobiiglasses.stop_recording(rec_id)

# raw_input: this snippet targets Python 2.
project_name = raw_input("Please insert the project's name: ")
project_id = tobiiglasses.create_project(project_name)

participant_name = raw_input("Please insert the participant's name: ")
participant_id = tobiiglasses.create_participant(project_id, participant_name)
예제 #11
0
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>

import cv2
import numpy as np

if hasattr(__builtins__, 'raw_input'):
    input = raw_input

from tobiiglassesctrl import TobiiGlassesController

# Known IPv4 address of the glasses (WLAN/LAN connection).
ipv4_address = "192.168.71.50"

# video_scene=True also exposes the scene-camera video stream.
tobiiglasses = TobiiGlassesController(ipv4_address, video_scene=True)

project_id = tobiiglasses.create_project("Test live_scene_and_gaze.py")

participant_id = tobiiglasses.create_participant(project_id,
                                                 "participant_test")

calibration_id = tobiiglasses.create_calibration(project_id, participant_id)

input(
    "Put the calibration marker in front of the user, then press enter to calibrate"
)
tobiiglasses.start_calibration(calibration_id)

# Blocks until calibration finishes; presumably returns False on failure
# (see the sibling snippets that check "res is False") -- TODO confirm.
res = tobiiglasses.wait_until_calibration_is_done(calibration_id)
예제 #12
0
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>

import av
import cv2
import numpy as np
from tobiiglassesctrl import TobiiGlassesController

# Known IPv4 address of the glasses.
ipv4_address = "192.168.100.10"

tobiiglasses = TobiiGlassesController(ipv4_address)
# Open the glasses' live scene-camera RTSP stream with PyAV.
video = av.open("rtsp://%s:8554/live/scene" % ipv4_address, "r")

tobiiglasses.start_streaming()

try:
    for packet in video.demux():
        for frame in packet.decode():
            if isinstance(frame, av.video.frame.VideoFrame):
                #print(frame.pts)
                img = frame.to_ndarray(format='bgr24')
                height, width = img.shape[:2]
                data_gp = tobiiglasses.get_data()['gp']
                if data_gp['ts'] > 0:
                    cv2.circle(img, (int(data_gp['gp'][0] * width),
                                     int(data_gp['gp'][1] * height)), 60,
예제 #13
0
def main():
    """Forward Tobii Glasses gaze samples to LSL, deduplicated by gidx."""

    # Configure LSL streams
    lsl_streams = StreamsObj()

    # Create Tobii glasses Controller
    tobiiglasses = TobiiGlassesController("192.168.71.50")
    print("Sampling frequency: ", tobiiglasses.get_et_freq())
    print(tobiiglasses.get_battery_status())

    # Calibrate
    # calibration(tobiiglasses)

    # Start Streaming
    tobiiglasses.start_streaming()
    print("Please wait ...")
    time.sleep(3.0)

    input("Press any key to start streaming")

    # gidx of the last forwarded gaze sample; start below any real index
    # so the first sample is always sent.
    current_gidx = -9999

    old_time = time.time()
    time.sleep(0.1)
    try:
        while True:

            data = tobiiglasses.get_data()

            # Send gaze data to LSL only when there is a new data point.
            if current_gidx < data['gp']['gidx']:
                current_gidx = data['gp']['gidx']
                print(data['gp'])
                # Send data
                lsl_streams.sendData('left_eye', data['left_eye'])
                lsl_streams.sendData('right_eye', data['right_eye'])
                lsl_streams.sendData('gp', data['gp'])
                lsl_streams.sendData('gp3', data['gp3'])

                # Print Battery status
                # print(tobiiglasses.get_battery_status())

            # MEMS data carries no gidx; throttle it to one sample per 20 ms.
            if time.time() - old_time > 0.02:
                lsl_streams.sendData('mems', data['mems'])
                old_time = time.time()

    except Exception as e:
        print(e)
        print("Closing Tobii")
        trace = traceback.format_exc()
        print(trace)

    finally:
        # BUG FIX: the original called file.close() first in this finally
        # block, but no 'file' object is ever opened in this function --
        # under Python 3 that raises NameError and prevents the glasses
        # from being shut down cleanly.
        tobiiglasses.stop_streaming()
        tobiiglasses.close()

    print("Closing programme")
예제 #14
0
def main():
    """Print 1000 live sensor samples from the glasses at 192.168.71.50."""
    tobiiglasses = TobiiGlassesController("192.168.71.50")
    print(tobiiglasses.get_battery_status())

    tobiiglasses.start_streaming()
    print("Please wait ...")
    time.sleep(3.0)

    for i in range(1000):
        # One snapshot per iteration keeps all printed fields consistent;
        # the original called get_data() once per field, which could
        # interleave data from different samples.
        data = tobiiglasses.get_data()
        print("Head unit: %s" % data['mems'])
        print("Left Eye: %s " % data['left_eye'])
        print("Right Eye: %s " % data['right_eye'])
        print("Gaze Position: %s " % data['gp'])
        print("Gaze Position 3D: %s " % data['gp3'])

    tobiiglasses.stop_streaming()
    tobiiglasses.close()