def test_load(self):
    """Load the bundled sample.list fixture and verify the parsed item count."""
    sample_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "sample.list")
    self.sl, contents = gaze(sample_path)
    print("sl len:", len(self.sl))
    assert len(self.sl) == 7
def path(relative=''):
    """Serve a path — either a directory listing or a single content item.

    !!! WARNING !!!
    This lets the app act as a customizable file system browser.
    Be careful what you point path_root at: on a machine with sensitive
    data that is reachable from a public network, everything under
    path_root becomes available.
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    full_path = os.path.join(path_root, relative)
    path = Path(full_path, relative_prefix=path_root)
    sl, collection, current = gaze(full_path)
    # `current` being set means a single item was requested; render it with
    # previous/next neighbours (wrapping around the ends of the collection).
    if not current:
        return bottle.template('collection', path=path, collection=collection)
    index = collection.index(current)
    size = len(collection)
    context = {"path": path,
               "collection": collection,
               "content": current,
               "index": index,
               "previous": collection[(index - 1) % size],
               "next": collection[(index + 1) % size],
               }
    return bottle.template('content', c=context)
def json_path(relative=''):
    """Serve a path — either a directory or a file — **as a JSON object**
    (the list produced by gaze).

    !!! WARNING !!!
    This lets the app act as a customizable file system browser.
    Be careful what you point path_root at: on a machine with sensitive
    data that is reachable from a public network, everything under
    path_root becomes available.
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    full_path = os.path.join(path_root, relative)
    path = Path(full_path, relative_prefix=path_root)
    sl, collection, current = gaze(full_path)
    print(sl)
    print(current)
    # The collection is what gets returned, but the Path objects it holds
    # are not JSON-serializable — stringify them first.
    friendly = []
    for entry in collection:
        entry['image'] = str(entry['image'])
        entry['path'] = str(entry['path'])
        friendly.append(entry)
    return json.dumps(friendly)
#!/usr/bin/env python import sys sys.path.append('../') import gaze import time # Params hostname='http://localhost:8080/gaze-web-app' username='******' password='******' camera='camera001' # Start up Gaze g = gaze.gaze(hostname) s = g.login(username, password) sid=s['sessionId'] # List shards ls = g.list_shards(sid, username, camera, limit=100) print 'Shards', ls ls = g.list_shards(sid, username, camera, start=24999999999) print 'Shards', ls # Logout g.logout(s['sessionId'])
def before_request():
    """Ensure a shared gaze client exists on the request-global object ``g``.

    Creates the client lazily on first use and caches it as
    ``g.gaze_instance`` so later handlers in the same request can reuse it.

    Bug fix: the original read ``getattr(g, 'gaze', None)`` but stored the
    client under ``g.gaze_instance``, so the cached client was never found
    and a fresh ``gaze(GAZE_URL)`` was constructed on every call.  Read and
    write the same attribute.  (NOTE(review): assumes downstream handlers
    read ``g.gaze_instance`` — confirm against the rest of the app.)
    """
    gaze_instance = getattr(g, 'gaze_instance', None)
    if gaze_instance is None:  # `is None`, not `== None`
        gaze_instance = gaze(GAZE_URL)
    g.gaze_instance = gaze_instance
def main(
    username=USERNAME,
    password=PASSWORD,
    camera=CAMERA,
    generator=GENERATOR,
    single_file=SINGLE_FILE,
    frames_per_sec=FRAMES_PER_SEC,
    other_params=OTHER,
):
    # Python 2 image-upload load driver: generates images, hands them to a
    # pool of uploader threads through a bounded queue, periodically commits
    # the highest contiguously-uploaded image id, and prints throughput
    # stats.  Returns None on login/image failure.
    # NOTE(review): the module-level constants (USERNAME, CAMERA, GAZE_URL,
    # QUEUE_SIZE, NUM_THREADS, MAX_IMAGES, STATS_PRINT_TIME) and the helpers
    # thread_upload_image / get_image are defined elsewhere in the file.

    # 0. Initialize variables
    state = dict()        # opaque generator state threaded through get_image
    stats = {
        "num_enqueues": 0,
        "num_dequeues": 0,
        "num_uploads": 0,
        "num_imgs": 0,
        "total_img_time": 0,
        "total_upload_time": 0,
    }
    threads = dict()
    task_queue = Queue.Queue(QUEUE_SIZE)   # bounded: producers drop when full
    done_db = dict()                        # image id -> upload result, shared with workers
    queue_lock = threading.Lock()           # guards task_queue puts and done_db
    stats_lock = threading.Lock()           # guards the stats counters
    last_stats = None
    last_stats_print_time = time.time()
    num_images = 0
    main_gz = gaze.gaze(GAZE_URL)
    s = None
    sid = None
    try:
        # NOTE(review): logs in with the module constants, not the
        # username/password parameters — presumably intentional; verify.
        s = main_gz.login(USERNAME, PASSWORD)
        sid = s["sessionId"]
        x = 1  # NOTE(review): appears unused — candidate for removal
    except Exception as error:
        print "Could not login into system", error, s
        return None
    # 1. Create threads: one uploader per thread, each with its own client
    for tid in range(0, NUM_THREADS):
        gz = gaze.gaze(GAZE_URL)
        thd = thread.start_new_thread(
            thread_upload_image,
            (tid, gz, task_queue, done_db, queue_lock, stats, stats_lock)
        )
        threads[tid] = thd
    # 2. Create and load images in a loop
    while num_images < MAX_IMAGES:
        num_images += 1
        ts_loop_start = time.time()
        # 2.1 Generate image
        ts_img_create_start = time.time()
        (state, img) = get_image(generator, single_file, stats, stats_lock,
                                 state, other_params)
        if state == None or img == None:
            sys.stderr.write("Could not get image!\n")
            return None
        # Millisecond timestamp doubles as the remote filename stem.
        ts = str(int(time.time() * 1000))
        filename = ts + ".jpg"
        tsk = {"ts": ts, "id": num_images, "filename": filename, "data": img}
        ts_img_create_stop = time.time()
        # 2.2 Put into queue (non-blocking: if the queue is full the image
        # is silently dropped — did_put records whether it was enqueued)
        did_put = False
        queue_lock.acquire()
        if not task_queue.full():
            task_queue.put(tsk)
            did_put = True
        queue_lock.release()
        # 2.3 Send commit: find the longest contiguous run of completed ids
        # starting from the smallest, commit only its last element, and drop
        # the committed entries from done_db.
        queue_lock.acquire()
        keys = done_db.keys()   # py2: list copy of the keys
        keys.sort()
        mk = None
        contig = []
        for k in keys:
            if mk == None or (mk + 1 == k):
                contig.append(k)
                mk = k
            else:
                break
        contig.sort()
        if len(contig) > 0:
            # print 'Contiguous', contig
            # print 'Send commit', done_db[contig[-1]]
            main_gz.commit_image(sid, USERNAME, CAMERA, done_db[contig[-1]])
            for k in contig:
                del done_db[k]
        queue_lock.release()
        # 2.4 Update/print stats
        stats_lock.acquire()
        cur_time = time.time()
        last_time = last_stats_print_time
        if did_put:
            stats["num_enqueues"] = stats["num_enqueues"] + 1
        diff_stats = None
        # Every STATS_PRINT_TIME seconds, snapshot the deltas since the last
        # snapshot for printing below (outside the lock).
        if last_stats != None and (cur_time - last_time) >= STATS_PRINT_TIME:
            diff_stats = dict()
            for key in stats.keys():
                diff_stats[key] = stats[key] - last_stats[key]
            last_stats_print_time = cur_time
            last_stats = dict()
            for key in stats.keys():
                last_stats[key] = stats[key]
        if last_stats == None:
            # First iteration: seed the baseline snapshot.
            last_stats = dict()
            for key in stats.keys():
                last_stats[key] = stats[key]
        stats_lock.release()
        if diff_stats != None:
            diff_time = cur_time - last_time
            buf = (
                "%(t)10.3f ENQUEUES=%(e)10d DEQUEUES=%(d)10d IMGS=%(i)10d UPLOADS=%(u)10d IMG_TIME=%(it)10.3f UPLOAD_TIME=%(ut)10.3f"
                % {
                    "t": diff_time,
                    "e": diff_stats["num_enqueues"],
                    "d": diff_stats["num_dequeues"],
                    "i": diff_stats["num_imgs"],
                    "u": diff_stats["num_uploads"],
                    "it": diff_stats["total_img_time"],
                    "ut": diff_stats["total_upload_time"],
                }
            )
            print buf
        ts_loop_end = time.time()
        # 2.4 Wait for a bit to catchup with preferred frame rate
        rate_time = 1.0 / frames_per_sec
        rate_diff = rate_time - (ts_loop_end - ts_loop_start)
        if rate_diff > 0:
            time.sleep(rate_diff)
#!/usr/bin/env python import sys sys.path.append("../") import gaze # Params username = "******" password = "******" # Start up Gaze g = gaze.gaze("http://localhost:8080/video-webapp") s = g.login(username, password) sid = s["sessionId"] # Get camera c = g.get_camera(sid, "camera001") print "Camera", c # Logout g.logout(s["sessionId"])
def main(args):
    """Run the gaze-estimation demo pipeline on a video file or webcam.

    Pipeline per frame: face detection -> facial landmarks -> head pose ->
    gaze vector; the gaze is projected to 2D and drawn as arrows from both
    eyes.  Raw frames are written to 'output1.mp4'.

    args attributes used: face_path, landmark_path, headpose_path,
    gaze_path, device, input_type ('video' or 'cam'), inter_viz.

    Fixes vs. original:
    - np.float was removed in NumPy 1.20+; use the builtin float dtype.
    - unknown input_type previously raised NameError on `cap`; now logged
      and the function returns early.
    - video_writer is released so the output file is finalized.
    - rvec/tvec/camera_matrix are frame-invariant and hoisted out of the loop.
    """
    fd = face_detect('face detection', args.face_path, args.device)
    fd.load_model()
    ld = landmark('landmark', args.landmark_path, args.device)
    ld.load_model()
    hp = head_pose('head pose', args.headpose_path, args.device)
    hp.load_model()
    gz = gaze('Gaze', args.gaze_path, args.device)
    gz.load_model()

    if args.input_type == 'video':
        cap = cv2.VideoCapture('demo.mp4')
    elif args.input_type == 'cam':
        cap = cv2.VideoCapture(0)
    else:
        # Original crashed with NameError here; fail loudly instead.
        logging.error('Unknown input_type: %s', args.input_type)
        return

    video_writer = cv2.VideoWriter('output1.mp4',
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   10, (1920, 1080))
    if not cap.isOpened():
        logging.error('Video file not found. Check the path')

    # Identity projection setup: no rotation, no translation, unit camera
    # matrix — projectPoints is used only to flatten the gaze vector to 2D.
    rvec = np.zeros(3, dtype=float)
    tvec = np.zeros(3, dtype=float)
    camera_matrix = np.eye(3, dtype=float)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes, pre_img, crp_img = fd.predict(frame)
        keypoint_image, right_eye, left_eye, x_e, y_e = ld.predict(crp_img)
        hp_vector = np.reshape(hp.predict(crp_img), (1, 3))
        mouse_points = gz.predict(left_eye, right_eye, hp_vector)

        result, _ = cv2.projectPoints(mouse_points, rvec, tvec,
                                      camera_matrix, None)
        result = result[0][0]
        res = (int(result[0] * 100), int(result[1] * 100))

        # Eye centres in full-frame coordinates (landmarks are relative to
        # the face crop, offset by the detected face box origin).
        e1 = (boxes[0][0] + x_e[0], boxes[0][1] + y_e[0])
        e2 = (boxes[0][0] + x_e[1], boxes[0][1] + y_e[1])
        cv2.arrowedLine(pre_img, e1, (e1[0] - res[0], e1[1] + res[1]),
                        (0, 255, 0), 2)
        cv2.arrowedLine(pre_img, e2, (e2[0] - res[0], e2[1] + res[1]),
                        (0, 255, 0), 2)
        # move_mouse = MouseController('medium','medium')
        # move_mouse.move((e1[0] - res[0], e1[1] + res[1]))
        if args.inter_viz:
            cv2.imshow('frame', pre_img)
        video_writer.write(frame)
        cv2.waitKey(1)

    cap.release()
    video_writer.release()  # finalize output1.mp4 (missing in original)
    cv2.destroyAllWindows()