def run(controller, cam):
    # Pico Flexx initialization
    q = queue.Queue()
    listener = MyListener(q)
    cam.registerDataListener(listener)
    # cam.startCapture()

    # setting up cameras
    cap = set_up(controller, rgb_cam=0)

    while True:
        print("press E to start")
        utils.draw_ui(text="press E to start")
        k = cv2.waitKey()
        if k == ord('e'):
            # RUNNING
            if not cap:
                print("error rgb cam")
                exit(-1)

            error_queue = False
            ready = False
            while True:
                try:
                    cam.startCapture()
                    print("picoflex ready")
                    break
                except RuntimeError:
                    print("error connection picoflex")

            while True:
                # print(frame_counter)
                if error_queue:
                    print(error_queue)
                    break
                frame = controller.frame()
                if utils.hand_is_valid(frame):
                    print('\nhand is valid -> ready to start')
                    ready = True
                    # print(self.listener.recording)
                    print("start gesture")
                    # print(self.listener.recording)
                else:
                    break
            # if ready:
        elif k == ord('s'):
            print("end")
            break

def run(controller, cam):
    # Pico Flexx initialization
    q = queue.Queue()
    listener = MyListener(q, recording=False)
    cam.registerDataListener(listener)
    # cam.startCapture()

    if not os.path.exists("./data"):
        session_counter = 0
        session_start = 0
        utils.save_session_info(session_id=session_counter)
        os.makedirs("./data")
    elif os.path.exists("./data") and not os.path.exists(args.file_info):
        print("json file has to be present - check utils.save_session_info()")
        exit()
    else:
        session_start = utils.load_session_info() + 1
        session_counter = session_start

    while True:
        if session_counter == session_start:
            print("press E to start new session of recording")
            utils.draw_ui(text="press E to start new session of recording")
        else:
            print("press E to record new session {} or Q to quit".format(session_counter))
            utils.draw_ui(text="press E to record new session {} or Q to quit".format(session_counter))

        k = cv2.waitKey()
        if k == ord('e'):
            pass
        elif k == ord('q'):
            print("end collection")
            utils.save_session_info(session_id=session_counter - 1)
            break

        sess = Session(id_session=session_counter, controller=controller,
                       cam=cam, queue=q, listener=listener)

        # create the directory for this session
        directory = "./data/{:03d}".format(sess.id_session)
        if not os.path.exists(directory):
            os.makedirs(directory)
        sess.dir = directory

        print("session {} started".format(sess.id_session))
        sess.run_session()
        session_counter += 1

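# --- illustrative sketch (not part of the original sources) ------------------
# The session counter above is persisted through utils.save_session_info() and
# utils.load_session_info(). A minimal sketch of that persistence, assuming a
# JSON file path like args.file_info and a single "session_id" key; the real
# helpers in utils may store additional metadata.
import json

def save_session_info_sketch(session_id, file_info="./data/session_info.json"):
    # write the id of the last completed session to disk
    with open(file_info, "w") as f:
        json.dump({"session_id": session_id}, f)

def load_session_info_sketch(file_info="./data/session_info.json"):
    # read back the id of the last completed session
    with open(file_info, "r") as f:
        return json.load(f)["session_id"]
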
def run(controller, cam): # inizializzazione picoflexx q = queue.Queue() listener = MyListener(q, recording=False) cam.registerDataListener(listener) # cam.startCapture() if not os.path.exists("./single_data"): os.makedirs("./single_data") else: shutil.rmtree("./single_data") counter = 0 while True: # setup camere cap, lcd, lcf, rcd, rcf = init_setup(controller) if cap: print("setup initialized") else: return -1 print( "press E to record single record {} or Q to quit".format(counter)) utils.draw_ui(text="press E to record single record {} or Q to quit". format(counter)) k = cv2.waitKey() if k == ord('e'): pass elif k == ord('q'): print("end single recording") break start_single_record(counter, cap, cam, controller, listener, q, lcd, lcf, rcd, rcf) print("record saved") counter += 1
def run(controller, cam, model, device, mean_depth, std_depth, mean_ir, std_ir):
    # Pico Flexx initialization
    # q = queue.Queue()
    # listener = MyListener(q)
    # cam.registerDataListener(listener)

    # setting up cameras
    # cap = set_up(controller, rgb_cam=0)

    while True:
        print("press E to start")
        utils.draw_ui(text="press E to start")
        k = cv2.waitKey()
        if k == ord('e'):
            # RUNNING
            # if not cap:
            #     print("error rgb cam")
            #     exit(-1)
            error_queue = False
            stop = False
            # while True:
            #     try:
            #         q = queue.Queue()
            #         listener = MyListener(q)
            #         cam.registerDataListener(listener)
            #         cam.startCapture()
            #         print("picoflex ready")
            #         break
            #     except RuntimeError:
            #         print("error connection picoflex")
            buffer_depth, buffer_ir = [], []
            counter = 0
            # softmax = nn.Softmax()
            pause = False
            detected = None
            while True:
                # time.sleep(0.1)
                if len(buffer_depth) == 0:
                    while True:
                        try:
                            q = queue.Queue()
                            listener = MyListener(q)
                            cam.registerDataListener(listener)
                            cam.startCapture()
                            print("picoflex ready")
                            break
                        except RuntimeError:
                            print("error connection picoflex")
                    print("start")

                if cv2.waitKey(1) == ord('s') or pause or error_queue:
                    stop = True
                    pause = False
                    break
                # print(frame_counter)
                if error_queue:
                    print(error_queue)
                    break
                # print("\rRunning...", end="")
                # utils.draw_ui(text="Running... press S to stop")

                # PICOFLEXX
                # imgs == (z, ir)
                ret_pico, imgs = utils.get_images_from_picoflexx(q)
                # print("ret_pico, z, ir", ret_pico, imgs[0], imgs[1])
                if not ret_pico:
                    print("pico image not valid")
                    error_queue = True
                    # break
                # show images
                else:
                    counter += 1
                    print('buffer: {}'.format(len(buffer_depth)))
                    depth_x = imgs[0]
                    ir_x = imgs[1]
                    # cv2.imshow('img_ir', cv2.resize(ir_x, (0, 0), fx=1.5, fy=1.5))
                    # cv2.imshow('img_depth', cv2.resize((depth_x * 255).astype(np.uint8), (0, 0), fx=1.5, fy=1.5))
                    utils.draw_demo_ui(
                        "",
                        img0=cv2.resize(imgs[1], (0, 0), fx=1.5, fy=1.5),
                        img1=cv2.resize((imgs[0] * 65535).astype(np.uint16), (0, 0), fx=1.5, fy=1.5),
                        text="RUNNING..." if detected is None or counter > 20 else detected)

                    # insert into the sliding buffers
                    if len(buffer_depth) < args.n_frames:
                        # resize
                        depth_x = np.expand_dims(cv2.resize(depth_x, (224, 224)), axis=2)
                        ir_x = np.expand_dims(cv2.resize(ir_x, (224, 224)), axis=2)
                        buffer_depth.append(depth_x)
                        buffer_ir.append(ir_x)
                    elif len(buffer_depth) >= args.n_frames:
                        # resize
                        depth_x = np.expand_dims(cv2.resize(depth_x, (224, 224)), axis=2)
                        ir_x = np.expand_dims(cv2.resize(ir_x, (224, 224)), axis=2)
                        buffer_depth.pop(0)
                        buffer_depth.append(depth_x)
                        buffer_ir.pop(0)
                        buffer_ir.append(ir_x)

                    if len(buffer_depth) == args.n_frames:
                        # prediction
                        # build the clips
                        clip_depth = np.concatenate(buffer_depth, axis=2)
                        clip_depth = np.float32(clip_depth.transpose([2, 0, 1]))
                        clip_ir = np.concatenate(buffer_ir, axis=2)
                        clip_ir = np.float32(clip_ir.transpose([2, 0, 1]))

                        # pre processing
                        # normalization
                        clip_depth = (clip_depth - mean_depth) / std_depth
                        clip_ir = (clip_ir - mean_ir) / std_ir

                        # convert to tensors
                        clip_depth = torch.tensor(clip_depth)
                        clip_ir = torch.tensor(clip_ir)

                        # move to GPU
                        clip_depth = clip_depth.to(device)
                        clip_ir = clip_ir.to(device)

                        # prediction
                        out = model(clip_depth.unsqueeze(dim=0)
                                    # , clip_ir.unsqueeze(dim=0)
                                    )
                        # print("out: {}, predicted: {}".format(out, torch.max(out, 1)))
                        out = F.softmax(out, dim=1)
                        out = torch.max(out, 1)
                        if out[0] >= args.pred_th:
                            print("detected ", out[0].item(), out[1].item())
                            # print("softmax: {}", out)
                            detected = gestures[out[1].item()]
                            buffer_depth = []
                            buffer_ir = []
                            counter = 0
                            # pause = True
                            cam.stopCapture()
                            print("stop")
                            # utils.draw_ui("{} detected".format(gestures[out[1].item()]), position=(255, 255))
                            utils.draw_demo_ui(
                                "",
                                img0=cv2.resize(imgs[1], (0, 0), fx=1.5, fy=1.5),
                                img1=cv2.resize((imgs[0] * 65535).astype(np.uint16), (0, 0), fx=1.5, fy=1.5),
                                text=detected)
                        else:
                            print("not_recognized")
                            # print("not detected: ", out[0].item(), out[1].item())
                            # buffer_depth = []
                            # buffer_ir = []
                            # counter = 0
                            # # time.sleep(10)
                            # pause = True
                            pass

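# --- illustrative sketch (not part of the original sources) ------------------
# The prediction branch above stacks args.n_frames single-channel 224x224
# frames along the channel axis, normalizes them, and feeds the result to the
# model as one clip tensor. A standalone sketch of that preprocessing with
# dummy data; n_frames and the mean/std values here are placeholders, not the
# statistics used by the trained model.
import numpy as np
import torch

n_frames = 40                       # assumption: stands in for args.n_frames
mean_depth, std_depth = 0.5, 0.25   # placeholder normalization statistics
buffer_depth = [np.expand_dims(np.random.rand(224, 224).astype(np.float32), axis=2)
                for _ in range(n_frames)]

clip_depth = np.concatenate(buffer_depth, axis=2)         # (224, 224, n_frames)
clip_depth = np.float32(clip_depth.transpose([2, 0, 1]))  # (n_frames, 224, 224)
clip_depth = (clip_depth - mean_depth) / std_depth        # normalization
clip_depth = torch.tensor(clip_depth).unsqueeze(dim=0)    # (1, n_frames, 224, 224)
# with the real network: out = F.softmax(model(clip_depth.to(device)), dim=1)
#                        confidence, class_idx = torch.max(out, 1)
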
def start_single_record(counter, cap, cam, controller, listener, q,
                        left_coord, left_coeff, right_coord, right_coeff):
    directory_rr = "./single_data/{}/R/raw".format(counter)
    directory_lr = "./single_data/{}/L/raw".format(counter)
    directory_ru = "./single_data/{}/R/undistorted".format(counter)
    directory_lu = "./single_data/{}/L/undistorted".format(counter)
    directory_leap_info = "./single_data/{}/leap_motion/tracking_data".format(counter)
    directory_rgb = "./single_data/{}/rgb".format(counter)
    directory_z = "./single_data/{}/depth/z".format(counter)
    directory_ir = "./single_data/{}/depth/ir".format(counter)

    list_img_rr = []
    list_img_ru = []
    list_img_lr = []
    list_img_lu = []
    list_json = []
    list_img_rgb = []
    list_img_z = []
    list_img_ir = []

    record_if_valid = False
    frame_counter = 0

    if not cap:
        print("error rgb cam")
        exit(-1)

    error_queue = False
    cam.startCapture()
    while True:
        # print(frame_counter)
        if (cv2.waitKey(1) == ord('s') and record_if_valid and frame_counter > args.n_min_frames) \
                or error_queue:
            # print(error_queue)
            break
        frame = controller.frame()

        # validity check before starting the recording (OPTIONAL)
        # start recording frames only if the Leap Motion correctly detects the hand
        # print(self.listener.recording)
        if utils.hand_is_valid(frame) and not record_if_valid:
            print('\nhand is valid -> ready to start')
            record_if_valid = True
            # print(self.listener.recording)
            print("start gesture")
            listener.setRecording(True)
            # print(self.listener.recording)

        if record_if_valid:
            print("\rrecord valid -> showing {}".format(frame_counter), end="")
            utils.draw_ui(text="recording - press S to stop", circle=True, thickness=-1)

            # RGB CAM
            # get rgb image
            # print(1)
            ret, img_rgb = cap.read()
            # print(2)
            # resize dim img rgb
            if not ret:
                print("\nrgb cam not working")
                exit(-1)
            # cv2.imshow('img_rgb', img_rgb)
            # cv2.waitKey(1)

            # Leap Motion
            if frame.is_valid:
                image_l = frame.images[0]
                image_r = frame.images[1]
                # print(3)
            else:
                print("\rframe {} not valid".format(frame_counter), end="")
                continue

            if image_l.is_valid and image_r.is_valid:
                # print(4)
                raw_img_l = utils.get_raw_image(image_l)
                raw_img_r = utils.get_raw_image(image_r)

                # undistorted images
                undistorted_left = utils.undistort(image_l, left_coord, left_coeff, 400, 400)
                undistorted_right = utils.undistort(image_r, right_coord, right_coeff, 400, 400)
                # print(5)

                # show images
                # previous position cv2.imshow()
                # cv2.imshow('img_leap', undistorted_right)

                # json
                json_obj = utils.frame2json_struct(frame)
                # print(6)

                # PICOFLEXX
                # imgs == (z, ir)
                ret_pico, imgs = utils.get_images_from_picoflexx(q)
                # print("ret_pico, z, ir", ret_pico, imgs[0], imgs[1])
                if not ret_pico:
                    print("pico image not valid")
                    error_queue = True
                    continue

                cv2.moveWindow('img_rgb', -700, 325)
                cv2.moveWindow('img_leap', -1150, 400)
                cv2.moveWindow('img_ir', -1500, 600)
                cv2.imshow('img_leap', undistorted_right)
                cv2.imshow('img_rgb', img_rgb)
                cv2.imshow('img_ir', imgs[1])
                # print(7)

                list_img_rr.append(raw_img_r.copy())
                list_img_ru.append(undistorted_right.copy())
                list_img_lr.append(raw_img_l.copy())
                list_img_lu.append(undistorted_left.copy())
                list_img_rgb.append(img_rgb.copy())
                list_json.append(json_obj)
                list_img_z.append(imgs[0].copy())
                list_img_ir.append(imgs[1].copy())
                # list_img_z.append(z.copy())
                # list_img_ir.append(ir.copy())

                frame_counter += 1
                # print(8)
            else:
                print('image not valid')
        else:
            print("\rerror in getting valid leap motion frame", end="")
            # print(self.listener.recording)

    listener.setRecording(False)
    cam.stopCapture()
    cap.release()

    # write single record
    print("saving record")
    utils.draw_ui(text="Saving session...")
    utils.save_single_record(list_img_rr, list_img_ru, list_img_lr, list_img_lu,
                             list_json, list_img_rgb, list_img_z, list_img_ir,
                             directory_rr, directory_ru, directory_lr, directory_lu,
                             directory_leap_info, directory_rgb, directory_z,
                             directory_ir)

def run_session(self):
    # init leap motion
    print("waiting for maps initialization...")
    while True:
        frame = self.controller.frame()
        image_l = frame.images[0]
        image_r = frame.images[1]
        if image_l.is_valid and image_r.is_valid:
            left_coordinates, left_coefficients = utils.convert_distortion_maps(image_l)
            right_coordinates, right_coefficients = utils.convert_distortion_maps(image_r)
            maps_initialized = True
            print('maps initialized')
            break
        else:
            print('\rinvalid leap motion frame', end="")

    # initialize video capture
    while True:
        cap = cv2.VideoCapture(1)
        print(cap)
        if cap:
            # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920.0)
            # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080.0)
            # print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            break
        else:
            print("\rerror rgb cam", end="")
    print("ready to go")

    list_of_gestures = []
    counter_gesture = 0
    rewrite = False
    skip_gesture = False

    for i in range(0, len(gestures)):
        # if (i == len(gestures) - 1) and self.last_session is False:
        #     break
        while True:
            # if counter_gesture == NUMBER_OF_RECORDS_PER_GESTURE:
            #     pass
            # if (i == len(gestures) - 1) and counter_gesture > 0:
            #     # counter_gesture = 0
            if skip_gesture is True:
                skip_gesture = False
                break

            if counter_gesture == 0:
                utils.draw_ui(
                    text="press S to start record {0} of {1}".format(
                        counter_gesture, gestures[i]))
            elif (i == len(gestures) - 1) and counter_gesture > 0:
                utils.draw_ui(
                    text="press R to repeat last record or Q to quit")
            elif counter_gesture == args.n_records_gesture:
                utils.draw_ui(
                    text="press S to start record {}/{} of {}/{:02d}"
                         " or press R to repeat record {} of previous gesture".format(
                             0, args.n_records_gesture - 1, gestures[i + 1],
                             len(gestures) - 1, counter_gesture - 1))
                # counter_gesture -= 1
            else:
                utils.draw_ui(
                    text="press S to start record {}/{} of {}/{:02d}"
                         " or press R to repeat record {}".format(
                             counter_gesture, args.n_records_gesture - 1,
                             gestures[i], len(gestures) - 1,
                             counter_gesture - 1))

            while True:
                k = cv2.waitKey()
                if k == ord('s') and counter_gesture < args.n_records_gesture:
                    break
                elif k == ord('s') and counter_gesture == args.n_records_gesture:
                    counter_gesture = 0
                    skip_gesture = True
                    rewrite = False
                    break
                elif k == ord('r') and counter_gesture > 0 and i == len(gestures) - 1:
                    counter_gesture = 0
                    rewrite = True
                    break
                elif k == ord('r') and counter_gesture > 0:
                    counter_gesture -= 1
                    rewrite = True
                    break
                elif k == ord('q') and i == len(gestures) - 1:
                    counter_gesture = 0
                    skip_gesture = True
                    break

            if counter_gesture < args.n_records_gesture and not skip_gesture:
                g = Gesture(i, gestures[i], counter_gesture, self.controller,
                            self.cam, self.q, self.listener, cap,
                            self.id_session, maps_initialized,
                            left_coord=left_coordinates,
                            left_coeff=left_coefficients,
                            right_coord=right_coordinates,
                            right_coeff=right_coefficients,
                            rewrite=rewrite)
                record = g.record()
                if not rewrite:
                    list_of_gestures.append(record)
                    counter_gesture += 1
                else:
                    list_of_gestures.pop(-1)
                    list_of_gestures.append(record)
                    counter_gesture += 1
                    rewrite = False

    # release videocapture
    cap.release()

    list_of_thread = []
    for x in list_of_gestures:
        utils.draw_ui("Saving Session...")
        cv2.waitKey(1)
        list_of_thread.append(x.saveGestureData())
    for th in list_of_thread:
        th.join()
    print("Recording session saved")

def record(self):
    list_img_rr = []
    list_img_ru = []
    list_img_lr = []
    list_img_lu = []
    list_json = []
    list_img_rgb = []
    list_img_z = []
    list_img_ir = []

    record_if_valid = False
    frame_counter = 0

    # print("ready to go")
    # open rgb camera
    # cap = cv2.VideoCapture(1)
    #
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280.0)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720.0)
    # print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if not self.cap:
        print("error rgb cam")
        exit(-1)

    error_queue = False
    self.cam.startCapture()
    while True:
        # print(frame_counter)
        if (cv2.waitKey(1) == ord('s') and record_if_valid and frame_counter > args.n_min_frames) \
                or error_queue:
            # print(error_queue)
            break
        frame = self.controller.frame()

        # validity check before starting the recording (OPTIONAL)
        # start recording frames only if the Leap Motion correctly detects the hand
        # print(self.listener.recording)
        if utils.hand_is_valid(frame) and not record_if_valid:
            print('\nhand is valid -> ready to start')
            record_if_valid = True
            # print(self.listener.recording)
            print("start gesture")
            self.listener.setRecording(True)
            # print(self.listener.recording)

        if record_if_valid:
            print("\rrecord valid -> showing {}".format(frame_counter), end="")
            utils.draw_ui(text="recording - press S to stop", circle=True, thickness=-1)

            # RGB CAM
            # get rgb image
            # print(1)
            ret, img_rgb = self.cap.read()
            # print(2)
            # resize dim img rgb
            if not ret:
                print("\nrgb cam not working")
                exit(-1)
            # cv2.imshow('img_rgb', img_rgb)
            # cv2.waitKey(1)

            # Leap Motion
            if frame.is_valid:
                image_l = frame.images[0]
                image_r = frame.images[1]
                # print(3)
            else:
                print("\rframe {} not valid".format(frame_counter), end="")
                continue

            if image_l.is_valid and image_r.is_valid:
                # print(4)
                raw_img_l = utils.get_raw_image(image_l)
                raw_img_r = utils.get_raw_image(image_r)

                # undistorted images
                undistorted_left = utils.undistort(image_l, self.left_coord,
                                                   self.left_coeff, 400, 400)
                undistorted_right = utils.undistort(image_r, self.right_coord,
                                                    self.right_coeff, 400, 400)
                # print(5)

                # show images
                # previous position cv2.imshow()
                # cv2.imshow('img_leap', undistorted_right)

                # json
                json_obj = utils.frame2json_struct(frame)
                # print(6)

                # PICOFLEXX
                # imgs == (z, ir)
                ret_pico, imgs = utils.get_images_from_picoflexx(self.q)
                # print("ret_pico, z, ir", ret_pico, imgs[0], imgs[1])
                if not ret_pico:
                    print("pico image not valid")
                    error_queue = True
                    continue

                cv2.moveWindow('img_rgb', -700, 325)
                cv2.moveWindow('img_leap', -1150, 400)
                cv2.moveWindow('img_ir', -1500, 600)
                cv2.imshow('img_leap', undistorted_right)
                cv2.imshow('img_rgb', img_rgb)
                cv2.imshow('img_ir', imgs[1])
                # print(7)

                list_img_rr.append(raw_img_r.copy())
                list_img_ru.append(undistorted_right.copy())
                list_img_lr.append(raw_img_l.copy())
                list_img_lu.append(undistorted_left.copy())
                list_img_rgb.append(img_rgb.copy())
                list_json.append(json_obj)
                list_img_z.append(imgs[0].copy())
                list_img_ir.append(imgs[1].copy())
                # list_img_z.append(z.copy())
                # list_img_ir.append(ir.copy())

                frame_counter += 1
                # print(8)
            else:
                print('image not valid')
        else:
            print("\rerror in getting valid leap motion frame", end="")
            # print(self.listener.recording)

    self.listener.setRecording(False)
    self.cam.stopCapture()
    # print(self.listener.recording)
    # release rgb camera
    # cap.release()
    print('\nrecord {}/2 of g{} completed'.format(self.record_number, self.gesture_id))
    record_if_valid = False

    # write to disk
    # if not args.on_disk:
    if self.rewrite:
        rewrite = True
    else:
        rewrite = False
    return utils.GestureData(self.gesture_id, list_img_rr, list_img_ru, list_img_lr,
                             list_img_lu, list_json, list_img_rgb, list_img_z,
                             list_img_ir, self.directory_rr, self.directory_ru,
                             self.directory_lr, self.directory_lu,
                             self.directory_leap_info, self.directory_rgb,
                             self.directory_z, self.directory_ir, rewrite=rewrite)

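# --- illustrative sketch (not part of the original sources) ------------------
# run_session() collects the values returned by saveGestureData() and calls
# join() on them, so the method is expected to start a background write and
# return the thread. A minimal sketch of that pattern; the class and method
# bodies below are placeholders, not the actual utils.GestureData implementation.
import threading

class GestureDataSketch:
    def __init__(self, frames, directory):
        self.frames = frames
        self.directory = directory

    def _write(self):
        # persist self.frames under self.directory (e.g. cv2.imwrite / json.dump)
        pass

    def saveGestureData(self):
        # run the disk write on a background thread and hand it back for join()
        th = threading.Thread(target=self._write)
        th.start()
        return th
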
def run(controller, cam): # inizializzazione picoflexx q = queue.Queue() listener = MyListener(q, recording=False) cam.registerDataListener(listener) # cam.startCapture() if not os.path.exists("./data"): session_counter = 0 session_start = 0 utils.save_session_info(session_id=session_counter) os.makedirs("./data") elif os.path.exists("./data") and not os.path.exists(file_info): print("json file has to be present - check utils.save_session_info()") exit() else: session_start = utils.load_session_info() + 1 session_counter = session_start last_session = False for i in range(NUMBER_OF_SESSIONS_PER_PERSON): if session_counter == session_start: print("press E to start new session of recording") utils.draw_ui(text="press E to start new session of recording") elif i < NUMBER_OF_SESSIONS_PER_PERSON - 1: print("press E to record session {}".format(i)) utils.draw_ui(text="press E to record session {}".format(i)) else: # ultima sessione print("press E to record session {}".format(i)) utils.draw_ui(text="press E to record session {}".format(i)) last_session = True k = cv2.waitKey() if k == ord('e'): pass # elif k == ord('q'): # print("end collection") # utils.save_session_info(session_id=session_counter - 1) # break sess = Session(id_session=session_counter, controller=controller, cam=cam, queue=q, listener=listener, last_session=last_session) # creazione directory per sessione directory = "./data/{:03d}".format(sess.id_session) if not os.path.exists(directory): os.makedirs(directory) sess.dir = directory print("session {} started".format(sess.id_session)) sess.run_session() session_counter += 1 # end collection print("end collection") utils.save_session_info(session_id=session_counter - 1)
def run_session(self):
    # init leap motion
    print("waiting for maps initialization...")
    while True:
        frame = self.controller.frame()
        image_l = frame.images[0]
        image_r = frame.images[1]
        if image_l.is_valid and image_r.is_valid:
            left_coordinates, left_coefficients = utils.convert_distortion_maps(image_l)
            right_coordinates, right_coefficients = utils.convert_distortion_maps(image_r)
            maps_initialized = True
            print('maps initialized')
            break
        else:
            print('\rinvalid leap motion frame', end="")

    # initialize video capture
    while True:
        cap = cv2.VideoCapture(1)
        print(cap)
        if cap:
            # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920.0)
            # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080.0)
            # print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            break
        else:
            print("\rerror rgb cam", end="")
    print("ready to go")

    list_of_gestures = []
    for i in range(0, len(gestures)):
        if (i == len(gestures) - 1) and self.last_session is False:
            break
        utils.draw_ui(text="press S to start recording {0}: {1}".format(i, gestures[i]))
        while cv2.waitKey() != ord('s'):
            pass
        g = Gesture(i, gestures[i], self.controller, self.cam, self.q,
                    self.listener, cap, self.id_session, maps_initialized,
                    left_coord=left_coordinates, left_coeff=left_coefficients,
                    right_coord=right_coordinates, right_coeff=right_coefficients)
        list_of_gestures.append(g.record())

    # release videocapture
    cap.release()

    list_of_thread = []
    for x in list_of_gestures:
        utils.draw_ui("Saving Session...")
        cv2.waitKey(1)
        list_of_thread.append(x.saveGestureData())
    for th in list_of_thread:
        th.join()
    print("Recording session saved")