def __init__(self): self.setup(self.root_dir, self.setup_dir) if __name__ == '__main__': #hsrb_interface.Robot() rospy.init_node('readJoy_node', anonymous=True) cam = RGBD() c_o = Corl_Options() count = 0 while True: rgb_img = cam.read_color_data() count += 1 if(not rgb_img == None): img_cropped = rgb_img[c_o.OFFSET_X:c_o.OFFSET_X+c_o.WIDTH,c_o.OFFSET_Y:c_o.OFFSET_Y+c_o.HEIGHT,:] cv2.imshow('debug',img_cropped) cv2.waitKey(30)
class CardPicker(): def __init__(self): ''' Initialization class for a Policy Parameters ---------- yumi : An instianted yumi robot com : The common class for the robot cam : An open bincam class debug : bool A bool to indicate whether or not to display a training set point for debuging. ''' self.robot = hsrb_interface.Robot() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.cam = RGBD() self.com = COM() #self.com.go_to_initial_state(self.whole_body) self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.detector = Detector() self.joystick = JoyStick_X(self.com) self.suction = Suction(self.gp, self.cam, self.com.Options) #self.suction.stop() #thread.start_new_thread(self.ql.run,()) print "after thread" def find_mean_depth(self, d_img): ''' Evaluates the current policy and then executes the motion specified in the the common class ''' indx = np.nonzero(d_img) mean = np.mean(d_img[indx]) return def find_card(self): self.whole_body.move_to_neutral() self.whole_body.move_to_joint_positions({ 'arm_flex_joint': -1.57, 'head_tilt_joint': -.785 }) print self.check_card_found()[0] i = 0 while True: print "panning ", i self.whole_body.move_to_joint_positions({'head_pan_joint': .30}) i += 1 if self.check_card_found()[0]: break print "found card!" self.card_pick() def card_pick(self): while True: c_img = self.cam.read_color_data() c_img_c = np.copy(c_img) cv2.imshow('debug_true', c_img_c) cv2.waitKey(30) d_img = self.cam.read_depth_data() if (not c_img == None and not d_img == None): c_img_cropped, d_img = self.com.format_data( np.copy(c_img), d_img) data = self.detector.numpy_detector(np.copy(c_img_cropped)) cur_recording = self.joystick.get_record_actions_passive() self.suction.find_pick_region_net(data, c_img, d_img, c_img_c) if (cur_recording[0] < -0.1): card_found, cards = self.check_card_found() if (card_found): self.suction.execute_grasp(cards, self.whole_body) self.com.go_to_initial_state(self.whole_body) def check_card_found(self): # try: transforms = self.tl.getFrameStrings() cards = [] for transform in transforms: #print transform if 'card' in transform: print 'got here' f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame', transform, rospy.Time(0)) cards.append(transform) return True, cards # except: return False, []
class SiemensDemo(): def __init__(self): """ Class to run HSR lego task """ self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() self.com.go_to_initial_state(self.whole_body) self.grasp_count = 0 self.helper = Helper(cfg) self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper')) self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('suction')) self.gm = GraspManipulator(self.gp, self.gripper, self.suction, self.whole_body, self.omni_base, self.tl) self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE) self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK) model_path = 'main/output_inference_graph.pb' label_map_path = 'main/object-detection.pbtxt' self.det = Detector(model_path, label_map_path) print "after thread" def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img): print("grasping a " + cfg.labels[bbox.label]) group = bbox.to_group(c_img, col_img) display_grasps(workspace_img, [group]) pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img) grasp_pose = self.gripper.get_grasp_pose(pose[0], pose[1], pose[2], rot, c_img=workspace_img.data) self.gm.execute_grasp(grasp_pose, class_num=bbox.label) def run_singulate(self, col_img, main_mask, to_singulate, d_img): print("singulating") singulator = Singulation(col_img, main_mask, [g.mask for g in to_singulate]) waypoints, rot, free_pix = singulator.get_singulation() singulator.display_singulation() self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True) def get_bboxes_from_net(self, path): output_dict, vis_util_image = self.det.predict(path) plt.savefig('debug_imgs/predictions.png') plt.close() plt.clf() plt.cla() img = cv2.imread(path) boxes = format_net_bboxes(output_dict, img.shape) return boxes, vis_util_image def get_bboxes_from_web(self, path): labels = self.web.label_image(path) objs = labels['objects'] bboxes = [Bbox(obj['box'], obj['class']) for obj in objs] return bboxes def determine_to_ask_for_help(self, bboxes, col_img): bboxes = deepcopy(bboxes) col_img = deepcopy(col_img) single_objs = find_isolated_objects_by_overlap(bboxes) if len(single_objs) > 0: return False else: isolated_exist = find_isolated_objects_by_distance(bboxes, col_img) return isolated_exist def get_bboxes(self, path, col_img): boxes, vis_util_image = self.get_bboxes_from_net(path) #low confidence or no objects if self.determine_to_ask_for_help(boxes, col_img): self.helper.asked = True self.helper.start_timer() boxes = self.get_bboxes_from_web(path) self.helper.stop_timer() self.dl.save_stat("duration", self.helper.duration) self.dl.save_stat("num_online", cfg.NUM_ROBOTS_ON_NETWORK) return boxes, vis_util_image def siemens_demo(self): self.gm.position_head() time.sleep(3) #making sure the robot is finished moving c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() while not (c_img is None or d_img is None): path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png" cv2.imwrite(path, c_img) time.sleep(2) #make sure new image is written before being read # print "\n new iteration" main_mask = crop_img(c_img, simple=True) col_img = ColorImage(c_img) workspace_img = col_img.mask_binary(main_mask) bboxes, vis_util_image = self.get_bboxes(path, col_img) if len(bboxes) == 0: print("Cleared 
the workspace") print("Add more objects, then resume") IPython.embed() else: box_viz = draw_boxes(bboxes, c_img) cv2.imwrite("debug_imgs/box.png", box_viz) single_objs = find_isolated_objects_by_overlap(bboxes) grasp_sucess = 1.0 if len(single_objs) == 0: single_objs = find_isolated_objects_by_distance( bboxes, col_img) if len(single_objs) > 0: to_grasp = select_first_obj(single_objs) singulation_time = 0.0 self.run_grasp(to_grasp, c_img, col_img, workspace_img, d_img) grasp_sucess = self.dl.record_success( "grasp", other_data=[c_img, vis_util_image, d_img]) else: #for accurate singulation should have bboxes for all fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes] groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs] groups = merge_groups(groups, cfg.DIST_TOL) self.run_singulate(col_img, main_mask, groups, d_img) sing_start = time.time() singulation_success = self.dl.record_success( "singulation", other_data=[c_img, vis_util_image, d_img]) singulation_time = time.time() - sing_start if cfg.EVALUATE: reward = self.helper.get_reward(grasp_sucess, singulation_time) self.dl.record_reward(reward) #reset to start self.whole_body.move_to_go() self.gm.move_to_home() self.gm.position_head() time.sleep(3) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data()
# make save dir if not os.path.exists(SAVE_PATH): os.makedirs(SAVE_PATH) # Orient the robot appropriately. make sure it is starting about a foot away from the long side of the bed, facing parallel to the bed robot = hsrb_interface.Robot() whole_body = robot.get('whole_body') whole_body.move_to_go() whole_body.move_to_joint_positions({'arm_flex_joint': -np.pi / 16.0}) whole_body.move_to_joint_positions({'head_pan_joint': np.pi / 2.0}) whole_body.move_to_joint_positions({'head_tilt_joint': -np.pi / 4.0}) whole_body.move_to_joint_positions({'arm_lift_joint': 0.120}) # start collecting data rollout_num = 1 # do about 120 rollouts for decent dataset camera = RGBD() labels = list() userinput = 'a' while userinput != 'q': print("rollout", rollout_num) # generate random numbers for top/bottom, success/failure of rollout top = np.random.randint(2) success = np.random.randint(2) labels.append(str(success)) # classification labels print("Top (1)/Bottom (0): ", top) print("Success (1)/Failure (0): ", success) userinput = raw_input( "press enter to take the picture, or enter q to quit: ") if userinput != 'q': c_img = camera.read_color_data() d_img = camera.read_depth_data()
class LegoDemo(): def __init__(self): """ Class to run HSR lego task """ self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() # if not DEBUG: self.com.go_to_initial_state(self.whole_body) # self.tt = TableTop() # self.tt.find_table(self.robot) self.grasp_count = 0 self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper')) self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('suction')) self.gm = GraspManipulator(self.gp, self.gripper, self.suction, self.whole_body, self.omni_base) self.collision_world = hsrb_interface.collision_world.CollisionWorld( "global_collision_world") self.collision_world.remove_all() self.collision_world.add_box(x=.8, y=.9, z=0.5, pose=geometry.pose(y=1.4, z=0.15), frame_id='map') print "after thread" def find_mean_depth(self, d_img): """ Evaluates the current policy and then executes the motion specified in the the common class """ indx = np.nonzero(d_img) mean = np.mean(d_img[indx]) return def get_success(self, action): """ Parameters ---------- action : str action to query success of Returns ------- :str: y or n """ if cfg.COLLECT_DATA and cfg.QUERY: print("Was " + action + " successful? (y or n)") succ = raw_input() while not (succ == "y" or succ == "n"): print("Enter only y or n to indicate success of " + action) succ = raw_input() return succ else: return "no data queried" def get_int(self): """ Returns ------- int """ if cfg.COLLECT_DATA and cfg.QUERY: print("How many legos are on the table?") inp = raw_input() while not inp.isdigit(): print("Enter an integer.") inp = raw_input() return inp else: return "no data queried" def get_str(self): """ Returns ------- :str: arbitary value """ if cfg.COLLECT_DATA and cfg.QUERY: print( "Any notes? (pushed legos off table, ran into table, etc.) 
(no if none)" ) inp = raw_input() return inp else: return "no data queried" def run_singulation(self, col_img, main_mask, d_img, to_singulate): """ Parameters ---------- col_img : `ColorImage` main_mask : `BinaryImage` d_img : 'DepthImage' to_singulate : list of `Group` """ print("SINGULATING") self.dm.update_traj("action", "singulate") a = time.time() singulator = Singulation(col_img, main_mask, [g.mask for g in to_singulate]) waypoints, rot, free_pix = singulator.get_singulation() self.dm.update_traj("compute_singulate_time", time.time() - a) singulator.display_singulation() self.dm.update_traj("singulate_info", (waypoints, rot, free_pix)) self.dm.update_traj("singulate_successes", "crashed") self.dm.update_traj("execute_singulate_time", "crashed") self.dm.overwrite_traj() a = time.time() self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True) self.dm.update_traj("execute_singulate_time", time.time() - a) self.dm.update_traj("singulate_success", self.get_success("singulation")) self.dm.overwrite_traj() def run_grasps(self, workspace_img, to_grasp): """ Parameters ---------- workspace_img : `ColorImage` to_grasp : list of tuple of: (`Group`, grasp_pose, suction_pose, class_num) """ print("GRASPING") #impose ordering on grasps (by closest/highest y first) to_grasp.sort(key=lambda g: -1 * g[0].cm[0]) self.dm.update_traj("action", "grasp") self.dm.update_traj("all_grasps_info", [(c[0].cm, c[0].dir, c[0].mask.data, c[3]) for c in to_grasp]) if not cfg.CHAIN_GRASPS: to_grasp = to_grasp[0:1] self.dm.update_traj("run_grasps_info", [(c[0].cm, c[0].dir, c[0].mask.data, c[3]) for c in to_grasp]) display_grasps(workspace_img, [g[0] for g in to_grasp]) successes = ["not attempted" for i in range(len(to_grasp))] correct_colors = ["not attempted" for i in range(len(to_grasp))] times = ["not attempted" for i in range(len(to_grasp))] for i in range(len(to_grasp)): print "grasping", to_grasp[i][1] successes[i] = "crashed" correct_colors[i] = "crashed" times[i] = "crashed" self.dm.update_traj("grasp_successes", successes) self.dm.update_traj("grasp_colors", correct_colors) self.dm.update_traj("execute_grasp_times", times) self.dm.overwrite_traj() a = time.time() self.gm.execute_grasp(to_grasp[i][1], class_num=to_grasp[i][3]) # self.gm.execute_suction(to_grasp[i][2], to_grasp[i][3]) times[i] = time.time() - a successes[i] = self.get_success("grasp") correct_colors[i] = self.get_success("correct color") self.dm.update_traj("grasp_successes", successes) self.dm.update_traj("grasp_colors", correct_colors) self.dm.update_traj("execute_grasp_times", times) self.dm.overwrite_traj() def clusters_to_actions(self, groups, col_img, d_img, workspace_img): """ Parameters ---------- groups : list of `Group` col_img : `ColorImage` d_img : `DepthImage` workspace_img : `ColorImage` Returns ------- list of tuples of form: (`Group`, grasp_pose, suction_pose, class_num) list of `Group` """ find_grasps_times = [] compute_grasps_times = [] find_hsv_times = [] to_singulate = [] to_grasp = [] for group in groups: a = time.time() inner_groups = grasps_within_pile(col_img.mask_binary(group.mask)) find_grasps_times.append(time.time() - a) if len(inner_groups) == 0: to_singulate.append(group) else: for in_group in inner_groups: a = time.time() pose, rot = self.gm.compute_grasp(in_group.cm, in_group.dir, d_img) grasp_pose = self.gripper.get_grasp_pose( pose[0], pose[1], pose[2], rot, c_img=workspace_img.data) suction_pose = self.suction.get_grasp_pose( pose[0], pose[1], pose[2], rot, c_img=workspace_img.data) 
compute_grasps_times.append(time.time() - a) a = time.time() class_num = hsv_classify(col_img.mask_binary( in_group.mask)) find_hsv_times.append(time.time() - a) to_grasp.append( (in_group, grasp_pose, suction_pose, class_num)) self.dm.update_traj("compute_grasps_times", compute_grasps_times) self.dm.update_traj("find_grasps_times", find_grasps_times) self.dm.update_traj("find_hsv_times", find_hsv_times) return to_grasp, to_singulate def lego_demo(self): if not cfg.COLLECT_DATA: print("WARNING: NO DATA IS BEING COLLECTED") print("TO COLLECT DATA, CHANGE COLLECT_DATA IN config_tpc") self.dm = DataManager() self.get_new_grasp = True # DEBUG = False # if not DEBUG: # self.gm.position_head() time.sleep(3) #making sure the robot is finished moving c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() while not (c_img is None or d_img is None): print "\n new iteration" main_mask = crop_img(c_img, use_preset=True, arc=False) col_img = ColorImage(c_img) workspace_img = col_img.mask_binary(main_mask) self.dm.clear_traj() self.dm.update_traj("c_img", c_img) self.dm.update_traj("d_img", d_img) self.dm.update_traj("stop_condition", "crash") cv2.imwrite("debug_imgs/c_img.png", c_img) self.dm.update_traj("crop", workspace_img.data) a = time.time() groups = run_connected_components(workspace_img, viz=True) self.dm.update_traj("connected_components_time", time.time() - a) self.dm.update_traj("num_legos", self.get_int()) self.dm.append_traj() print "num masses", len(groups) if len(groups) == 0: print("cleared workspace") self.dm.update_traj("stop_condition", self.get_success("clearing table")) self.dm.overwrite_traj() time.sleep(5) break to_grasp, to_singulate = self.clusters_to_actions( groups, col_img, d_img, workspace_img) self.whole_body.collision_world = self.collision_world if len(to_grasp) > 0: self.run_grasps(workspace_img, to_grasp) else: self.run_singulation(col_img, main_mask, d_img, to_singulate) self.dm.update_traj("notes", self.get_str()) #reset to start self.whole_body.move_to_go() self.gm.move_to_home() self.gm.position_head() time.sleep(8) #making sure the robot is finished moving c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() self.dm.update_traj("stop_condition", "none") self.dm.update_traj("c_img_result", c_img) self.dm.update_traj("d_img_result", d_img) self.dm.overwrite_traj()
class BedMaker(): def __init__(self): ''' Initialization class for a Policy Parameters ---------- yumi : An instianted yumi robot com : The common class for the robot cam : An open bincam class debug : bool A bool to indicate whether or not to display a training set point for debuging. ''' self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() # if cfg.USE_WEB_INTERFACE: # self.wl = Web_Labeler() # else: # self.wl = Python_Labeler(cam = self.cam) self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() self.tt.find_table(self.robot) self.grasp_count = 0 self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Lego_Gripper(self.gp,self.cam,self.com.Options,self.robot.get('gripper')) #self.test_current_point() #thread.start_new_thread(self.ql.run,()) print "after thread" def find_mean_depth(self,d_img): ''' Evaluates the current policy and then executes the motion specified in the the common class ''' indx = np.nonzero(d_img) mean = np.mean(d_img[indx]) return def bed_make(self): self.rollout_data = [] self.get_new_grasp = True self.position_head() while True: time.sleep(2) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if(not c_img == None and not d_img == None): if self.get_new_grasp: c_m, dirs = run_connected_components(c_img) draw(c_img,c_m,dirs) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() self.gripper.find_pick_region_cc(c_m[0],dirs[0],c_img,d_img,self.grasp_count) pick_found,bed_pick = self.check_card_found() self.gripper.execute_grasp(bed_pick,self.whole_body,'head_down') self.grasp_count += 1 self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base,'lower_start') time.sleep(1) self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.8}) def check_card_found(self): # try: transforms = self.tl.getFrameStrings() cards = [] try: for transform in transforms: print transform current_grasp = 'bed_'+str(self.grasp_count) if current_grasp in transform: print 'got here' f_p = self.tl.lookupTransform('map',transform, rospy.Time(0)) cards.append(transform) except: rospy.logerr('bed pick not found yet') return True, cards def position_head(self): self.tt.move_to_pose(self.omni_base,'lower_start') self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.8})
class BedMaker(): def __init__(self, args): """For data collection of bed-making, NOT the deployment. Assumes we roll out the robot's policy via code (not via human touch). This is the 'slower' way where we have the python interface that the human clicks on to indicate grasping points. Good news is, our deployment code is probably going to be similar to this. For joystick: you only need it plugged in for the initial state sampler, which (at the moment) we are not even using. """ self.robot = robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = robot.get('omni_base') self.whole_body = robot.get('whole_body') self.cam = RGBD() self.com = COM() self.wl = Python_Labeler(cam=self.cam) # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want). self.view_mode = cfg.VIEW_MODE # Set up initial state, table, etc. self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() # For now, a workaround. Ugly but it should do the job ... #self.tt.find_table(robot) self.tt.make_fake_ar() self.tt.find_table_workaround(robot) #self.ins = InitialSampler(self.cam) self.side = 'BOTTOM' self.grasp_count = 0 # Bells and whistles; note the 'success check' to check if transitioning self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options, robot.get('gripper')) self.sc = Success_Check(self.whole_body, self.tt, self.cam, self.omni_base) time.sleep(4) print( "Finished creating BedMaker()! Get the bed set up and run bed-making!" ) if cfg.INS_SAMPLE: print("TODO: we don't have sampling code here.") # When we start, spin this so we can check the frames. Then un-comment, # etc. It's the current hack we have to get around crummy AR marker detection. if args.phase == 1: print("Now doing rospy.spin() because phase = 1.") rospy.spin() def bed_make(self): """Runs the pipeline for data collection. You can run this for multiple bed-making trajectories. For now, though, assume one call to this means one trajectory. """ self.rollout_data = [] self.get_new_grasp = True # I think, creates red line in GUI where we adjust the bed to match it. # But in general we better fix our sampler before doing this for real. # Don't forget to press 'B' on the joystick to get past this screen. if cfg.INS_SAMPLE: u_c, d_c = self.ins.sample_initial_state() self.rollout_data.append([u_c, d_c]) while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if (not c_img.all() == None and not d_img.all() == None): if self.get_new_grasp: self.position_head() # Human supervisor labels. data = dictionary of relevant info data = self.wl.label_image(c_img) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() self.add_data_point(c_img, d_img, data, self.side, 'grasp') # Broadcasts grasp pose self.gripper.find_pick_region_labeler( data, c_img, d_img, self.grasp_count) # Execute the grasp and check for success. But if VIEW_MODE is # close, better to reset to a 'nicer' position for base movement. 
pick_found, bed_pick = self.check_card_found() if self.side == "BOTTOM": self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base, 'lower_start') self.gripper.execute_grasp(bed_pick, self.whole_body, 'head_down') else: self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base, 'top_mid') self.gripper.execute_grasp(bed_pick, self.whole_body, 'head_up') self.check_success_state() def check_success_state(self): """ Checks whether a single grasp in a bed-making trajectory succeeded. Depends on which side of the bed the HSR is at. Invokes human supervisor and transitions the HSR if successful. """ if self.side == "BOTTOM": success, data = self.sc.check_bottom_success(self.wl) else: success, data = self.sc.check_top_success(self.wl) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() self.add_data_point(c_img, d_img, data, self.side, 'success') print("WAS SUCCESFUL: {}".format(success)) # Handle transitioning to different side if success: if self.side == "BOTTOM": self.transition_to_top() else: self.transition_to_start() self.update_side() self.grasp_count += 1 self.get_new_grasp = True else: self.grasp_count += 1 # If grasp failure, invokes finding region again and add new data self.gripper.find_pick_region_labeler(data, c_img, d_img, self.grasp_count) self.add_data_point(c_img, d_img, data, self.side, 'grasp') self.get_new_grasp = False def update_side(self): """TODO: extend to multiple side switches?""" if self.side == "BOTTOM": self.side = "TOP" def transition_to_top(self): """Transition to top (not bottom).""" self.move_to_top_side() def transition_to_start(self): """Transition to start=bottom, save rollout data, exit program. Saves to a supervisor's directory since we're using a supervisor. """ self.com.save_rollout(self.rollout_data) self.move_to_start() sys.exit() def add_data_point(self, c_img, d_img, data, side, typ, pose=None): """Adds a dictionary to the `rollout_data` list.""" grasp_point = {} grasp_point['c_img'] = c_img grasp_point['d_img'] = d_img if pose == None: label = data['objects'][0]['box'] pose = [(label[2] - label[0]) / 2.0 + label[0], (label[3] - label[1]) / 2.0 + label[1]] grasp_point['pose'] = pose grasp_point['class'] = data['objects'][0]['class'] grasp_point['side'] = side grasp_point['type'] = typ self.rollout_data.append(grasp_point) def position_head(self): """Position the head for a grasp attempt. After playing around a bit, I think `head_tilt_joint` should be set last. 
""" self.whole_body.move_to_go() if self.side == "BOTTOM": self.tt.move_to_pose(self.omni_base, 'lower_start_tmp') self.whole_body.move_to_joint_positions( {'arm_flex_joint': -np.pi / 16.0}) self.whole_body.move_to_joint_positions( {'head_pan_joint': np.pi / 2.0}) self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120}) self.whole_body.move_to_joint_positions( {'head_tilt_joint': -np.pi / 4.0}) def move_to_top_side(self): """Assumes we're at the bottom and want to go to the top.""" self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base, 'right_down') self.tt.move_to_pose(self.omni_base, 'right_mid') self.tt.move_to_pose(self.omni_base, 'right_up') self.tt.move_to_pose(self.omni_base, 'top_mid_tmp') def move_to_start(self): """Assumes we're at the top and we go back to the start.""" self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base, 'right_up') self.tt.move_to_pose(self.omni_base, 'right_mid') self.tt.move_to_pose(self.omni_base, 'right_down') self.tt.move_to_pose(self.omni_base, 'lower_mid') def check_card_found(self): """Looks up the pose for where the HSR's hand should go to.""" transforms = self.tl.getFrameStrings() cards = [] try: for transform in transforms: current_grasp = 'bed_' + str(self.grasp_count) if current_grasp in transform: print('found {}'.format(current_grasp)) f_p = self.tl.lookupTransform('map', transform, rospy.Time(0)) cards.append(transform) except: rospy.logerr('bed pick not found yet') return True, cards
class BedMaker(): def __init__(self): ''' Initialization class for a Policy Parameters ---------- yumi : An instianted yumi robot com : The common class for the robot cam : An open bincam class debug : bool A bool to indicate whether or not to display a training set point for debuging. ''' self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') #PARAMETERS TO CHANGE self.side = 'TOP' self.r_count = 0 self.grasp_count = 0 self.success_count = 0 self.true_count = 0 self.grasp = True self.r_count = self.get_rollout_number() self.cam = RGBD() self.com = COM() self.joystick = JoyStick_X(self.com) if cfg.USE_WEB_INTERFACE: self.wl = Web_Labeler() else: self.wl = Python_Labeler(cam=self.cam) self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() self.tt.find_table(self.robot) self.position_head() self.br = tf.TransformBroadcaster() self.tl = TransformListener() #self.test_current_point() time.sleep(4) #thread.start_new_thread(self.ql.run,()) print "after thread" def get_rollout_number(self): if self.side == "BOTTOM": rollouts = glob.glob(cfg.FAST_PATH + 'b_grasp/*.png') else: rollouts = glob.glob(cfg.FAST_PATH + 't_grasp/*.png') r_nums = [] for r in rollouts: a = r[56:] i = a.find('_') r_num = int(a[:i]) r_nums.append(r_num) return max(r_nums) + 1 def position_head(self): if self.side == "TOP": self.tt.move_to_pose(self.omni_base, 'right_down') self.tt.move_to_pose(self.omni_base, 'right_up') self.tt.move_to_pose(self.omni_base, 'top_mid') self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8}) elif self.side == "BOTTOM": self.tt.move_to_pose(self.omni_base, 'lower_start') self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8}) def collect_data_bed(self): while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() cv2.imshow('video_feed', c_img) cv2.waitKey(30) cur_recording = self.joystick.get_record_actions_passive() if (cur_recording[0] < -0.1 and self.true_count % 20 == 0): print "PHOTO SNAPPED " self.save_image(c_img) if self.grasp: self.grasp_count += 1 self.grasp = False else: self.success_count += 1 self.grasp = True if (cur_recording[1] < -0.1 and self.true_count % 20 == 0): print "ROLLOUT DONE " self.r_count += 1 self.grasp_count = 0 self.success_count = 0 self.grasp = True self.true_count += 1 def save_image(self, c_img): if self.side == "BOTTOM": if self.grasp: cv2.imwrite( cfg.FAST_PATH + 'b_grasp/frame_' + str(self.r_count) + '_' + str(self.grasp_count) + '.png', c_img) else: cv2.imwrite( cfg.FAST_PATH + 'b_success/frame_' + str(self.r_count) + '_' + str(self.success_count) + '.png', c_img) else: if self.grasp: cv2.imwrite( cfg.FAST_PATH + 't_grasp/frame_' + str(self.r_count) + '_' + str(self.grasp_count) + '.png', c_img) else: cv2.imwrite( cfg.FAST_PATH + 't_success/frame_' + str(self.r_count) + '_' + str(self.success_count) + '.png', c_img)
import rospy from il_ros_hsr.core.grasp_planner import GraspPlanner from il_ros_hsr.p_pi.bed_making.com import Bed_COM as COM import sys sys.path.append('/home/autolab/Workspaces/michael_working/fast_grasp_detect/') from skvideo.io import vwrite from il_ros_hsr.core.joystick_X import JoyStick_X if __name__ == '__main__': robot = hsrb_interface.Robot() videos_color = [] cam = RGBD() jy = JoyStick_X() while True: time.sleep(0.05) img = cam.read_color_data() if (not img == None): cv2.imshow('debug2', img) cv2.waitKey(30) img = img[:, :, (2, 1, 0)] videos_color.append(img) cur_recording = jy.get_record_actions_passive() if (cur_recording[0] < -0.1):
if __name__ == '__main__': robot = hsrb_interface.Robot() Options = options() yolo_detect = YoloDetect(Options) com = COM() omni_base = robot.get('omni_base') whole_body = robot.get('whole_body') gripper = robot.get('gripper') #com.go_to_initial_state(whole_body,gripper) cam = RGBD() time.sleep(3) while True: c_img = cam.read_color_data() d_img = cam.read_depth_data() c_img,d_img = com.format_data(c_img,d_img) yolo_detect.get_detect(c_img,d_img) yolo_detect.broadcast_poses()
def __init__(self): """ For faster data collection where we manually simulate it. We move with our hands. This will give us the large datasets we need. Supports both grasping and success net data collection. If doing the grasping, DON'T MAKE IT A SUCCESS CASE where the blanket is all the way over the corner. That way we can use the images for both grasping and as failure cases for the success net. For the success net data collection, collect data at roughly a 5:1 ratio of successes:failures, and make failures the borderline cases. Then we borrow data from the grasping network to make it 5:5 or 1:1 for the actual success net training process (use another script for forming the data). We use the keys on the joystick to indicate the success/failure class. """ makedirs() self.robot = robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.cam = RGBD() self.com = COM() self.wl = Python_Labeler(cam=self.cam) # ---------------------------------------------------------------------- # PARAMETERS TO CHANGE (well, really the 'side' and 'grasp' only). # We choose a fixed side and collect data from there, no switching. # Automatically saves based on `r_count` and counting the saved files. # `self.grasp` remains FIXED in the code, so we're either only # collecting grasp or only collecting success images. # ---------------------------------------------------------------------- self.side = 'BOTTOM' # CHANGE AS NEEDED self.grasp = False # CHANGE AS NEEDED self.grasp_count = 0 self.success_count = 0 self.true_count = 0 self.r_count = self.get_rollout_number() self.joystick = JoyStick_X(self.com) print("NOTE: grasp={} (success={}), side: {}, rollout num: {}".format( self.grasp, not self.grasp, self.side, self.r_count)) print("Press X for any SUCCESS (class 0), Y for FAILURES (class 1).") # Set up initial state, table, etc. self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() # For now, a workaround. Ugly but it should do the job ... #self.tt.find_table(robot) self.tt.make_fake_ar() self.tt.find_table_workaround(robot) self.br = tf.TransformBroadcaster() self.tl = TransformListener() time.sleep(4) # When we start, spin this so we can check the frames. Then un-comment, # etc. It's the current hack we have to get around crummy AR marker detection. #rospy.spin() # THEN we position the head since that involves moving the _base_. self.position_head()
class BedMaker(): def __init__(self): """ For faster data collection where we manually simulate it. We move with our hands. This will give us the large datasets we need. Supports both grasping and success net data collection. If doing the grasping, DON'T MAKE IT A SUCCESS CASE where the blanket is all the way over the corner. That way we can use the images for both grasping and as failure cases for the success net. For the success net data collection, collect data at roughly a 5:1 ratio of successes:failures, and make failures the borderline cases. Then we borrow data from the grasping network to make it 5:5 or 1:1 for the actual success net training process (use another script for forming the data). We use the keys on the joystick to indicate the success/failure class. """ makedirs() self.robot = robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.cam = RGBD() self.com = COM() self.wl = Python_Labeler(cam=self.cam) # ---------------------------------------------------------------------- # PARAMETERS TO CHANGE (well, really the 'side' and 'grasp' only). # We choose a fixed side and collect data from there, no switching. # Automatically saves based on `r_count` and counting the saved files. # `self.grasp` remains FIXED in the code, so we're either only # collecting grasp or only collecting success images. # ---------------------------------------------------------------------- self.side = 'BOTTOM' # CHANGE AS NEEDED self.grasp = False # CHANGE AS NEEDED self.grasp_count = 0 self.success_count = 0 self.true_count = 0 self.r_count = self.get_rollout_number() self.joystick = JoyStick_X(self.com) print("NOTE: grasp={} (success={}), side: {}, rollout num: {}".format( self.grasp, not self.grasp, self.side, self.r_count)) print("Press X for any SUCCESS (class 0), Y for FAILURES (class 1).") # Set up initial state, table, etc. self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() # For now, a workaround. Ugly but it should do the job ... #self.tt.find_table(robot) self.tt.make_fake_ar() self.tt.find_table_workaround(robot) self.br = tf.TransformBroadcaster() self.tl = TransformListener() time.sleep(4) # When we start, spin this so we can check the frames. Then un-comment, # etc. It's the current hack we have to get around crummy AR marker detection. #rospy.spin() # THEN we position the head since that involves moving the _base_. self.position_head() def get_rollout_number(self): """Had to modify this a bit from Michael's code. Test+see if it works. For now, let's save based on how many `data.pkl` files we have in the appropriate directory. """ if self.side == "BOTTOM": nextdir = 'b_grasp' if not self.grasp: nextdir = 'b_success' rollouts = sorted([ x for x in os.listdir(join(FAST_PATH, nextdir)) if 'data' in x and 'pkl' in x ]) else: nextdir = 't_grasp' if not self.grasp: nextdir = 't_success' rollouts = sorted([ x for x in os.listdir(join(FAST_PATH, nextdir)) if 'data' in x and 'pkl' in x ]) return len(rollouts) def position_head(self): """Ah, I see, we can go straight to the top. Whew. It's new code reflecting the different poses and HSR joints: But, see important note below about commenting out four lines ... 
""" self.whole_body.move_to_go() if self.side == "BOTTOM": self.tt.move_to_pose(self.omni_base, 'lower_start_tmp') else: self.tt.move_to_pose(self.omni_base, 'right_down') self.tt.move_to_pose(self.omni_base, 'right_up') self.tt.move_to_pose(self.omni_base, 'top_mid_tmp') # NOTE: If robot is already at the top, I don't want to adjust position. # Thus, comment out the three lines and the preceding `else` statement. self.whole_body.move_to_joint_positions( {'arm_flex_joint': -np.pi / 16.0}) self.whole_body.move_to_joint_positions( {'head_pan_joint': np.pi / 2.0}) self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120}) self.whole_body.move_to_joint_positions( {'head_tilt_joint': -np.pi / 4.0}) # -np.pi/36.0}) def collect_data_grasp_only(self): """Collect data for the grasping network only, like H's method. Actually, some of these images should likely be part of the success network training, where the 'success=False' because I don't think I collected data here that was considered a 'success=True' ... """ data = [] assert self.grasp rc = str(self.r_count).zfill(3) while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() # Continually show the camera image on screen and wait for user. # Doing `k=cv2.waitKey(33)` and `print(k)` results in -1 except for # when we press the joystick in a certain configuration. cv2.imshow('video_feed', c_img) cv2.waitKey(30) # Here's what they mean. Y is top, and going counterclockwise: # Y: [ 1, 0] # X: [-1, 0] # this is what we want for image collection # A: [ 0, 1] # B: [ 0, -1] # use to terminate the rollout # # There is also a bit of a delay embedded, i.e., repeated clicking # of `X` won't save images until some time has passed. Good! It is # also necessary to press for a few milliseconds (and not just tap). cur_recording = self.joystick.get_record_actions_passive() if (cur_recording[0] < -0.1 and self.true_count % 20 == 0): print( "PHOTO SNAPPED (cur_recording: {})".format(cur_recording)) self.save_image(c_img, d_img) self.grasp_count += 1 # Add to dictionary info we want, including target pose. # Also add 'type' key since data augmentation code uses it. pose = red_contour(c_img) info = { 'c_img': c_img, 'd_img': d_img, 'pose': pose, 'type': grasp } data.append(info) print(" image {}, pose: {}".format(len(data), pose)) # -------------------------------------------------------------- # We better save each time since we might get a failure to # detect, thus losing some data. We overwrite existing saved # files, which is fine since it's the current rollout `r_count`. # Since we detect pose before this, if the pose isn't detected, # we don't save. Good. # -------------------------------------------------------------- if self.side == 'BOTTOM': save_path = join(FAST_PATH, 'b_grasp', 'data_{}.pkl'.format(rc)) else: save_path = join(FAST_PATH, 't_grasp', 'data_{}.pkl'.format(rc)) with open(save_path, 'w') as f: pickle.dump(data, f) # Kill the script and re-position HSR to get diversity in camera views. if (cur_recording[1] < -0.1 and self.true_count % 20 == 0): print("ROLLOUT DONE (cur_recording: {})".format(cur_recording)) print("Length is {}. See our saved pickle files.".format( len(data))) sys.exit() # Necessary, otherwise we'd save 3-4 times per click. self.true_count += 1 def collect_data_success_only(self): """Collect data for the success network. Should be more emphasis on the success cases (not failures) because the grasing network data can supplement the failures. Focus on _borderline_ failures in this method. 
Recall that 0 = successful grasp, 1 = failed grasp. SAVE AND EXIT FREQUENTLY, perhaps after every 15-20 images. It's easy to make a mistake with the class label, so better to exit early often. """ data = [] assert not self.grasp rc = str(self.r_count).zfill(3) while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() cv2.imshow('video_feed', c_img) cv2.waitKey(30) cur_recording = self.joystick.get_record_actions_passive() # Joystick controllers. Y is top, and going counterclockwise: # Y: [ 1, 0] # FAILURE images (class index 1) # X: [-1, 0] # SUCCESS images (class index 0) # A: [ 0, 1] # B: [ 0, -1] # terminate data collection # ------------------------------------------------------------------ if (cur_recording[0] < -0.1 or cur_recording[0] > 0.1) and self.true_count % 20 == 0: print( "PHOTO SNAPPED (cur_recording: {})".format(cur_recording)) if cur_recording[0] < -0.1: s_class = 0 elif cur_recording[0] > 0.1: s_class = 1 else: raise ValueError(cur_recording) self.save_image(c_img, d_img, success_class=s_class) self.success_count += 1 # Add to dictionary info we want, including the class. info = { 'c_img': c_img, 'd_img': d_img, 'class': s_class, 'type': 'success' } data.append(info) print(" image {}, class: {}".format(len(data), s_class)) if self.side == 'BOTTOM': save_path = join(FAST_PATH, 'b_success', 'data_{}.pkl'.format(rc)) else: save_path = join(FAST_PATH, 't_success', 'data_{}.pkl'.format(rc)) with open(save_path, 'w') as f: pickle.dump(data, f) # Kill the script and re-position HSR to get diversity in camera views. if (cur_recording[1] < -0.1 and self.true_count % 20 == 0): print("ROLLOUT DONE (cur_recording: {})".format(cur_recording)) print("Length is {}. See our saved pickle files.".format( len(data))) sys.exit() # Necessary, otherwise we'd save 3-4 times per click. self.true_count += 1 def save_image(self, c_img, d_img, success_class=None): """Save images. Don't forget to process depth images. For now I'm using a tuned cutoff like 1400, at least to _visualize_. NOTE: since the cutoff for turning depth images into black may change, it would be better to save the original d_img in a dictionary. Don't use cv2.imwrite() as I know from experience that it won't work as desired. """ rc = str(self.r_count).zfill(3) f_rc_grasp = 'frame_{}_{}.png'.format(rc, str(self.grasp_count).zfill(2)) f_rc_success = 'frame_{}_{}_class_{}.png'.format( rc, str(self.success_count).zfill(2), success_class) if np.isnan(np.sum(d_img)): cv2.patchNaNs(d_img, 0.0) d_img = depth_to_net_dim(d_img, cutoff=1400) # for visualization only if self.side == "BOTTOM": if self.grasp: pth1 = join(FAST_PATH, 'b_grasp', 'rgb_' + f_rc_grasp) pth2 = join(FAST_PATH, 'b_grasp', 'depth_' + f_rc_grasp) else: pth1 = join(FAST_PATH, 'b_success', 'rgb_' + f_rc_success) pth2 = join(FAST_PATH, 'b_success', 'depth_' + f_rc_success) else: if self.grasp: pth1 = join(FAST_PATH, 't_grasp', 'rgb_' + f_rc_grasp) pth2 = join(FAST_PATH, 't_grasp', 'depth_' + f_rc_grasp) else: pth1 = join(FAST_PATH, 't_success', 'rgb_' + f_rc_success) pth2 = join(FAST_PATH, 't_success', 'depth_' + f_rc_success) cv2.imwrite(pth1, c_img) cv2.imwrite(pth2, d_img)
class BedMaker(): def __init__(self): ''' Initialization class for a Policy Parameters ---------- yumi : An instianted yumi robot com : The common class for the robot cam : An open bincam class debug : bool A bool to indicate whether or not to display a training set point for debuging. ''' self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() if cfg.USE_WEB_INTERFACE: self.wl = Web_Labeler() else: self.wl = Python_Labeler(cam=self.cam) self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() self.tt.find_table(self.robot) self.grasp_count = 0 self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.ins = InitialSampler(self.cam) self.gp = GraspPlanner() self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper')) self.sc = Success_Check(self.whole_body, self.tt, self.cam, self.omni_base) self.ss = Self_Supervised(self.cam) #self.test_current_point() time.sleep(4) #thread.start_new_thread(self.ql.run,()) print "after thread" def find_mean_depth(self, d_img): ''' Evaluates the current policy and then executes the motion specified in the the common class ''' indx = np.nonzero(d_img) mean = np.mean(d_img[indx]) return def bed_make(self): self.rollout_data = [] self.get_new_grasp = True if cfg.INS_SAMPLE: u_c, d_c = self.ins.sample_initial_state() self.rollout_data.append([u_c, d_c]) while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if (not c_img == None and not d_img == None): if self.get_new_grasp: self.position_head() data = self.wl.label_image(c_img) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() self.add_data_point(c_img, d_img, data, self.side, 'grasp') self.gripper.find_pick_region_labeler( data, c_img, d_img, self.grasp_count) if cfg.SS_LEARN: grasp_points = self.ss.learn(self.whole_body, self.grasp_count) self.add_ss_data(grasp_points, data, self.side, 'grasp') pick_found, bed_pick = self.check_card_found() if self.side == "BOTTOM": self.gripper.execute_grasp(bed_pick, self.whole_body, 'head_down') else: self.gripper.execute_grasp(bed_pick, self.whole_body, 'head_up') self.check_success_state() def check_success_state(self): if self.side == "BOTTOM": success, data = self.sc.check_bottom_success(self.wl) else: success, data = self.sc.check_top_success(self.wl) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() self.add_data_point(c_img, d_img, data, self.side, 'success') print "WAS SUCCESFUL: " print success if (success): if cfg.SS_LEARN: grasp_points = self.ss.learn(self.whole_body, self.grasp_count) self.add_ss_data(grasp_points, data, self.side, 'success') if self.side == "BOTTOM": self.transition_to_top() else: self.transition_to_start() self.update_side() self.grasp_count += 1 self.get_new_grasp = True else: self.grasp_count += 1 self.gripper.find_pick_region_labeler(data, c_img, d_img, self.grasp_count) self.add_data_point(c_img, d_img, data, self.side, 'grasp') self.get_new_grasp = False if cfg.SS_LEARN: grasp_points = self.ss.learn(self.whole_body, self.grasp_count) self.add_ss_data(grasp_points, data, self.side, 'success') def update_side(self): if self.side == "BOTTOM": self.side = "TOP" def transition_to_top(self): if cfg.DEBUG_MODE: self.com.save_rollout(self.rollout_data) self.tt.move_to_pose(self.omni_base, 'lower_mid') sys.exit() else: self.move_to_top_side() def transition_to_start(self): 
self.com.save_rollout(self.rollout_data) self.move_to_start() sys.exit() def add_data_point(self, c_img, d_img, data, side, typ, pose=None): grasp_point = {} grasp_point['c_img'] = c_img grasp_point['d_img'] = d_img if pose == None: label = data['objects'][0]['box'] pose = [(label[2] - label[0]) / 2.0 + label[0], (label[3] - label[1]) / 2.0 + label[1]] grasp_point['pose'] = pose grasp_point['class'] = data['objects'][0]['class'] grasp_point['side'] = side grasp_point['type'] = typ self.rollout_data.append(grasp_point) def position_head(self): if self.side == "TOP": self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8}) elif self.side == "BOTTOM": self.tt.move_to_pose(self.omni_base, 'lower_start') self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8}) def add_ss_data(self, g_points, data, side, typ): for g_point in g_points: self.add_data_point(g_point['c_img'], g_point['d_img'], data, side, typ, pose=g_point['pose']) def move_to_top_side(self): self.tt.move_to_pose(self.omni_base, 'right_down') #self.tt.move_to_pose(self.omni_base,'right_mid') self.tt.move_to_pose(self.omni_base, 'right_up') self.tt.move_to_pose(self.omni_base, 'top_mid') def move_to_start(self): self.tt.move_to_pose(self.omni_base, 'right_up') #self.tt.move_to_pose(self.omni_base,'right_mid') self.tt.move_to_pose(self.omni_base, 'right_down') self.tt.move_to_pose(self.omni_base, 'lower_mid') def check_bottom_success(self): self.tt.move_to_pose(self.omni_base, 'lower_mid') self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8}) def check_card_found(self): # try: transforms = self.tl.getFrameStrings() cards = [] try: for transform in transforms: print transform current_grasp = 'bed_' + str(self.grasp_count) if current_grasp in transform: print 'got here' f_p = self.tl.lookupTransform('map', transform, rospy.Time(0)) cards.append(transform) except: rospy.logerr('bed pick not found yet') return True, cards
class BedMaker(): def __init__(self, args): """For deploying the bed-making policy, not for data collection. We use all three variants (analytic, human, networks) here due to similarities in code structure. """ self.args = args DEBUG = True # TODO: switch to a logger so debug mode handled easily # Set up the robot. self.robot = robot = hsrb_interface.Robot() if DEBUG: print("finished: hsrb_interface.Robot()...") self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') if DEBUG: print("finished: robot.get(omni_base)...") self.whole_body = self.robot.get('whole_body') if DEBUG: print("finished: robot.get(whole_body)...") self.cam = RGBD() self.com = COM() self.wl = Python_Labeler(cam=self.cam) # Set up initial state, table, etc. Don't forget view mode! self.view_mode = BED_CFG.VIEW_MODE self.com.go_to_initial_state(self.whole_body) if DEBUG: print("finished: go_to_initial_state() ...") self.tt = TableTop() if DEBUG: print("finished: TableTop()...") # For now, a workaround. Ugly but it should do the job ... #self.tt.find_table(robot) self.tt.make_fake_ar() self.tt.find_table_workaround(robot) #self.ins = InitialSampler(self.cam) self.side = 'BOTTOM' self.grasp_count = 0 self.b_grasp_count = 0 self.t_grasp_count = 0 # AH, build the YOLO network beforehand. g_cfg = BED_CFG.GRASP_CONFIG s_cfg = BED_CFG.SUCC_CONFIG self.yc = YOLO_CONV(options=s_cfg) self.yc.load_network() # Policy for grasp detection, using Deep Imitation Learning. # Or, actually, sometimes we will use humans or an analytic version. if DEBUG: self._test_variables() print("\nnow forming the GDetector with type {}".format(args.g_type)) if args.g_type == 'network': self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc) elif args.g_type == 'analytic': self.g_detector = Analytic_Grasp() # TODO not implemented! elif args.g_type == 'human': print("Using a human, don't need to have a `g_detector`. :-)") if DEBUG: self._test_variables() print("\nnow making success net") self.sn = Success_Net(self.whole_body, self.tt, self.cam, self.omni_base, fg_cfg=s_cfg, bed_cfg=BED_CFG, yc=self.yc) # Bells and whistles. self.br = TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options, robot.get('gripper')) self.dp = DrawPrediction() # When we start, do rospy.spin() to check the frames (phase 1). Then re-run. # The current hack we have to get around crummy AR marker detection. :-( if DEBUG: self._test_variables() print("Finished with init method") time.sleep(4) if args.phase == 1: print("Now doing rospy.spin() because phase = 1.") rospy.spin() # For evaluating coverage. self.img_start = None self.img_final = None self.img_start2 = None self.img_final2 = None # For grasp offsets. self.apply_offset = False def bed_make(self): """Runs the pipeline for deployment, testing out bed-making. """ # Get the starting image (from USB webcam). Try a second as well. cap = cv2.VideoCapture(0) frame = None while frame is None: ret, frame = cap.read() cv2.waitKey(50) self.image_start = frame cv2.imwrite('image_start.png', self.image_start) _, frame = cap.read() self.image_start2 = frame cv2.imwrite('image_start2.png', self.image_start2) cap.release() print( "NOTE! Recorded `image_start` for coverage evaluation. Was it set up?" ) def get_pose(data_all): # See `find_pick_region_labeler` in `p_pi/bed_making/gripper.py`. # It's because from the web labeler, we get a bunch of objects. # So we have to compute the pose (x,y) from it. 
res = data_all['objects'][0] x_min = float(res['box'][0]) y_min = float(res['box'][1]) x_max = float(res['box'][2]) y_max = float(res['box'][3]) x = (x_max - x_min) / 2.0 + x_min y = (y_max - y_min) / 2.0 + y_min return (x, y) args = self.args use_d = BED_CFG.GRASP_CONFIG.USE_DEPTH self.get_new_grasp = True self.new_grasp = True self.rollout_stats = [] # What we actually save for analysis later # Add to self.rollout_stats at the end for more timing info self.g_time_stats = [] # for _execution_ of a grasp self.move_time_stats = [] # for moving to the other side while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if (not c_img.all() == None and not d_img.all() == None): if self.new_grasp: self.position_head() else: self.new_grasp = True time.sleep(3) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() d_img_raw = np.copy(d_img) # Needed for determining grasp pose # -------------------------------------------------------------- # Process depth images! Helps network, human, and (presumably) analytic. # Obviously human can see the c_img as well ... hard to compare fairly. # -------------------------------------------------------------- if use_d: if np.isnan(np.sum(d_img)): cv2.patchNaNs(d_img, 0.0) d_img = depth_to_net_dim(d_img, robot='HSR') policy_input = np.copy(d_img) else: policy_input = np.copy(c_img) # -------------------------------------------------------------- # Run grasp detector to get data=(x,y) point for target, record stats. # Note that the web labeler returns a dictionary like this: # {'objects': [{'box': (155, 187, 165, 194), 'class': 0}], 'num_labels': 1} # but we really want just the 2D grasping point. So use `get_pose()`. # Also, for the analytic one, we'll pick the highest point ourselves. # -------------------------------------------------------------- sgraspt = time.time() if args.g_type == 'network': data = self.g_detector.predict(policy_input) elif args.g_type == 'analytic': data_all = self.wl.label_image(policy_input) data = get_pose(data_all) elif args.g_type == 'human': data_all = self.wl.label_image(policy_input) data = get_pose(data_all) egraspt = time.time() g_predict_t = egraspt - sgraspt print("Grasp predict time: {:.2f}".format(g_predict_t)) self.record_stats(c_img, d_img_raw, data, self.side, g_predict_t, 'grasp') # For safety, we can check image and abort as needed before execution. if use_d: img = self.dp.draw_prediction(d_img, data) else: img = self.dp.draw_prediction(c_img, data) caption = 'G Predicted: {} (ESC to abort, other key to proceed)'.format( data) call_wait_key(cv2.imshow(caption, img)) # -------------------------------------------------------------- # Broadcast grasp pose, execute the grasp, check for success. # We'll use the `find_pick_region_net` since the `data` is the # (x,y) pose, and not `find_pick_region_labeler`. 
# -------------------------------------------------------------- self.gripper.find_pick_region_net( pose=data, c_img=c_img, d_img=d_img_raw, count=self.grasp_count, side=self.side, apply_offset=self.apply_offset) pick_found, bed_pick = self.check_card_found() if self.side == "BOTTOM": self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base, 'lower_start') tic = time.time() self.gripper.execute_grasp(bed_pick, self.whole_body, 'head_down') toc = time.time() else: self.whole_body.move_to_go() self.tt.move_to_pose(self.omni_base, 'top_mid') tic = time.time() self.gripper.execute_grasp(bed_pick, self.whole_body, 'head_up') toc = time.time() self.g_time_stats.append(toc - tic) self.check_success_state(policy_input) def check_success_state(self, old_grasp_image): """ Checks whether a single grasp in a bed-making trajectory succeeded. Depends on which side of the bed the HSR is at. Invokes the learned success network policy and transitions the HSR if successful. When we record the data, c_img and d_img should be what success net saw. UPDATE: now we can pass in the previous `d_img` from the grasping to compare the difference. Well, technically the `policy_input` so it can handle either case. """ use_d = BED_CFG.SUCC_CONFIG.USE_DEPTH if self.side == "BOTTOM": result = self.sn.check_bottom_success(use_d, old_grasp_image) self.b_grasp_count += 1 else: result = self.sn.check_top_success(use_d, old_grasp_image) self.t_grasp_count += 1 self.grasp_count += 1 assert self.grasp_count == self.b_grasp_count + self.t_grasp_count success = result['success'] data = result['data'] c_img = result['c_img'] d_img = result['d_img'] d_img_raw = result['d_img_raw'] s_predict_t = result['s_predict_t'] img_diff = result['diff_l2'] img_ssim = result['diff_ssim'] self.record_stats(c_img, d_img_raw, data, self.side, s_predict_t, 'success') # We really need a better metric, such as 'structural similarity'. # Edit: well, it's probably marginally better, I think. # I use an L2 threshold of 98k, and an SSIM threshold of 0.88. if BED_CFG.GRASP_CONFIG.USE_DEPTH != BED_CFG.SUCC_CONFIG.USE_DEPTH: print("grasp vs success for using depth differ") print("for now we'll ignore the offset issue.") else: print("L2 and SSIM btwn grasp & next image: {:.1f} and {:.3f}". format(img_diff, img_ssim)) if img_ssim >= 0.875 or img_diff < 85000: print("APPLYING OFFSET! (self.apply_offset = True)") self.apply_offset = True else: print("no offset applied (self.apply_offset = False)") self.apply_offset = False # Have user confirm that this makes sense. caption = "Success net saw this and thought: {}. Press any key".format( success) if use_d: call_wait_key(cv2.imshow(caption, d_img)) else: call_wait_key(cv2.imshow(caption, c_img)) # Limit amount of grasp attempts per side, pretend 'success' anyway. lim = BED_CFG.GRASP_ATTEMPTS_PER_SIDE if (self.side == 'BOTTOM' and self.b_grasp_count >= lim) or \ (self.side == 'TOP' and self.t_grasp_count >= lim): print("We've hit {} for this side so set success=True".format(lim)) success = True # Handle transitioning to different side if success: if self.side == "BOTTOM": self.transition_to_top() self.side = 'TOP' else: self.transition_to_start() print( "We're moving to another side so revert self.apply_offset = False." 
) self.apply_offset = False else: self.new_grasp = False def transition_to_top(self): """Transition to top (not bottom).""" transition_time = self.move_to_top_side() self.move_time_stats.append(transition_time) def transition_to_start(self): """Transition to start=bottom, SAVE ROLLOUT STATS, exit program. The `rollout_stats` is a list with a bunch of stats recorded via the class method `record_stats`. We save with a top-down webcam and save before moving back, since the HSR could disconnect. """ # Record the final image for evaluation later (from USB webcam). cap = cv2.VideoCapture(0) frame = None while frame is None: ret, frame = cap.read() self.image_final = frame cv2.imwrite('image_final.png', self.image_final) _, frame = cap.read() self.image_final2 = frame cv2.imwrite('image_final2.png', self.image_final2) cap.release() print("NOTE! Recorded `image_final` for coverage evaluation.") # Append some last-minute stuff to `self.rollout_stats` for saving. final_stuff = { 'image_start': self.image_start, 'image_final': self.image_final, 'image_start2': self.image_start2, 'image_final2': self.image_final2, 'grasp_times': self.g_time_stats, 'move_times': self.move_time_stats, 'args': self.args, # ADDING THIS! Now we can 'retrace' our steps. } self.rollout_stats.append(final_stuff) # SAVE, move to start, then exit. self.com.save_stat(self.rollout_stats, target_path=self.args.save_path) self.move_to_start() sys.exit() def record_stats(self, c_img, d_img, data, side, time, typ): """Adds a dictionary to the `rollout_stats` list. We can tell it's a 'net' thing due to 'net_pose' and 'net_succ' keys. EDIT: argh wish I hadn't done that since this script also handles the human and analytic cases. Oh well, too late for that now. """ assert side in ['BOTTOM', 'TOP'] grasp_point = {} grasp_point['c_img'] = c_img grasp_point['d_img'] = d_img if typ == "grasp": grasp_point['net_pose'] = data grasp_point['g_net_time'] = time elif typ == "success": grasp_point['net_succ'] = data grasp_point['s_net_time'] = time else: raise ValueError(typ) grasp_point['side'] = side grasp_point['type'] = typ self.rollout_stats.append(grasp_point) def position_head(self): """Position head for a grasp. Use lower_start_tmp so HSR looks 'sideways'; thus, hand is not in the way. """ self.whole_body.move_to_go() if self.side == "BOTTOM": self.tt.move_to_pose(self.omni_base, 'lower_start_tmp') self.whole_body.move_to_joint_positions( {'arm_flex_joint': -np.pi / 16.0}) self.whole_body.move_to_joint_positions( {'head_pan_joint': np.pi / 2.0}) self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120}) self.whole_body.move_to_joint_positions( {'head_tilt_joint': -np.pi / 4.0}) def move_to_top_side(self): """Assumes we're at the bottom and want to go to the top.""" self.whole_body.move_to_go() tic = time.time() self.tt.move_to_pose(self.omni_base, 'right_down_1') self.tt.move_to_pose(self.omni_base, 'right_mid_1') self.tt.move_to_pose(self.omni_base, 'right_up_1') self.tt.move_to_pose(self.omni_base, 'top_mid_tmp') toc = time.time() return toc - tic def move_to_start(self): """Assumes we're at the top and we go back to the start. Go to lower_start_tmp to be at the same view as we started with, so that we take a c_img and compare coverage. 
""" self.whole_body.move_to_go() tic = time.time() self.tt.move_to_pose(self.omni_base, 'right_up_2') self.tt.move_to_pose(self.omni_base, 'right_mid_2') self.tt.move_to_pose(self.omni_base, 'right_down_2') self.tt.move_to_pose(self.omni_base, 'lower_start_tmp') toc = time.time() return toc - tic def check_card_found(self): """Looks up the pose for where the HSR's hand should go to.""" transforms = self.tl.getFrameStrings() cards = [] try: for transform in transforms: current_grasp = 'bed_' + str(self.grasp_count) if current_grasp in transform: print('found {}'.format(current_grasp)) f_p = self.tl.lookupTransform('map', transform, rospy.Time(0)) cards.append(transform) except: rospy.logerr('bed pick not found yet') return True, cards def _test_grasp(self): """Simple tests for grasping. Don't forget to process depth images. Do this independently of any rollout ... """ print("\nNow in `test_grasp` to check grasping net...") self.position_head() time.sleep(3) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if np.isnan(np.sum(d_img)): cv2.patchNaNs(d_img, 0.0) d_img = depth_to_net_dim(d_img, robot='HSR') pred = self.g_detector.predict(np.copy(d_img)) img = self.dp.draw_prediction(d_img, pred) print("prediction: {}".format(pred)) caption = 'G Predicted: {} (ESC to abort, other key to proceed)'.format( pred) cv2.imshow(caption, img) key = cv2.waitKey(0) if key in ESC_KEYS: print("Pressed ESC key. Terminating program...") sys.exit() def _test_success(self): """Simple tests for success net. Don't forget to process depth images. Should be done after a grasp test since I don't re-position... Note: we have access to `self.sn` but that isn't the actual net which has a `predict`, but it's a wrapper (explained above), but we can access the true network via `self.sn.sdect` and from there call `predict`. """ print("\nNow in `test_success` to check success net...") time.sleep(3) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if np.isnan(np.sum(d_img)): cv2.patchNaNs(d_img, 0.0) d_img = depth_to_net_dim(d_img, robot='HSR') result = self.sn.sdect.predict(np.copy(d_img)) result = np.squeeze(result) print("s-net pred: {} (if [0]<[1] failure, else success...)".format( result)) caption = 'S Predicted: {} (ESC to abort, other key to proceed)'.format( result) cv2.imshow(caption, d_img) key = cv2.waitKey(0) if key in ESC_KEYS: print("Pressed ESC key. Terminating program...") sys.exit() def _test_variables(self): """Test to see if TF variables were loaded correctly. """ vars = tf.trainable_variables() print("\ntf.trainable_variables:") for vv in vars: print(" {}".format(vv)) print("done\n")
class FullWebDemo(): def __init__(self): """ Class to run HSR lego task """ self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() self.com.go_to_initial_state(self.whole_body) self.grasp_count = 0 self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper')) self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('suction')) self.gm = GraspManipulator(self.gp, self.gripper, self.suction, self.whole_body, self.omni_base, self.tl) self.web = Web_Labeler() print "after thread" def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img): print("grasping a " + cfg.labels[bbox.label]) #bbox has format [xmin, ymin, xmax, ymax] fg, obj_w = bbox.to_mask(c_img, col_img) # cv2.imwrite("debug_imgs/test.png", obj_w.data) # cv2.imwrite("debug_imgs/test2.png", fg.data) groups = get_cluster_info(fg) curr_tol = cfg.COLOR_TOL while len(groups) == 0 and curr_tol > 10: curr_tol -= 5 #retry with lower tolerance- probably white object fg, obj_w = bbox.to_mask(c_img, col_img, tol=curr_tol) groups = get_cluster_info(fg) if len(groups) == 0: print("No object within bounding box") return False display_grasps(workspace_img, groups) group = groups[0] pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img) grasp_pose = self.gripper.get_grasp_pose(pose[0], pose[1], pose[2], rot, c_img=workspace_img.data) self.gm.execute_grasp(grasp_pose, class_num=bbox.label) def run_singulate(self, col_img, main_mask, to_singulate, d_img): print("singulating") singulator = Singulation(col_img, main_mask, [g.mask for g in to_singulate]) waypoints, rot, free_pix = singulator.get_singulation() singulator.display_singulation() self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True) def full_web_demo(self): self.gm.position_head() time.sleep(3) #making sure the robot is finished moving c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() while not (c_img is None or d_img is None): path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png" cv2.imwrite(path, c_img) time.sleep(2) #make sure new image is written before being read # print "\n new iteration" main_mask = crop_img(c_img, simple=True) col_img = ColorImage(c_img) workspace_img = col_img.mask_binary(main_mask) labels = self.web.label_image(path) objs = labels['objects'] bboxes = [Bbox(obj['box'], obj['class']) for obj in objs] single_objs = find_isolated_objects(bboxes) if len(single_objs) > 0: to_grasp = select_first_obj(single_objs) self.run_grasp(to_grasp, c_img, col_img, workspace_img, d_img) else: #for accurate singulation should have bboxes for all fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes] groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs] groups = merge_groups(groups, cfg.DIST_TOL) self.run_singulate(col_img, main_mask, groups, d_img) #reset to start self.whole_body.move_to_go() self.gm.move_to_home() self.gm.position_head() time.sleep(3) c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data()
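# Bounding boxes in this file use the [xmin, ymin, xmax, ymax] convention in
# image (column, row) coordinates. The sketch below shows the two operations
# that convention implies, cropping the box out of an (H, W, 3) numpy image and
# taking its center point; it is a hypothetical helper for reference, not
# project code.
import numpy as np

def crop_and_center_sketch(img, bbox):
    x_min, y_min, x_max, y_max = [int(v) for v in bbox]
    crop = img[y_min:y_max, x_min:x_max]   # rows index y, columns index x
    center = ((x_min + x_max) / 2.0, (y_min + y_max) / 2.0)
    return crop, center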
def __init__(self, args): """For deploying the bed-making policy, not for data collection. We use all three variants (analytic, human, networks) here due to similarities in code structure. """ self.args = args DEBUG = True # TODO: switch to a logger so debug mode handled easily # Set up the robot. self.robot = robot = hsrb_interface.Robot() if DEBUG: print("finished: hsrb_interface.Robot()...") self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') if DEBUG: print("finished: robot.get(omni_base)...") self.whole_body = self.robot.get('whole_body') if DEBUG: print("finished: robot.get(whole_body)...") self.cam = RGBD() self.com = COM() self.wl = Python_Labeler(cam=self.cam) # Set up initial state, table, etc. Don't forget view mode! self.view_mode = BED_CFG.VIEW_MODE self.com.go_to_initial_state(self.whole_body) if DEBUG: print("finished: go_to_initial_state() ...") self.tt = TableTop() if DEBUG: print("finished: TableTop()...") # For now, a workaround. Ugly but it should do the job ... #self.tt.find_table(robot) self.tt.make_fake_ar() self.tt.find_table_workaround(robot) #self.ins = InitialSampler(self.cam) self.side = 'BOTTOM' self.grasp_count = 0 self.b_grasp_count = 0 self.t_grasp_count = 0 # AH, build the YOLO network beforehand. g_cfg = BED_CFG.GRASP_CONFIG s_cfg = BED_CFG.SUCC_CONFIG self.yc = YOLO_CONV(options=s_cfg) self.yc.load_network() # Policy for grasp detection, using Deep Imitation Learning. # Or, actually, sometimes we will use humans or an analytic version. if DEBUG: self._test_variables() print("\nnow forming the GDetector with type {}".format(args.g_type)) if args.g_type == 'network': self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc) elif args.g_type == 'analytic': self.g_detector = Analytic_Grasp() # TODO not implemented! elif args.g_type == 'human': print("Using a human, don't need to have a `g_detector`. :-)") if DEBUG: self._test_variables() print("\nnow making success net") self.sn = Success_Net(self.whole_body, self.tt, self.cam, self.omni_base, fg_cfg=s_cfg, bed_cfg=BED_CFG, yc=self.yc) # Bells and whistles. self.br = TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options, robot.get('gripper')) self.dp = DrawPrediction() # When we start, do rospy.spin() to check the frames (phase 1). Then re-run. # The current hack we have to get around crummy AR marker detection. :-( if DEBUG: self._test_variables() print("Finished with init method") time.sleep(4) if args.phase == 1: print("Now doing rospy.spin() because phase = 1.") rospy.spin() # For evaluating coverage. self.img_start = None self.img_final = None self.img_start2 = None self.img_final2 = None # For grasp offsets. self.apply_offset = False
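# The deployment class above is driven entirely by `args`: it reads `g_type` to
# pick the grasp policy, `phase` to decide whether to just spin and check TF
# frames, and `save_path` when saving rollout stats. A minimal argparse sketch
# covering only those attributes is shown here; the real launcher script may
# define more options or different defaults, so treat the names and defaults as
# assumptions.
import argparse

def parse_deploy_args_sketch():
    p = argparse.ArgumentParser(description="Deploy the bed-making policy on the HSR.")
    p.add_argument('--g_type', choices=['network', 'analytic', 'human'], default='network',
                   help='which grasp policy supplies the (x, y) grasp point')
    p.add_argument('--phase', type=int, default=2,
                   help='phase 1 only does rospy.spin() to check TF frames; re-run for the rollout')
    p.add_argument('--save_path', type=str, default='results/rollout_stats.p',
                   help='where rollout statistics are saved')
    return p.parse_args()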
class BedMaker(): def __init__(self): ''' Initialization class for a Policy Parameters ---------- yumi : An instianted yumi robot com : The common class for the robot cam : An open bincam class debug : bool A bool to indicate whether or not to display a training set point for debuging. ''' self.robot = hsrb_interface.Robot() self.rgbd_map = RGBD2Map() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() if not DEBUG: self.com.go_to_initial_state(self.whole_body) self.tt = TableTop() self.tt.find_table(self.robot) self.wl = Python_Labeler(cam=self.cam) self.grasp_count = 0 self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.ds = data_saver('tpc_rollouts/rollouts') self.gp = GraspPlanner() self.gripper = Crane_Gripper(self.gp, cam, options, robot.get('gripper')) self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body, self.omni_base, self.tt) print "after thread" def find_mean_depth(self, d_img): ''' Evaluates the current policy and then executes the motion specified in the the common class ''' indx = np.nonzero(d_img) mean = np.mean(d_img[indx]) return def lego_demo(self): self.rollout_data = [] self.get_new_grasp = True if not DEBUG: self.gm.position_head() while True: time.sleep(1) #making sure the robot is finished moving c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() cv2.imwrite("debug_imgs/c_img.png", c_img) print "\n new iteration" if (not c_img == None and not d_img == None): #label image data = self.wl.label_image(c_img) c_img = self.cam.read_color_data() grasp_boxes = [] suction_boxes = [] singulate_boxes = [] for i in range(data['num_labels']): bbox = data['objects'][i]['box'] classnum = data['objects'][i]['class'] classname = ['grasp', 'singulate', 'suction', 'quit'][classnum] if classname == "grasp": grasp_boxes.append(bbox) elif classname == "suction": suction_boxes.append(bbox) elif classname == "singulate": singulate_boxes.append(bbox) elif classname == "quit": self.ds.save_rollout() self.gm.move_to_home() self.ds.append_data_to_rollout(c_img, data) main_mask = crop_img(c_img) col_img = ColorImage(c_img) workspace_img = col_img.mask_binary(main_mask) grasps = [] viz_info = [] for i in range(len(grasp_boxes)): bbox = grasp_boxes[i] center_mass, direction = bbox_to_grasp(bbox, c_img, d_img) viz_info.append([center_mass, direction]) pose, rot = self.gm.compute_grasp(center_mass, direction, d_img) grasps.append( self.gripper.get_grasp_pose(pose[0], pose[1], pose[2], rot, c_img=workspace_img.data)) suctions = [] for i in range(len(suction_boxes)): suctions.append("compute_suction?") if len(grasps) > 0 or len(suctions) > 0: cv2.imwrite( "grasps.png", visualize(workspace_img, [v[0] for v in viz_info], [v[1] for v in viz_info])) print "running grasps" for grasp in grasps: print "grasping", grasp self.gm.execute_grasp(grasp) print "running suctions" for suction in suctions: print "suctioning", suction #execute suction elif len(singulate_boxes) > 0: print("singulating") bbox = singulate_boxes[0] obj_mask = bbox_to_mask(bbox, c_img) start, end = find_singulation(col_img, main_mask, obj_mask) display_singulation(start, end, workspace_img) self.gm.singulate(start, end, c_img, d_img) else: print("cleared workspace") self.whole_body.move_to_go() self.gm.position_head()
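# The labeling loop above buckets each returned box by its class index using the
# fixed ordering ['grasp', 'singulate', 'suction', 'quit']. Side effects of the
# 'quit' branch aside, the same bucketing can be written more compactly as in
# this sketch, which assumes the same label dictionary format as the code above.
def bucket_labels_sketch(data):
    class_names = ['grasp', 'singulate', 'suction', 'quit']
    buckets = {name: [] for name in class_names}
    for obj in data['objects']:
        buckets[class_names[obj['class']]].append(obj['box'])
    return buckets   # e.g. buckets['grasp'] is the list of grasp bounding boxes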
class CollectData():

    def __init__(self):
        """Class to collect RGB and depth image pairs for the HSR lego task."""
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.side = 'BOTTOM'
        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        #     self.com.go_to_initial_state(self.whole_body)
        # self.tt = TableTop()
        # self.tt.find_table(self.robot)
        self.grasp_count = 0
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))
        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base)

        # Add a box (presumably the table) to the collision world so motion
        # planning avoids it.
        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')
        print("finished initializing collect data class")

    def collect_data(self):
        """Collect paired RGB and depth images.

        Run this a few times to check that the RGB images are reasonable. If
        not, rearrange the setup and try again. Delete any images saved during
        those checks, then run this "for real".
        """
        self.gm.position_head()
        IMDIR_RGB = 'image_rgb/'
        IMDIR_DEPTH = 'image_depth/'
        time.sleep(5)  # make sure the robot is finished moving
        print("after calling gm.position_head() w/several second delay")
        print("\nwhole body joint positions:\n{}".format(
            self.whole_body.joint_positions))

        while True:
            num = len([x for x in os.listdir(IMDIR_RGB) if 'png' in x])
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imshow('rgb/image_raw', c_img)
            cv2.imshow('depth/image_raw', d_img)
            cv2.waitKey(30)  # required for the imshow windows to refresh
            fname1 = IMDIR_RGB + 'rgb_raw_{}.png'.format(str(num).zfill(4))
            fname2 = IMDIR_DEPTH + 'depth_raw_{}.png'.format(str(num).zfill(4))
            cv2.imwrite(fname1, c_img)
            cv2.imwrite(fname2, d_img)
            print("just saved {} and {}. NOW REARRANGE SETUP!!".format(
                fname1, fname2))
            IPython.embed()  # re-arrange the setup here, then exit the shell to continue
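# The collection loop above writes rgb_raw_XXXX.png / depth_raw_XXXX.png pairs
# whose indices stay in sync because both names are derived from the same file
# count. The sketch below reloads the matched pairs later; it assumes the same
# directory layout, and note that saving float depth through cv2.imwrite can
# lose precision, so a real pipeline might prefer .npy for depth.
import os
import cv2

def load_image_pairs_sketch(rgb_dir='image_rgb/', depth_dir='image_depth/'):
    pairs = []
    for fname in sorted(f for f in os.listdir(rgb_dir) if f.endswith('.png')):
        idx = fname.replace('rgb_raw_', '').replace('.png', '')
        c_img = cv2.imread(os.path.join(rgb_dir, fname))
        d_img = cv2.imread(os.path.join(depth_dir, 'depth_raw_{}.png'.format(idx)),
                           cv2.IMREAD_UNCHANGED)
        pairs.append((c_img, d_img))
    return pairs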
class CardPicker(): def __init__(self): ''' Initialization class for a Policy Parameters ---------- yumi : An instianted yumi robot com : The common class for the robot cam : An open bincam class debug : bool A bool to indicate whether or not to display a training set point for debuging. ''' self.robot = hsrb_interface.Robot() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.cam = RGBD() self.com = COM() self.com.go_to_initial_state(self.whole_body) self.br = tf.TransformBroadcaster() self.tl = TransformListener() self.gp = GraspPlanner() self.suction = Suction(self.gp, self.cam) #thread.start_new_thread(self.ql.run,()) print "after thread" def find_mean_depth(self, d_img): ''' Evaluates the current policy and then executes the motion specified in the the common class ''' indx = np.nonzero(d_img) mean = np.mean(d_img[indx]) return def card_pick(self): while True: c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() if (not c_img == None and not d_img == None): self.ql = QueryLabeler() self.ql.run(c_img) data = self.ql.label_data del self.ql self.suction.find_pick_region(data, c_img, d_img) # card_found,cards = self.check_card_found() # if(card_found): # self.suction.execute_grasp(cards,self.whole_body) # self.com.go_to_initial_state(self.whole_body) def check_card_found(self): # try: transforms = self.tl.getFrameStrings() cards = [] for transform in transforms: print transform if 'card' in transform: print 'got here' f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame', transform, rospy.Time(0)) cards.append(transform) return True, cards
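# A portability note on checks like `if (not c_img == None and not d_img == None)`
# used above: on old NumPy releases `array == None` evaluated to a single False,
# so the expression acted as a None check, but on current NumPy the comparison is
# element-wise and the surrounding `not` either warns or raises "truth value of
# an array is ambiguous". The identity test below is the safe equivalent.
def have_both_images_sketch(c_img, d_img):
    """True only when both camera readings are actual images, not None."""
    return c_img is not None and d_img is not None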
class LabelDemo(): def __init__(self): self.robot = hsrb_interface.Robot() self.br = tf.TransformBroadcaster() self.rgbd_map = RGBD2Map(self.br) # IPython.embed() self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.side = 'BOTTOM' self.cam = RGBD() self.com = COM() self.com.go_to_initial_state(self.whole_body) self.grasp_count = 0 self.tl = TransformListener() self.gp = GraspPlanner() self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper')) self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('suction')) self.gm = GraspManipulator(self.gp, self.gripper, self.suction, self.whole_body, self.omni_base, self.tl) self.web = Web_Labeler() thread.start_new_thread(self.broadcast_temp_bin,()) time.sleep(3) print "after thread" def broadcast_temp_bin(self): while True: self.br.sendTransform((1, 1, 0.6), tf.transformations.quaternion_from_euler(ai=0.0,aj=1.57,ak=0.0), rospy.Time.now(), 'temp_bin', # 'head_rgbd_sensor_link') 'map') rospy.sleep(1) def bbox_to_fg(self, bbox, c_img, col_img): obj_mask = crop_img(c_img, bycoords = [bbox[1], bbox[3], bbox[0], bbox[2]]) obj_workspace_img = col_img.mask_binary(obj_mask) fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL, ignore_black=True) return fg, obj_workspace_img def test_bbox_overlap(self, box_a, box_b): #bbox has format [xmin, ymin, xmax, ymax] if box_a[0] > box_b[2] or box_a[2] < box_b[0]: return False if box_a[1] > box_b[3] or box_a[3] < box_b[1]: return False return True def find_isolated_objects(self, bboxes, c_img): valid_bboxes = [] for curr_ind in range(len(bboxes)): curr_bbox = bboxes[curr_ind] overlap = False for test_ind in range(curr_ind + 1, len(bboxes)): test_bbox = bboxes[test_ind] if self.test_bbox_overlap(curr_bbox, test_bbox): overlap = True break if not overlap: valid_bboxes.append(curr_bbox) return valid_bboxes def label_demo(self): """ Main method which executes the stuff we're interested in. Should apply to both the physical and simulated HSRs. Call as `python main/test_labeling.py`. 
""" self.gm.position_head() time.sleep(3) #making sure the robot is finished moving c_img = self.cam.read_color_data() d_img = self.cam.read_depth_data() path = "/home/ron/siemens_sim/siemens_challenge/debug_imgs/web.png" cv2.imwrite(path, c_img) time.sleep(2) #make sure new image is written before being read # print "\n new iteration" main_mask = crop_img(c_img, simple=True) col_img = ColorImage(c_img) workspace_img = col_img.mask_binary(main_mask) labels = self.web.label_image(path) obj = labels['objects'][0] bbox = obj['box'] class_label = obj['class'] #bbox has format [xmin, ymin, xmax, ymax] fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img) cv2.imwrite("debug_imgs/test.png", obj_w.data) groups = get_cluster_info(fg) display_grasps(workspace_img, groups) group = groups[0] print(d_img) pose,rot = self.gm.compute_grasp(group.cm, group.dir, d_img) pose = find_pose(pose) if pose == None: print("unable to find corresponding item.") sys.exit() a = tf.transformations.quaternion_from_euler(ai=-2.355,aj=-3.14,ak=0.0) b = tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=1.57) base_rot = tf.transformations.quaternion_multiply(a,b) print("now about to get grasp pose, w/pose: {}, rot: {}".format(pose, rot)) thread.start_new_thread(self.gripper.loop_broadcast,(pose,base_rot,rot)) time.sleep(5) print("now calling execute_grasp w/grasp_pose: {}".format(grasp_pose)) # IPython.embed() self.gm.execute_grasp("grasp_0") self.whole_body.move_end_effector_pose(geometry.pose(),"temp_bin") self.gripper.open_gripper() #reset to start self.whole_body.move_to_go() # self.gm.move_to_home() self.gm.position_head() time.sleep(3)
    def clean_up(self):
        self.sess.close()
        tfl.reset_default_graph()

    def broadcast_poses(self, poses):
        count = 0
        for pose in poses:
            print "POSE ", pose
            # IPython.embed()
            # Offset the detected pose slightly before broadcasting it as a TF frame.
            self.br.sendTransform((pose[0], pose[1] - 0.05, pose[2] + 0.06),
                                  (0.0, 0.0, 0.0, 1.0),
                                  rospy.Time.now(),
                                  'bottle' + str(count),
                                  'head_rgbd_sensor_link')
            count += 1


if __name__ == '__main__':
    rospy.init_node('readJoy_node', anonymous=True)
    print 'bottle'
    rgbd = RGBD()
    time.sleep(5)
    do = Bottle_Detect()
    while True:
        c_img = rgbd.read_color_data()
        print "IMAGE"
        print c_img
        if c_img is not None:
            do.detect_bottle(c_img)
        button = d_pad[1]
        self.make_projection(down_corner, up_corner)
        return down_corner, up_corner


if __name__ == '__main__':
    robot = hsrb_interface.Robot()
    whole_body = robot.get('whole_body')
    omni_base = robot.get('omni_base')
    com = COM()
    rgbd_map = RGBD2Map()
    cam = RGBD()
    com.go_to_initial_state(whole_body)
    tt = TableTop()
    tt.find_table(robot)
    # tt.move_to_pose(omni_base, 'lower_start')
    # whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
    time.sleep(5)
    IS = InitialSampler(cam)
    while True:
        IS.sample_initial_state()
class Collect_Demos(): def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0): self.robot = hsrb_interface.Robot() self.noise = 0.1 self.omni_base = self.robot.get('omni_base') self.whole_body = self.robot.get('whole_body') self.gripper = self.robot.get('gripper') self.tl = TransformListener() self.cam = RGBD() time.sleep(5) self.b_d = Bottle_Detect(self.cam.read_info_data()) self.start_recording = False self.stop_recording = False self.com = COM() if (not user_name == None): self.com.Options.setup(self.com.Options.root_dir, user_name) #self.com.go_to_initial_state(self.whole_body,self.gripper) #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3}) self.joystick = JoyStick_X(self.com, inject_noise=inject_noise, noise_scale=noise_scale) self.torque = Gripper_Torque() self.joints = Joint_Positions() def proess_state(self): img_rgb = self.cam.read_color_data() img_depth = self.cam.read_depth_data() cur_action, noise_action, time_pub = self.joystick.apply_control() state = self.com.format_data(img_rgb, img_depth) #cv2.imwrite('frame_'+str(self.count)+'.png',state[0]) #Save all data data = {} data['action'] = cur_action data['color_img'] = state[0] data['depth_img'] = state[1] data['noise_action'] = noise_action pose = self.whole_body.get_end_effector_pose().pos pose = np.array([pose.x, pose.y, pose.z]) data['robot_pose'] = pose # data['action_time'] = time_pub # data['image_time'] = self.cam.color_time_stamped print "ACTION AT COUNT ", self.count print cur_action self.count += 1 self.trajectory.append(data) # if(LA.norm(cur_action) > 1e-3): # print "GOT ACCEPTED" # self.trajectory.append(data) def check_success(self): img_rgb = self.cam.read_color_data() img_depth = self.cam.read_depth_data() success = self.b_d.detect_bottle(img_rgb, img_depth) print "BOTTLE FOUND ", success return success def run(self): self.joystick.apply_control() cur_recording = self.joystick.get_record_actions_passive() if (cur_recording[0] < -0.1): print "BEGIN DATA COLLECTION" self.start_recording = True count = 0 if (self.start_recording): self.count = 0 self.trajectory = [] while not self.stop_recording: #while count < 20: if (self.cam.is_updated): self.proess_state() self.cam.is_updated = False cur_recording = self.joystick.get_record_actions() if (cur_recording[1] < -0.1): print "END DATA COLLECTION" self.stop_recording = True count += 1 self.check_success() q = input('SAVE DATA: y or n: ') if (q == 'y'): self.com.save_recording(self.trajectory) self.start_recording = False self.stop_recording = False
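# Each demonstration recorded above is a list of per-step dictionaries with the
# keys 'action', 'noise_action', 'color_img', 'depth_img', and 'robot_pose'.
# Assuming `com.save_recording` pickles that list (an assumption, the real helper
# may use another format), and that each action is a fixed-length vector, a quick
# offline sanity check could look like this sketch:
import pickle
import numpy as np

def inspect_demo_sketch(path):
    with open(path, 'rb') as f:
        trajectory = pickle.load(f)
    actions = np.array([step['action'] for step in trajectory])
    poses = np.array([step['robot_pose'] for step in trajectory])
    print("steps: {}".format(len(trajectory)))
    print("mean |action|: {:.3f}".format(np.linalg.norm(actions, axis=1).mean()))
    print("end-effector displacement: {:.3f} m".format(np.linalg.norm(poses[-1] - poses[0])))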