import os
import math
import random

import cv2
import numpy as np
from ruamel import yaml
from scipy.spatial.transform import Rotation as R
from transforms3d.quaternions import qmult

# SingleRoboticArm, KeypointDetection, MoveInference, quaternion_matrix and the
# data-collection helpers (obj_init_pos, get_init_pos, random_tilt,
# random_gripper_xy, generate_one_im_anno) come from this project's own modules;
# their import paths are not shown in this excerpt.


def main():
    rob_arm = SingleRoboticArm()
    data_root = '/home/luben/data/pdc/logs_proto'
    date = '2021-05-01'
    anno_data = 'insertion_' + date + '/processed'
    im_data = 'insertion_' + date + '/processed/images'
    anno_data_path = os.path.join(data_root, anno_data)
    im_data_path = os.path.join(data_root, im_data)
    # create folder
    cwd = os.getcwd()
    os.chdir(data_root)
    if not os.path.exists(im_data):
        os.makedirs(im_data)
    os.chdir(cwd)

    info_dic = {}
    cnt = 0
    iter = 5
    cam_name = 'vision_eye'
    peg_top = 'peg_keypoint_top'
    peg_bottom = 'peg_keypoint_bottom'
    hole_top = 'hole_keypoint_top'
    hole_bottom = 'hole_keypoint_bottom'
    pick_record = True
    tilt_record = True
    move_record = True

    origin_hole_pose = rob_arm.get_object_matrix('hole')
    origin_hole_pos = origin_hole_pose[:3, 3]
    origin_hole_quat = rob_arm.get_object_quat('hole')
    origin_peg_pose = rob_arm.get_object_matrix('peg')
    origin_peg_pos = origin_peg_pose[:3, 3]
    origin_peg_quat = rob_arm.get_object_quat('peg')
    #origin_ur5_pose = rob_arm.get_object_matrix('UR5_ikTarget')
    #origin_ur5_pos = origin_ur5_pose[:3, 3]
    #origin_ur5_quat = rob_arm.get_object_quat('UR5_ikTarget')
    gripper_init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    dir = np.array([0.0, 0.0, -1.0])

    for i in range(iter):
        # init pos of peg and hole; currently this pos is not random
        ran_hole_pos, ran_peg_pos = obj_init_pos(origin_hole_pos.copy(), origin_peg_pos.copy())
        rob_arm.set_object_position('hole', ran_hole_pos)
        rob_arm.set_object_position('peg', ran_peg_pos)
        rob_arm.set_object_quat('hole', origin_hole_quat)
        rob_arm.set_object_quat('peg', origin_peg_quat)
        rob_arm.movement(gripper_init_pose)
        rob_arm.open_gripper(mode=1)

        # gt grasp
        peg_keypoint_pose = rob_arm.get_object_matrix(obj_name='peg_keypoint_bottom')
        grasp_pose = peg_keypoint_pose.copy()
        grasp_pose[:3, 3] -= dir * 0.08
        rob_arm.gt_run_grasp(grasp_pose)
        # pull up the peg to avoid collision (hole's height = 0.07)
        grasp_pose[:3, 3] -= dir * 0.075
        rob_arm.movement(grasp_pose)

        # randomly tilt the peg from -20 to 20 degrees
        peg_tilt_degree = random.uniform(-20, 20)
        peg_quat = rob_arm.get_object_quat('peg')
        # this line must be modified for an angled hole
        peg_rot_quat = [math.sin(math.radians(peg_tilt_degree / 2)), 0, 0, math.cos(math.radians(peg_tilt_degree / 2))]
        rob_arm.set_object_quat('peg', peg_rot_quat)
        peg_pose = rob_arm.get_object_matrix('peg')
        peg_pos = peg_pose[:3, 3]
        # offset due to rotation
        # this line must be modified for an angled hole
        offset_peg_y = 0.035 * math.sin(math.radians(peg_tilt_degree / 2))
        peg_pos[1] += offset_peg_y
        rob_arm.set_object_position('peg', peg_pos)

        if tilt_record:
            # tilt the gripper
            peg_keypoint_top_pose = rob_arm.get_object_matrix(peg_top)
            peg_keypoint_bottom_pose = rob_arm.get_object_matrix(peg_bottom)
            hole_keypoint_top_pose = rob_arm.get_object_matrix(hole_top)
            hole_keypoint_bottom_pose = rob_arm.get_object_matrix(hole_bottom)
            hole_dir = hole_keypoint_bottom_pose[:3, 3] - hole_keypoint_top_pose[:3, 3]
            peg_dir = peg_keypoint_bottom_pose[:3, 3] - peg_keypoint_top_pose[:3, 3]
            dot_product = np.dot(peg_dir, hole_dir)
            degree = math.degrees(math.acos(dot_product / (np.linalg.norm(peg_dir) * np.linalg.norm(hole_dir))))
            print('Degree:', degree)
            cross_product = np.cross(peg_dir, hole_dir)
            cross_product = cross_product / np.linalg.norm(cross_product)
            w = math.cos(math.radians(degree / 2))
            x = math.sin(math.radians(degree / 2)) * cross_product[0]
            y = math.sin(math.radians(degree / 2)) * cross_product[1]
            z = math.sin(math.radians(degree / 2)) * cross_product[2]
            gripper_rot_quat = [w, x, y, z]
            rot_pose = quaternion_matrix(gripper_rot_quat)
            gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
            rot_matrix = np.dot(rot_pose[:3, :3], gripper_pose[:3, :3])
            gripper_pose[:3, :3] = rot_matrix
            rob_arm.movement(gripper_pose)

            # peg hole alignment
            peg_keypoint_bottom_pose = rob_arm.get_object_matrix(obj_name='peg_keypoint_bottom')
            hole_keypoint_top_pose = rob_arm.get_object_matrix(obj_name='hole_keypoint_top')
            err_xy = hole_keypoint_top_pose[:2, 3] - peg_keypoint_bottom_pose[:2, 3]
            gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
            # peg falling
            if err_xy[0] > 0.15 or err_xy[1] > 0.15:
                continue
            gripper_pose[:2, 3] += err_xy
            rob_arm.movement(gripper_pose)

            # gripper rotation record
            step_size = 0.1
            print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' rotate: ' + str(0))
            delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
            delta_translation = np.array([0.0, 0.0, 0.0])
            info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
            info_dic[cnt] = info
            cnt += 1
            split_degree = degree * step_size
            for j in range(int(1 / step_size)):
                # reverse rotation
                w = math.cos(math.radians(split_degree / 2))
                x = math.sin(math.radians(split_degree / 2)) * cross_product[0] * -1
                y = math.sin(math.radians(split_degree / 2)) * cross_product[1] * -1
                z = math.sin(math.radians(split_degree / 2)) * cross_product[2] * -1
                gripper_rot_quat = [w, x, y, z]
                rot_pose = quaternion_matrix(gripper_rot_quat)
                gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
                rot_matrix = np.dot(rot_pose[:3, :3], gripper_pose[:3, :3])
                gripper_pose[:3, :3] = rot_matrix
                rob_arm.movement(gripper_pose)
                print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' rotate: ' + str(j + 1))
                delta_rotation_quat = np.array([w, -x, -y, -z])
                delta_translation = np.array([0.0, 0.0, 0.0])
                info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                info_dic[cnt] = info
                cnt += 1

        # pick up the peg
        gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
        random_z = random.uniform(0.1, 0.3)
        pick_vec = np.array([0, 0, random_z])
        if pick_record:
            step_size = 0.1
            for j in range(int(1 / step_size)):
                gripper_pose[:3, 3] += pick_vec * step_size
                rob_arm.movement(gripper_pose)
                print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' pick: ' + str(j))
                delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
                delta_translation = -pick_vec * step_size
                info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                info_dic[cnt] = info
                cnt += 1
        else:
            gripper_pose[:3, 3] += pick_vec
            rob_arm.movement(gripper_pose)

        # move the gripper toward a random x/y target near the hole
        target_x, target_y = random_gripper_xy(ran_hole_pos)
        gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
        if move_record:
            now_x = gripper_pose[0, 3]
            now_y = gripper_pose[1, 3]
            move_vec = np.array([target_x - now_x, target_y - now_y, 0])
            step_size = 0.1
            for k in range(int(1 / step_size)):
                if k == 0:
                    for l in range(10):
                        gripper_pose[:3, 3] += move_vec * 0.01
                        rob_arm.movement(gripper_pose)
                        print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' move: ' + str(l))
                        delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
                        delta_translation = -move_vec * 0.01
                        info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                        info_dic[cnt] = info
                        cnt += 1
                        if l == 9:
                            move_vec *= 0.9
                else:
                    gripper_pose[:3, 3] += move_vec * step_size
                    rob_arm.movement(gripper_pose)
                    print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' move: ' + str(k + 9))
                    delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
                    delta_translation = -move_vec * step_size
                    info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                    info_dic[cnt] = info
                    cnt += 1
        else:
            gripper_pose[0, 3] = target_x
            gripper_pose[1, 3] = target_y
            rob_arm.movement(gripper_pose)
        '''
        # insert
        gripper_pose[:3, 3] += dir * 0.23
        rob_arm.insertion(gripper_pose)
        # after insertion
        gripper_pose[:3, 3] -= dir * 0.23
        rob_arm.movement(gripper_pose)
        # go back to initial pose
        rob_arm.movement(gripper_init_pose)
        '''
        #rob_arm.set_object_position('hole', origin_hole_pos)
        #rob_arm.set_object_quat('hole', origin_hole_quat)
        #rob_arm.set_object_position('peg', origin_peg_pos)
        #rob_arm.set_object_quat('peg', origin_peg_quat)
        #rob_arm.set_object_quat('UR5_ikTarget', origin_ur5_quat)
        #rob_arm.set_object_position('UR5_ikTarget', origin_ur5_pos)

    rob_arm.finish()
    f = open(os.path.join(anno_data_path, 'peg_in_hole.yaml'), 'w')
    yaml.dump(info_dic, f, Dumper=yaml.RoundTripDumper)
    f.close()
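# The data-collection routine above relies on two helpers defined elsewhere in
# this project: obj_init_pos() and random_gripper_xy(). The sketches below are
# hypothetical illustrations inferred only from how they are called (both ranges
# and offsets are assumptions); the repo's actual implementations may differ.

def obj_init_pos(hole_pos, peg_pos):
    # Hypothetical sketch: jitter the hole and peg around their original x/y
    # positions so each iteration starts from a slightly different layout.
    hole_pos[0] += random.uniform(-0.05, 0.05)
    hole_pos[1] += random.uniform(-0.05, 0.05)
    peg_pos[0] += random.uniform(-0.05, 0.05)
    peg_pos[1] += random.uniform(-0.05, 0.05)
    return hole_pos, peg_pos

def random_gripper_xy(hole_pos):
    # Hypothetical sketch: sample a gripper x/y target in a band around the hole.
    target_x = hole_pos[0] + random.uniform(-0.1, 0.1)
    target_y = hole_pos[1] + random.uniform(-0.1, 0.1)
    return target_x, target_y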
def main():
    rob_arm = SingleRoboticArm()
    init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    netpath = '/home/luben/robot-peg-in-hole-task/mankey/experiment/box_ckpnt_grayscale_fix/checkpoint-100.pth'
    kp_detection = KeypointDetection(netpath)
    choose_hole = 0
    bbox = np.array([0, 0, 255, 255])

    # random remove peg
    peg_list = list(range(6))
    num_remove = random.randint(2, 3)
    print('number of remove peg:{:d}'.format(num_remove))
    remove_list = random.sample(peg_list, num_remove)
    for i, v in enumerate(remove_list):
        print('remove peg_idx:{:d}'.format(v))
        rob_arm.set_object_position('peg_large' + str(v), [2, 0.2 * i, 0.1])
        peg_list.pop(peg_list.index(v))

    hole_check_pose = rob_arm.get_object_matrix(obj_name='hole_check')
    rob_arm.movement(hole_check_pose)

    # detect empty hole
    #cam_name = 'vision_eye'
    cam_name = 'vision_fix'
    rgb = rob_arm.get_rgb(cam_name=cam_name)
    depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=1.5)
    # rotate 180
    (h, w) = rgb.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, 180, 1.0)
    rgb = cv2.warpAffine(rgb, M, (w, h))
    # rotate 180
    (h, w) = depth.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, 180, 1.0)
    depth = cv2.warpAffine(depth, M, (w, h))
    depth_mm = (depth * 1000).astype(np.uint16)  # type: np.uint16 ; uint16 is needed by keypoint detection network
    camera_keypoint, keypointxy_depth_realunit = kp_detection.inference(cv_rgb=rgb, cv_depth=depth_mm, bbox=bbox)
    depth_hole_list = []
    for i in range(6):
        x = int(keypointxy_depth_realunit[2 * (i + 1)][0])
        y = int(keypointxy_depth_realunit[2 * (i + 1)][1])
        #print(x, y, depth_mm[y][x])
        depth_hole_list.append(depth_mm[y][x])
    choose_hole = depth_hole_list.index(max(depth_hole_list))
    print('Get ready to insert hole {:d} !'.format(choose_hole))

    ## gt grasp
    peg_keypoint_top_pose = rob_arm.get_object_matrix(obj_name='peg_keypoint_top_large')
    grasp_pose = peg_keypoint_top_pose.copy()
    grasp_pose[0, 3] += 0.0095
    grasp_pose[2, 3] -= 0.015
    rob_arm.gt_run_grasp(grasp_pose)
    grasp_pose[2, 3] += 0.24
    rob_arm.movement(grasp_pose)

    print('servoing...')
    err_tolerance = 0.08
    alpha_err = 0.7
    alpha_target = 0.7
    filter_robot_pose = rob_arm.get_object_matrix('UR5_ikTip')
    err_rolling, err_size_rolling = None, err_tolerance * 10
    cnt = 0
    while err_size_rolling > err_tolerance:
        print('========cnt:' + str(cnt) + '=========\n')
        cnt += 1
        robot_pose = rob_arm.get_object_matrix('UR5_ikTip')
        rgb = rob_arm.get_rgb(cam_name=cam_name)
        depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=1.5)
        # rotate 180
        (h, w) = rgb.shape[:2]
        center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, 180, 1.0)
        rgb = cv2.warpAffine(rgb, M, (w, h))
        # rotate 180
        (h, w) = depth.shape[:2]
        center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, 180, 1.0)
        depth = cv2.warpAffine(depth, M, (w, h))
        depth_mm = (depth * 1000).astype(np.uint16)  # type: np.uint16 ; uint16 is needed by keypoint detection network
        camera_keypoint, keypointxy_depth_realunit = kp_detection.inference(cv_rgb=rgb, cv_depth=depth_mm, bbox=bbox)
        cam_pos, cam_quat, cam_matrix = rob_arm.get_camera_pos(cam_name=cam_name)
        cam_matrix = np.array(cam_matrix).reshape(3, 4)
        camera2world_matrix = cam_matrix
        world_keypoints = np.zeros((14, 3), dtype=np.float64)
        for idx, keypoint in enumerate(camera_keypoint):
            keypoint = np.append(keypoint, 1)
            world_keypoints[idx, :] = np.dot(camera2world_matrix, keypoint)[0:3]
        print('world keypoints', world_keypoints)
        peg_keypoint_top = world_keypoints[0]
        peg_keypoint_bottom = world_keypoints[1]
        hole_keypoint_top = world_keypoints[2 * (choose_hole + 1)]
        hole_keypoint_bottom = world_keypoints[2 * (choose_hole + 1) + 1]
        err = hole_keypoint_top - peg_keypoint_bottom
        # err *= 0.1
        print('err vector', err)
        dis = math.sqrt(math.pow(err[0], 2) + math.pow(err[1], 2))
        print('Distance:', dis)
        if cnt >= 0:
            kp_detection.visualize(cv_rgb=rgb, cv_depth=depth_mm, keypoints=camera_keypoint)
        tilt = False
        if dis < 0.01 and tilt == True:
            hole_dir = hole_keypoint_bottom - hole_keypoint_top
            peg_dir = peg_keypoint_bottom - peg_keypoint_top
            dot_product = np.dot(peg_dir, hole_dir)
            degree = math.degrees(math.acos(dot_product / (np.linalg.norm(peg_dir) * np.linalg.norm(hole_dir))))
            print('Degree:', degree)
            if degree > 2:
                cross_product = np.cross(peg_dir, hole_dir)
                cross_product = cross_product / np.linalg.norm(cross_product)
                #degree -= 0.5
                w = math.cos(math.radians(degree / 2))
                x = math.sin(math.radians(degree / 2)) * cross_product[0]
                y = math.sin(math.radians(degree / 2)) * cross_product[1]
                z = math.sin(math.radians(degree / 2)) * cross_product[2]
                quat = [w, x, y, z]
                rot_pose = quaternion_matrix(quat)
                rot_matrix = np.dot(rot_pose[:3, :3], robot_pose[:3, :3])
                robot_pose[:3, :3] = rot_matrix
                filter_robot_pose[:3, :3] = rot_matrix
                rob_arm.movement(robot_pose)
        # robot_pose[1,3] += (-err[0])
        # robot_pose[2,3] += (-err[1])
        # robot_pose[2,3] += err[1]
        robot_pose[:2, 3] += err[:2]
        #print('unfiltered robot pose', robot_pose[:3, 3])
        filter_robot_pose[:3, 3] = alpha_target * filter_robot_pose[:3, 3] + (1 - alpha_target) * robot_pose[:3, 3]
        #print('filtered robot pose', filter_robot_pose[:3, 3])
        # robot moves to filter_robot_pose
        rob_arm.movement(filter_robot_pose)
        # self.movement(robot_pose)
        if err_rolling is None:
            err_rolling = err
        err_rolling = alpha_err * err_rolling + (1 - alpha_err) * err
        err_size_rolling = alpha_err * err_size_rolling + (1 - alpha_err) * np.linalg.norm(err_rolling)

    # insertion
    print('insertion')
    dir = np.array([0, 0, -1])
    move_pose = rob_arm.get_object_matrix('UR5_ikTip')
    move_pose[:3, 3] += dir * 0.2
    rob_arm.insertion(move_pose)
    # after insertion
    move_pose[:3, 3] -= dir * 0.2
    rob_arm.movement(move_pose)
    # go back to initial pose
    rob_arm.movement(init_pose)
    rob_arm.finish()
def main():
    rob_arm = SingleRoboticArm()
    init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    netpath = '/home/luben/robot-peg-in-hole-task/mankey/experiment/ckpnt_0420/checkpoint-100.pth'
    kp_detection = KeypointDetection(netpath)
    #extrinsic = rob_arm.get_camera_matrix(cam_name='Vision_sensor0')
    #print(extrinsic)
    #intrinsic = rob_arm.get_intrinsic_matrix()
    #print(intrinsic)
    #rgb = rob_arm.get_rgb(cam_name='Vision_sensor0')
    #print(rgb.shape)
    #depth = rob_arm.get_depth(cam_name='Vision_sensor0', near_plane=0.02, far_plane=1)
    #depth_mm = (depth * 1000).astype(np.uint16)  # type: np.uint16 ; uint16 is needed by keypoint detection network
    #print(depth.shape)
    #masks = rob_arm.get_mask()

    ### gt open-loop
    '''
    hole_keypoint_pos = rob_arm.get_target_matrix('hole_keypoint')
    action_matrix = rob_arm.get_correct_action_matrix('peg_keypoint')
    grasp_list = rob_arm.naive_grasp_detection(rgb, depth)
    #rob_arm.run_grasp(grasp_list, 1, use_gdn=False)
    rob_arm.pick_and_place(grasp_list, 1, action_matrix, use_gdn=False)
    '''
    ### gt close-loop
    '''
    grasp_list = rob_arm.naive_grasp_detection(rgb, depth)
    rob_arm.run_grasp(grasp_list, 1, use_gdn=False)
    print('servoing...')
    rob_arm.gt_peg_in_hole_servo(target_name='hole_keypoint', object_name='peg_keypoint')
    '''
    ### close-loop
    # naive grasp
    '''
    rgb = rob_arm.get_rgb()
    depth = rob_arm.get_depth()
    grasp_list = rob_arm.naive_grasp_detection(rgb, depth)
    rob_arm.run_grasp(grasp_list, 1, use_gdn=False)
    '''
    # gt grasp
    peg_keypoint_bottom_pose = rob_arm.get_object_matrix(obj_name='peg_keypoint_bottom')
    grasp_pose = peg_keypoint_bottom_pose.copy()
    grasp_pose[2, 3] += 0.08
    rob_arm.gt_run_grasp(grasp_pose)
    grasp_pose[2, 3] += 0.2
    rob_arm.movement(grasp_pose)

    print('servoing...')
    bbox = np.array([0, 0, 255, 255])
    err_tolerance = 0.18
    alpha_err = 0.7
    alpha_target = 0.7
    filter_robot_pose = rob_arm.get_object_matrix('UR5_ikTip')
    err_rolling, err_size_rolling = None, err_tolerance * 10
    cnt = 0
    while err_size_rolling > err_tolerance:
        print('========cnt:' + str(cnt) + '=========\n')
        cnt += 1
        robot_pose = rob_arm.get_object_matrix('UR5_ikTip')
        cam_name = 'vision_eye'
        rgb = rob_arm.get_rgb(cam_name=cam_name)
        depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=1.5)
        # rotate 180
        (h, w) = rgb.shape[:2]
        center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, 180, 1.0)
        rgb = cv2.warpAffine(rgb, M, (w, h))
        # rotate 180
        (h, w) = depth.shape[:2]
        center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, 180, 1.0)
        depth = cv2.warpAffine(depth, M, (w, h))
        depth_mm = (depth * 1000).astype(np.uint16)  # type: np.uint16 ; uint16 is needed by keypoint detection network
        camera_keypoint = kp_detection.inference(cv_rgb=rgb, cv_depth=depth_mm, bbox=bbox)
        cam_pos, cam_quat, cam_matrix = rob_arm.get_camera_pos(cam_name=cam_name)
        cam_matrix = np.array(cam_matrix).reshape(3, 4)
        camera2world_matrix = cam_matrix
        world_keypoints = np.zeros((4, 3), dtype=np.float64)
        for idx, keypoint in enumerate(camera_keypoint):
            keypoint = np.append(keypoint, 1)
            world_keypoints[idx, :] = np.dot(camera2world_matrix, keypoint)[0:3]
        print('world keypoints', world_keypoints)
        peg_keypoint_top = world_keypoints[0]
        peg_keypoint_bottom = world_keypoints[1]
        #object_keypoint[0] = object_keypoint[0] * -1
        #object_keypoint[1] = object_keypoint[1] * -1
        hole_keypoint_top = world_keypoints[2]
        hole_keypoint_bottom = world_keypoints[3]
        #target_keypoint[0] = target_keypoint[0] * -1
        #target_keypoint[1] = target_keypoint[1] * -1
        err = hole_keypoint_top - peg_keypoint_bottom
        # err *= 0.1
        print('err vector', err)
        if cnt >= 100:
            kp_detection.visualize(cv_rgb=rgb, cv_depth=depth_mm, keypoints=camera_keypoint)
        dis = math.sqrt(math.pow(err[0], 2) + math.pow(err[1], 2))
        print('Distance:', dis)
        if dis < 0.01:
            hole_dir = hole_keypoint_bottom - hole_keypoint_top
            peg_dir = peg_keypoint_bottom - peg_keypoint_top
            dot_product = np.dot(peg_dir, hole_dir)
            degree = math.degrees(math.acos(dot_product / (np.linalg.norm(peg_dir) * np.linalg.norm(hole_dir))))
            print('Degree:', degree)
            if degree > 2:
                cross_product = np.cross(peg_dir, hole_dir)
                cross_product = cross_product / np.linalg.norm(cross_product)
                #degree -= 0.5
                w = math.cos(math.radians(degree / 2))
                x = math.sin(math.radians(degree / 2)) * cross_product[0]
                y = math.sin(math.radians(degree / 2)) * cross_product[1]
                z = math.sin(math.radians(degree / 2)) * cross_product[2]
                quat = [w, x, y, z]
                rot_pose = quaternion_matrix(quat)
                rot_matrix = np.dot(rot_pose[:3, :3], robot_pose[:3, :3])
                robot_pose[:3, :3] = rot_matrix
                filter_robot_pose[:3, :3] = rot_matrix
                rob_arm.movement(robot_pose)
        # robot_pose[1,3] += (-err[0])
        # robot_pose[2,3] += (-err[1])
        # robot_pose[2,3] += err[1]
        robot_pose[:2, 3] += err[:2]
        #print('unfiltered robot pose', robot_pose[:3, 3])
        filter_robot_pose[:3, 3] = alpha_target * filter_robot_pose[:3, 3] + (1 - alpha_target) * robot_pose[:3, 3]
        #print('filtered robot pose', filter_robot_pose[:3, 3])
        # robot moves to filter_robot_pose
        rob_arm.movement(filter_robot_pose)
        # self.movement(robot_pose)
        if err_rolling is None:
            err_rolling = err
        err_rolling = alpha_err * err_rolling + (1 - alpha_err) * err
        err_size_rolling = alpha_err * err_size_rolling + (1 - alpha_err) * np.linalg.norm(err_rolling)

    # insertion
    print('insertion')
    dir = np.array([0, 0, -1])
    move_pose = rob_arm.get_object_matrix('UR5_ikTip')
    move_pose[:3, 3] += dir * 0.185
    rob_arm.insertion(move_pose)
    # after insertion
    move_pose[:3, 3] -= dir * 0.185
    rob_arm.movement(move_pose)
    # go back to initial pose
    rob_arm.movement(init_pose)

    ### arm move test
    '''
    robot_pose = rob_arm.get_object_matrix('UR5_ikTip')
    robot_pose[2, 3] += 10
    rob_arm.movement(robot_pose)
    '''
    ### only grasp
    '''
    option = 'naive'
    if option == 'gdn':
        for mask in masks:
            rob_arm.visualize_image(mask, depth, rgb)
            grasp_list = rob_arm.get_grasping_candidate(depth, mask, 75, 75)
            #print(grasp_list)
            rob_arm.run_grasp(grasp_list, 1)
    else:
        grasp_list = rob_arm.naive_grasp_detection(rgb, depth)
        #print(grasp_list)
        rob_arm.run_grasp(grasp_list, 1, use_gdn=False)
    '''
    rob_arm.finish()
def main():
    rob_arm = SingleRoboticArm()
    cam_name = 'vision_sensor'
    #extrinsic = rob_arm.get_camera_matrix(cam_name=cam_name)
    #print(extrinsic)
    #intrinsic = rob_arm.get_intrinsic_matrix()
    #print(intrinsic)
    #rgb = rob_arm.get_rgb(cam_name=cam_name)
    #print(rgb.shape)
    #depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=0.5)
    #print(depth.shape)
    #masks = rob_arm.get_mask(cam_name=cam_name)
    '''
    cam_name = 'vision_sensor'
    rgb = rob_arm.get_rgb(cam_name=cam_name)
    depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=0.5)
    rob_arm.naive_angle_grasp(rgb, depth, cam_name=cam_name)
    gripper_pose = np.array([[ 4.54187393e-05, -1.00000000e+00,  8.34465027e-06, -1.5510e-01],
                             [-1.97768211e-04, -8.34465027e-06, -1.00000000e+00, -3.0250e-01],
                             [ 1.00000000e+00,  4.54187393e-05, -1.97768211e-04,  0.329978],
                             [ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  1.00000000e+00]])
    rob_arm.movement(gripper_pose)
    '''
    init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    cam_name = 'vision_sensor_gdn'
    rgb = rob_arm.get_rgb(cam_name=cam_name)
    depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=1.5)
    #masks = rob_arm.get_mask(cam_name=cam_name)
    extrinsic = rob_arm.get_camera_matrix(cam_name=cam_name)
    #for mask in masks:
    rob_arm.visualize_image(None, depth, rgb)
    grasp_list = rob_arm.get_grasping_candidate(depth, extrinsic, 90, 90)
    grasp_matrix = grasp_list[0]
    #print(grasp_list)
    #rob_arm.run_grasp(grasp_list, 1, use_gdn=True)
    rob_arm.run_single_grasp(grasp_matrix, use_gdn=True)
    step_2_pose = np.array([[ 7.14063644e-05, -1.00000000e+00,  1.22785568e-05,  1.69920236e-01],
                            [-2.97635794e-04, -1.23977661e-05, -1.00000000e+00, -4.69989717e-01],
                            [ 1.00000000e+00,  7.14957714e-05, -2.97665596e-04,  2.22262397e-01],
                            [ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  1.00000000e+00]])
    step_2_rot = step_2_pose[:3, :3]
    grasp_matrix[:3, :3] = step_2_rot
    grasp_matrix[2, 3] += 0.2
    rob_arm.movement(grasp_matrix)
    rob_arm.movement(step_2_pose)
    rob_arm.open_gripper(True)
    rob_arm.finish()
def main():
    rob_arm = SingleRoboticArm()
    data_root = '/Users/cmlab/data/pdc/logs_proto'
    date = '2021-10-07'
    anno_data = 'insertion_xyzrot_eye_' + date + '/processed'
    im_data = 'insertion_xyzrot_eye_' + date + '/processed/images'
    anno_data_path = os.path.join(data_root, anno_data)
    im_data_path = os.path.join(data_root, im_data)
    # create folder
    cwd = os.getcwd()
    os.chdir(data_root)
    if not os.path.exists(im_data):
        os.makedirs(im_data)
    os.chdir(cwd)

    info_dic = {}
    cnt = 0
    iter = 10
    cam_name = 'vision_eye'
    peg_top = 'peg_keypoint_top2'
    peg_bottom = 'peg_keypoint_bottom2'
    hole_top = 'hole_keypoint_top'
    hole_bottom = 'hole_keypoint_bottom'
    peg_name = 'peg2'
    hole_name = 'hole'
    move_record = True
    ompl_path_planning = False

    origin_hole_pose = rob_arm.get_object_matrix(hole_name)
    origin_hole_pos = origin_hole_pose[:3, 3]
    origin_hole_quat = rob_arm.get_object_quat(hole_name)
    origin_peg_pose = rob_arm.get_object_matrix(peg_name)
    origin_peg_pos = origin_peg_pose[:3, 3]
    origin_peg_quat = rob_arm.get_object_quat(peg_name)
    #origin_ur5_pose = rob_arm.get_object_matrix('UR5_ikTarget')
    #origin_ur5_pos = origin_ur5_pose[:3, 3]
    #origin_ur5_quat = rob_arm.get_object_quat('UR5_ikTarget')
    gripper_init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')

    for i in range(iter):
        print('=' * 8 + 'Iteration ' + str(i) + '=' * 8)
        # set init pos of peg and hole
        hole_pos, peg_pos = get_init_pos(origin_hole_pos.copy(), origin_peg_pos.copy())
        rob_arm.set_object_position(hole_name, hole_pos)
        rob_arm.set_object_quat(hole_name, origin_hole_quat)
        #rob_arm.set_object_position(peg_name, peg_pos)
        rob_arm.set_object_position(peg_name, origin_peg_pos)
        rob_arm.set_object_quat(peg_name, origin_peg_quat)
        random_tilt(rob_arm, [hole_name], 0, 60)
        rob_arm.movement(gripper_init_pose)
        rob_arm.open_gripper(mode=1)

        # move peg to random x, y, z
        target_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
        hole_top_pose = rob_arm.get_object_matrix(obj_name=hole_top)
        delta_move = np.array([random.uniform(-0.03, 0.03), random.uniform(-0.03, 0.03), random.uniform(0.09, 0.12)])
        target_pos = hole_top_pose[:3, 3]
        target_pos += delta_move
        # target pose
        target_pose[:3, 3] = target_pos
        if ompl_path_planning == True:
            # move to target pose in order to get joint position
            rob_arm.movement(target_pose)
            target_joint_config = rob_arm.get_joint_position()
            rob_arm.movement(gripper_init_pose)

        # gt grasp peg
        # enable dynamic property of the peg
        rob_arm.enableDynamic(peg_name, True)
        peg_keypoint_top_pose = rob_arm.get_object_matrix(obj_name=peg_top)
        grasp_pose = peg_keypoint_top_pose.copy()
        # gripper offset
        #grasp_pose[:3, 3] += peg_keypoint_top_pose[:3, 1] * 0.0015  # y-axis
        rob_arm.gt_run_grasp(grasp_pose)
        #rob_arm.setObjectParent(peg_name, 'RG2')
        # disabling dynamics has some problems
        #rob_arm.enableDynamic(peg_name, False)

        # pull up the peg to avoid collision
        # along the x-axis of the peg
        grasp_pose[:3, 3] += peg_keypoint_top_pose[:3, 0] * 0.25
        hole_keypoint_top_pose = rob_arm.get_object_matrix(obj_name=hole_top)
        # move to hole top
        grasp_pose[:2, 3] = hole_keypoint_top_pose[:2, 3]
        rob_arm.movement(grasp_pose)
        #time.sleep(1)

        # peg hole alignment
        # x-axis alignment
        hole_insert_dir = -rob_arm.get_object_matrix(hole_top)[:3, 0]  # x-axis
        peg_insert_dir = -rob_arm.get_object_matrix(peg_top)[:3, 0]  # x-axis
        dot_product = np.dot(peg_insert_dir, hole_insert_dir)
        degree = math.degrees(math.acos(dot_product / (np.linalg.norm(peg_insert_dir) * np.linalg.norm(hole_insert_dir))))
        print('degree between peg and hole : ', degree)
        if degree > 0:
            cross_product = np.cross(peg_insert_dir, hole_insert_dir)
            cross_product = cross_product / np.linalg.norm(cross_product)
            w = math.cos(math.radians(degree / 2))
            x = math.sin(math.radians(degree / 2)) * cross_product[0]
            y = math.sin(math.radians(degree / 2)) * cross_product[1]
            z = math.sin(math.radians(degree / 2)) * cross_product[2]
            quat = [w, x, y, z]
            rot_pose = quaternion_matrix(quat)
            robot_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
            rot_matrix = np.dot(rot_pose[:3, :3], robot_pose[:3, :3])
            robot_pose[:3, :3] = rot_matrix
            hole_keypoint_top_pose = rob_arm.get_object_matrix(obj_name=hole_top)
            robot_pose[:3, 3] = hole_keypoint_top_pose[:3, 3] + hole_keypoint_top_pose[:3, 0] * 0.07
            rob_arm.movement(robot_pose)

        # gripper offset
        robot_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
        peg_keypoint_top_pose = rob_arm.get_object_matrix(obj_name=peg_top)
        robot_pose[:3, 3] += peg_keypoint_top_pose[:3, 1] * 0.003  # y-axis
        robot_pose[:3, 3] -= peg_keypoint_top_pose[:3, 0] * 0.005  # x-axis
        rob_arm.movement(robot_pose)
        # insert test
        #robot_pose[:3, 3] -= peg_keypoint_top_pose[:3, 0] * 0.05  # x-axis
        #rob_arm.movement(robot_pose)

        if ompl_path_planning == True:
            # determine the path
            interpolation_states = 5
            path = rob_arm.compute_path_from_joint_space(target_joint_config, interpolation_states)
            print('len of the path : ', int(len(path) / 6))
            if len(path) > 0:
                lineHandle = rob_arm.visualize_path(path)
                if move_record == True:
                    # end point
                    print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' path: ' + str(0))
                    delta_rotation = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
                    delta_translation = np.array([0.0, 0.0, 0.0])
                    r_euler = np.array([0.0, 0.0, 0.0])
                    step_size = 0
                    gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTip')
                    pre_xyz = gripper_pose[:3, 3]
                    pre_rot = gripper_pose[:3, :3]
                    info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation, delta_translation, gripper_pose, step_size, r_euler)
                    info_dic[cnt] = info
                    cnt += 1
                    path_len = int(len(path) / 6)
                    path_idx = 0
                    #for j in range(int(len(path) / 6)):
                    track_idx = 0
                    while path_idx < path_len:
                        print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' path: ' + str(track_idx + 1))
                        track_idx += 1
                        p = path_idx / path_len
                        # step range: 0~100
                        step = 1 + int(p * 50)
                        #step = random.randint(1 + int(p * 10), 1 + int(p * 100))
                        if path_idx + step >= path_len:
                            subPath = path[path_idx * 6:]
                        else:
                            subPath = path[path_idx * 6:(path_idx + step) * 6]
                        path_idx += step
                        #print(subPath)
                        rob_arm.run_through_path(subPath)
                        step_size = step / (1 + int(1 * 50))
                        if step_size >= 1.0:
                            step_size = 1.0
                        print('step_size', step_size)
                        gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTip')
                        cnt_xyz = gripper_pose[:3, 3]
                        cnt_rot = gripper_pose[:3, :3]
                        delta_translation = pre_xyz - cnt_xyz
                        # pre <==> target
                        # cnt <==> source
                        cnt_rot_t = np.transpose(cnt_rot)
                        delta_rotation = np.dot(cnt_rot_t, pre_rot)
                        r = R.from_matrix(delta_rotation)
                        r_euler = r.as_euler('zyx', degrees=True)
                        print('delta_translation', delta_translation)
                        print('delta_rotation', delta_rotation)
                        print('r_euler', r_euler)
                        info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation, delta_translation, gripper_pose, step_size, r_euler)
                        info_dic[cnt] = info
                        cnt += 1
                        pre_rot = cnt_rot.copy()
                        pre_xyz = cnt_xyz.copy()
                else:
                    rob_arm.run_through_all_path(path)
                rob_arm.clear_path_visualization(lineHandle)
        else:
            if move_record == True:
                # only record start point
                gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTip')
                pre_xyz = gripper_pose[:3, 3]
                pre_rot = gripper_pose[:3, :3]
                rob_arm.movement(target_pose)
                gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTip')
                cnt_xyz = gripper_pose[:3, 3]
                cnt_rot = gripper_pose[:3, :3]
                delta_translation = pre_xyz - cnt_xyz
                # pre <==> target
                # cnt <==> source
                cnt_rot_t = np.transpose(cnt_rot)
                delta_rotation = np.dot(cnt_rot_t, pre_rot)
                r = R.from_matrix(delta_rotation)
                r_euler = r.as_euler('zyx', degrees=True)
                print('iter: ' + str(i) + ' cnt: ' + str(cnt))
                print('delta_translation', delta_translation)
                print('delta_rotation', delta_rotation)
                print('r_euler', r_euler)
                # step_size is not used here
                step_size = 0
                info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation, delta_translation, gripper_pose, step_size, r_euler)
                info_dic[cnt] = info
                cnt += 1
            else:
                rob_arm.movement(target_pose)

    rob_arm.finish()
    f = open(os.path.join(anno_data_path, 'peg_in_hole.yaml'), 'w')
    yaml.dump(info_dic, f, Dumper=yaml.RoundTripDumper)
    f.close()
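# The routine above calls get_init_pos() and random_tilt(), project helpers that
# are not shown in this excerpt. The following are hypothetical sketches inferred
# from the call sites (get_init_pos returns randomized hole/peg positions;
# random_tilt tilts the named objects by a random angle within the given degree
# range, mirroring the camera-tilt logic used elsewhere in these scripts); the
# repo's actual implementations may differ.

def get_init_pos(hole_pos, peg_pos):
    # Hypothetical sketch: jitter the hole in x/y, leave the peg untouched.
    hole_pos[0] += random.uniform(-0.05, 0.05)
    hole_pos[1] += random.uniform(-0.05, 0.05)
    return hole_pos, peg_pos

def random_tilt(rob_arm, obj_name_list, min_tilt_degree, max_tilt_degree):
    # Hypothetical sketch: rotate each object by a random angle about a random
    # horizontal axis, composing the rotation with the object's current quaternion.
    tilt_degree = random.uniform(min_tilt_degree, max_tilt_degree)
    rot_dir = np.random.normal(size=(3,))
    rot_dir[2] = 0.0
    rot_dir = rot_dir / np.linalg.norm(rot_dir)
    w = math.cos(math.radians(tilt_degree / 2))
    x = math.sin(math.radians(tilt_degree / 2)) * rot_dir[0]
    y = math.sin(math.radians(tilt_degree / 2)) * rot_dir[1]
    z = math.sin(math.radians(tilt_degree / 2)) * rot_dir[2]
    rot_quat = [w, x, y, z]
    for obj_name in obj_name_list:
        obj_quat = rob_arm.get_object_quat(obj_name)  # [x, y, z, w]
        obj_quat = [obj_quat[3], obj_quat[0], obj_quat[1], obj_quat[2]]  # to [w, x, y, z]
        obj_quat = qmult(rot_quat, obj_quat)  # still [w, x, y, z]
        obj_quat = [obj_quat[1], obj_quat[2], obj_quat[3], obj_quat[0]]  # back to [x, y, z, w]
        rob_arm.set_object_quat(obj_name, obj_quat)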
def main():
    rob_arm = SingleRoboticArm()
    data_root = '/home/luben/data/pdc/logs_proto'
    date = '2021-05-27'
    anno_data = 'box_insertion_fix_' + date + '/processed'
    im_data = 'box_insertion_fix_' + date + '/processed/images'
    anno_data_path = os.path.join(data_root, anno_data)
    im_data_path = os.path.join(data_root, im_data)
    # cam_name = 'vision_eye'
    cam_name = 'vision_fix'
    # create folder
    cwd = os.getcwd()
    os.chdir(data_root)
    if not os.path.exists(im_data):
        os.makedirs(im_data)
    os.chdir(cwd)

    info_dic = {}
    cnt = 0
    iter = 300
    gripper_init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    origin_peg_pos_list = []
    origin_peg_quat_list = []
    for i in range(6):
        origin_peg_pose = rob_arm.get_object_matrix('peg_large' + str(i))
        origin_peg_pos = origin_peg_pose[:3, 3]
        origin_peg_quat = rob_arm.get_object_quat('peg_large' + str(i))
        origin_peg_pos_list.append(origin_peg_pos)
        origin_peg_quat_list.append(origin_peg_quat)
    origin_box_pose = rob_arm.get_object_matrix('box1')
    origin_box_pos = origin_box_pose[:3, 3]
    origin_box_quat = rob_arm.get_object_quat('box1')
    origin_cam_pose = rob_arm.get_object_matrix(cam_name)
    origin_cam_pos = origin_cam_pose[:3, 3]
    origin_cam_quat = rob_arm.get_object_quat(cam_name)

    for i in range(iter):
        # init pos of peg and hole
        rob_arm.set_object_position('box1', origin_box_pos)
        rob_arm.set_object_quat('box1', origin_box_quat)
        rob_arm.set_object_position(cam_name, origin_cam_pos)
        rob_arm.set_object_quat(cam_name, origin_cam_quat)
        # reset all pegs to their original poses (loop variable j so the outer
        # iteration index i is not overwritten)
        for j in range(6):
            rob_arm.set_object_position('peg_large' + str(j), origin_peg_pos_list[j])
            rob_arm.set_object_quat('peg_large' + str(j), origin_peg_quat_list[j])
        rob_arm.movement(gripper_init_pose)
        rob_arm.open_gripper(mode=1)

        # randomly remove some pegs and move them out of the workspace
        peg_list = list(range(6))
        num_remove = random.randint(0, 5)
        print('number of remove peg:{:d}'.format(num_remove))
        remove_list = random.sample(peg_list, num_remove)
        for idx, v in enumerate(remove_list):
            print('remove peg_idx:{:d}'.format(v))
            rob_arm.set_object_position('peg_large' + str(v), [2, 0.2 * idx, 0.1])
            peg_list.pop(peg_list.index(v))
        peg_idx = random.choice(peg_list)
        print('peg_idx:{:d}'.format(peg_idx))
        peg_top = 'peg_keypoint_top_large' + str(peg_idx)
        peg_bottom = 'peg_keypoint_bottom_large' + str(peg_idx)
        hole_top = 'hole_keypoint_top_large' + str(peg_idx)
        hole_bottom = 'hole_keypoint_bottom_large' + str(peg_idx)
        pick_record = True
        tilt_record = False
        move_record = True
        peg = 'peg_large' + str(peg_idx)
        hole = 'container' + str(peg_idx)

        # randomly move & rotate the camera
        cam_pose = rob_arm.get_object_matrix(cam_name)
        cam_pos = cam_pose[:3, 3]
        delta_x = random.uniform(-0.04, 0.04)
        delta_y = random.uniform(-0.04, 0.04)
        cam_pos[0] += delta_x
        cam_pos[1] += delta_y
        rob_arm.set_object_position(cam_name, cam_pos)
        cam_tilt_degree = 3
        rot_dir = np.random.normal(size=(3,))
        rot_dir = rot_dir / np.linalg.norm(rot_dir)
        print(rot_dir)
        w = math.cos(math.radians(cam_tilt_degree / 2))
        x = math.sin(math.radians(cam_tilt_degree / 2)) * rot_dir[0]
        y = math.sin(math.radians(cam_tilt_degree / 2)) * rot_dir[1]
        z = math.sin(math.radians(cam_tilt_degree / 2)) * rot_dir[2]
        cam_rot_quat = [w, x, y, z]
        cam_quat = rob_arm.get_object_quat(cam_name)  # [x,y,z,w]
        cam_quat = [cam_quat[3], cam_quat[0], cam_quat[1], cam_quat[2]]  # change to [w,x,y,z]
        cam_quat = qmult(cam_rot_quat, cam_quat)  # [w,x,y,z]
        cam_quat = [cam_quat[1], cam_quat[2], cam_quat[3], cam_quat[0]]  # change to [x,y,z,w]
        rob_arm.set_object_quat(cam_name, cam_quat)

        # gt grasp
        peg_keypoint_pose = rob_arm.get_object_matrix(obj_name=peg_top)
        grasp_pose = peg_keypoint_pose.copy()
        grasp_pose[0, 3] += 0.0095
        grasp_pose[2, 3] -= 0.015
        rob_arm.gt_run_grasp(grasp_pose)
        # pull up the peg to avoid collision (hole's height = 0.2)
        grasp_pose[2, 3] += 0.18
        rob_arm.movement(grasp_pose)

        if tilt_record:
            # randomly tilt the peg from -20 to 20 degrees
            peg_tilt_degree = random.uniform(-20, 20)
            peg_quat = rob_arm.get_object_quat(peg)
            # this line must be modified for an angled hole
            peg_rot_quat = [math.sin(math.radians(peg_tilt_degree / 2)), 0, 0, math.cos(math.radians(peg_tilt_degree / 2))]
            rob_arm.set_object_quat(peg, peg_rot_quat)
            peg_pose = rob_arm.get_object_matrix(peg)
            peg_pos = peg_pose[:3, 3]
            # offset due to rotation
            # this line must be modified for an angled hole
            offset_peg_y = 0.035 * math.sin(math.radians(peg_tilt_degree / 2))
            peg_pos[1] += offset_peg_y
            rob_arm.set_object_position(peg, peg_pos)

            # tilt the gripper
            peg_keypoint_top_pose = rob_arm.get_object_matrix(peg_top)
            peg_keypoint_bottom_pose = rob_arm.get_object_matrix(peg_bottom)
            hole_keypoint_top_pose = rob_arm.get_object_matrix(hole_top)
            hole_keypoint_bottom_pose = rob_arm.get_object_matrix(hole_bottom)
            hole_dir = hole_keypoint_bottom_pose[:3, 3] - hole_keypoint_top_pose[:3, 3]
            peg_dir = peg_keypoint_bottom_pose[:3, 3] - peg_keypoint_top_pose[:3, 3]
            dot_product = np.dot(peg_dir, hole_dir)
            degree = math.degrees(math.acos(dot_product / (np.linalg.norm(peg_dir) * np.linalg.norm(hole_dir))))
            print('Degree:', degree)
            cross_product = np.cross(peg_dir, hole_dir)
            cross_product = cross_product / np.linalg.norm(cross_product)
            w = math.cos(math.radians(degree / 2))
            x = math.sin(math.radians(degree / 2)) * cross_product[0]
            y = math.sin(math.radians(degree / 2)) * cross_product[1]
            z = math.sin(math.radians(degree / 2)) * cross_product[2]
            gripper_rot_quat = [w, x, y, z]
            rot_pose = quaternion_matrix(gripper_rot_quat)
            gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
            rot_matrix = np.dot(rot_pose[:3, :3], gripper_pose[:3, :3])
            gripper_pose[:3, :3] = rot_matrix
            rob_arm.movement(gripper_pose)

            # peg hole alignment
            peg_keypoint_bottom_pose = rob_arm.get_object_matrix(obj_name=peg_bottom)
            hole_keypoint_top_pose = rob_arm.get_object_matrix(obj_name=hole_top)
            err_xy = hole_keypoint_top_pose[:2, 3] - peg_keypoint_bottom_pose[:2, 3]
            gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
            # peg falling
            if err_xy[0] > 0.15 or err_xy[1] > 0.15:
                continue
            gripper_pose[:2, 3] += err_xy
            rob_arm.movement(gripper_pose)

            # gripper rotation record
            step_size = 0.1
            print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' rotate: ' + str(0))
            delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
            delta_translation = np.array([0.0, 0.0, 0.0])
            info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
            info_dic[cnt] = info
            cnt += 1
            split_degree = degree * step_size
            for j in range(int(1 / step_size)):
                # reverse rotation
                w = math.cos(math.radians(split_degree / 2))
                x = math.sin(math.radians(split_degree / 2)) * cross_product[0] * -1
                y = math.sin(math.radians(split_degree / 2)) * cross_product[1] * -1
                z = math.sin(math.radians(split_degree / 2)) * cross_product[2] * -1
                gripper_rot_quat = [w, x, y, z]
                rot_pose = quaternion_matrix(gripper_rot_quat)
                gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
                rot_matrix = np.dot(rot_pose[:3, :3], gripper_pose[:3, :3])
                gripper_pose[:3, :3] = rot_matrix
                rob_arm.movement(gripper_pose)
                print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' rotate: ' + str(j + 1))
                delta_rotation_quat = np.array([w, -x, -y, -z])
                delta_translation = np.array([0.0, 0.0, 0.0])
                info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                info_dic[cnt] = info
                cnt += 1

        # pick up the peg
        gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
        random_z = random.uniform(0.05, 0.1)
        random_z = 0.05
        pick_vec = np.array([0, 0, random_z])
        if pick_record:
            step_size = 0.1
            for j in range(int(1 / step_size)):
                gripper_pose[:3, 3] += pick_vec * step_size
                rob_arm.movement(gripper_pose)
                print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' pick: ' + str(j))
                delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
                delta_translation = -pick_vec * step_size
                info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                info_dic[cnt] = info
                cnt += 1
        else:
            gripper_pose[:3, 3] += pick_vec
            rob_arm.movement(gripper_pose)

        # move toward a random x/y target around the center of the hole block
        hole_pose_1 = rob_arm.get_object_matrix('container1')
        hole_pos_1 = hole_pose_1[:3, 3]
        hole_pose_4 = rob_arm.get_object_matrix('container4')
        hole_pos_4 = hole_pose_4[:3, 3]
        hole_pos = 0.5 * hole_pos_1 + 0.5 * hole_pos_4
        print(hole_pos)
        target_x, target_y = random_gripper_xy(hole_pos)
        gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
        if move_record:
            now_x = gripper_pose[0, 3]
            now_y = gripper_pose[1, 3]
            move_vec = np.array([target_x - now_x, target_y - now_y, 0])
            step_size = 0.1
            for k in range(int(1 / step_size)):
                if k == 0:
                    for l in range(10):
                        gripper_pose[:3, 3] += move_vec * 0.01
                        rob_arm.movement(gripper_pose)
                        print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' move: ' + str(l))
                        delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
                        delta_translation = -move_vec * 0.01
                        info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                        info_dic[cnt] = info
                        cnt += 1
                        if l == 9:
                            move_vec *= 0.9
                else:
                    gripper_pose[:3, 3] += move_vec * step_size
                    rob_arm.movement(gripper_pose)
                    print('iter: ' + str(i) + ' cnt: ' + str(cnt) + ' move: ' + str(k + 9))
                    delta_rotation_quat = np.array([1.0, 0.0, 0.0, 0.0])
                    delta_translation = -move_vec * step_size
                    info = generate_one_im_anno(cnt, cam_name, peg_top, peg_bottom, hole_top, hole_bottom, rob_arm, im_data_path, delta_rotation_quat, delta_translation)
                    info_dic[cnt] = info
                    cnt += 1
        else:
            gripper_pose[0, 3] = target_x
            gripper_pose[1, 3] = target_y
            rob_arm.movement(gripper_pose)

    rob_arm.finish()
    f = open(os.path.join(anno_data_path, 'peg_in_hole.yaml'), 'w')
    yaml.dump(info_dic, f, Dumper=yaml.RoundTripDumper)
    f.close()
def main():
    rob_arm = SingleRoboticArm()
    init_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    # gt grasp
    peg_keypoint_top_pose = rob_arm.get_object_matrix(obj_name='peg_keypoint_top_large0')
    grasp_pose = peg_keypoint_top_pose.copy()
    grasp_pose[0, 3] += 0.0095
    grasp_pose[2, 3] -= 0.015
    rob_arm.gt_run_grasp(grasp_pose)
    grasp_pose[2, 3] += 0.24
    print(grasp_pose)
    rob_arm.movement(grasp_pose)
    hole_keypoint_top_pose = rob_arm.get_object_matrix(obj_name='hole_keypoint_top_large0')
    grasp_pose[:2, 3] = hole_keypoint_top_pose[:2, 3]
    grasp_pose[0, 3] += 0.0095
    rob_arm.movement(grasp_pose)
    # insertion
    grasp_pose[2, 3] -= 0.2
    rob_arm.movement(grasp_pose)
    rob_arm.open_gripper(True)
    rob_arm.movement(init_pose)
    rob_arm.finish()
def main():
    rob_arm = SingleRoboticArm()
    checkpoints_file_path = 'mover/ckpnt_rmse/checkpoints_best.pth'
    mover = MoveInference(checkpoints_file_path)
    # gt grasp
    peg_keypoint_bottom_pose = rob_arm.get_object_matrix(obj_name='peg_keypoint_bottom')
    grasp_pose = peg_keypoint_bottom_pose.copy()
    grasp_pose[2, 3] += 0.08
    rob_arm.gt_run_grasp(grasp_pose)
    grasp_pose[2, 3] += 0.2
    rob_arm.movement(grasp_pose)

    hole_keypoint_bottom_pose = rob_arm.get_object_matrix(obj_name='hole_keypoint_bottom')
    delta_x = random.uniform(-0.06, 0.06)
    #delta_y = random.uniform(-0.06, 0.06)
    delta_y = 0.06
    delta_z = random.uniform(0.12, 0.17)
    gripper_pose = rob_arm.get_object_matrix(obj_name='UR5_ikTarget')
    gripper_pose[0, 3] = hole_keypoint_bottom_pose[0, 3] + delta_x
    gripper_pose[1, 3] = hole_keypoint_bottom_pose[1, 3] + delta_y
    gripper_pose[2, 3] = hole_keypoint_bottom_pose[2, 3] + 0.134 + delta_z
    rob_arm.movement(gripper_pose)

    cnt = 0
    for i in range(60):
        print('========cnt:' + str(cnt) + '=========\n')
        cnt += 1
        cam_name = 'vision_eye'
        rgb = rob_arm.get_rgb(cam_name=cam_name)
        depth = rob_arm.get_depth(cam_name=cam_name, near_plane=0.01, far_plane=1.5)
        depth_mm = (depth * 1000).astype(np.uint16)
        r, t = mover.inference_single_frame(rgb=rgb, depth_mm=depth_mm)
        print(t)
        robot_pose = rob_arm.get_object_matrix('UR5_ikTip')
        rot_matrix = np.dot(r, robot_pose[:3, :3])
        #robot_pose[:3, :3] = rot_matrix
        robot_pose[:3, 3] += t * 0.5
        print(robot_pose)
        rob_arm.movement(robot_pose)

    rob_arm.finish()