Code Example #1
 def get_imdb(self):
     ur5_kin = UR5Kin(self.t_scale)
     raw_data_file = os.path.join(self.data_path, self.config_file_name)
     output_file_name = self.config_file_name[:-4]+'_lowlyap.csv'
     output_file_path = os.path.join(self.output_path, output_file_name)
     low_lyap_count = 0
     try:
         os.remove(output_file_path)
     except OSError:
         pass
     with open(raw_data_file) as raw_data, open(output_file_path, "a") as output_file:
         reader = list(csv.DictReader(raw_data))
         writer = csv.writer(output_file)
         output_header = self.joint_names + self.cube_pose_header
         writer.writerow(output_header)
         for row in tqdm(reader):
             joint_config = self.get_joint_config_from_row(row)
             X_H = ur5_kin.get_ee_pose(joint_config)
             X_Ts = self.get_XTs_from_row(row)
             lyap_func = LyapEH()
             lyap_buff = np.array([])
             for X_T in X_Ts:
                 if X_T[2, 3] > self.table_height:
                     lyap_func.X_T = X_T
                     lyap_temp = lyap_func.cmpt_Lyap(X_H)
                     lyap_buff = np.append(lyap_buff, lyap_temp)
             if lyap_buff.size == 0:
                 continue  # no target above the table for this row; np.amin would fail
             lyap = np.amin(lyap_buff)
             lyap_func.X_T = X_Ts[np.argmin(lyap_buff)]
             # keep only samples whose minimum Lyapunov value is below the threshold
             if lyap < self.lyap_threshold:
                 writer.writerow(row.values())
                 low_lyap_count += 1
         print('Low Lyap Sample Size: %i' % low_lyap_count)
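A minimal usage sketch for the filter above. The class name and constructor arguments are assumptions borrowed from the similar constructor in Code Example #5; only get_imdb itself appears in the source.

 # Hypothetical driver; ImdbBuilder and its arguments are assumptions.
 builder = ImdbBuilder(data_path='~/apcNet_dataset/raw',
                       output_path='~/apcNet_dataset/filtered',
                       dataset_focus='lyap')
 builder.get_imdb()  # writes <config>_lowlyap.csv with the low-Lyapunov rows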
Code Example #2
 def infer_partials(self, img, joint_config, apcNet):
     ur5_kin_scaled = UR5Kin(self.t_scale)
     with torch.no_grad():
         apcNet.net.eval()  # eval() switches mode in place; no reassignment needed
         # vectorised joint poses from the scaled forward kinematics
         vp = ur5_kin_scaled.get_veced_joint_poses(joint_config)
         vp = self.np2torch(vp).cuda()
         vp = vp.unsqueeze(0)  # add batch dimension
         lyap, pd = apcNet.net.infer(img.cuda(), vp)
     return lyap, pd
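A hedged call-site sketch for infer_partials. The controller instance and its preprocessing pipeline are assumptions; the transform mirrors the one built in Code Example #4.

 # Hypothetical call site; `controller` owns infer_partials and a
 # `preprocess` transform like the one in Code Example #4.
 img_tensor = controller.preprocess(cv2_img[:, :, ::-1].copy())  # BGR -> RGB
 img_tensor = img_tensor.unsqueeze(0)  # add batch dimension
 lyap, pd = controller.infer_partials(img_tensor, joint_config, controller.apcNet)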
Code Example #3
 def infer_J(self, img, joint_config, apcNet):
     with torch.no_grad():
         apcNet.net.eval()  # eval() switches mode in place; no reassignment needed
         # feed the raw six-dimensional joint configuration directly
         joint_config = joint_config.reshape(6)
         joint_config = self.np2torch(joint_config).cuda()
         joint_config = joint_config.unsqueeze(0)  # add batch dimension
         lyap, pd = apcNet.net.infer(img.cuda(), joint_config)
     return lyap, pd
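The two inference helpers differ only in the network input: infer_partials feeds vectorised joint poses computed by the scaled forward kinematics, while infer_J feeds the raw six-dimensional joint configuration. Which one applies depends on what the loaded apcNet.net.infer expects as its second argument.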
Code Example #4
 def __init__(self, cnn_infer, t_scale):
     self.d_theta = 0.1
     self.t_scale = float(t_scale)
     self.grasp_offset = 0.05 # grasp offset along global Z axis
     self.window = tk.Tk()
     self.window.title('APCNet Simulator Demo')
     # Create a canvas that fits the camera stream (848x480)
     self.canvas = tk.Canvas(self.window, width=848, height=480)
     self.canvas.pack()
     # Control buttons: start the controller, home the arm, shuffle the cubes
     self.start_bt = tk.Button(self.window, text='Start', width=50, command=self.start_bt_func)
     self.start_bt.pack(anchor=tk.CENTER, expand=True)
     self.home_bt = tk.Button(self.window, text="Home", width=50, command=arm2home_sim)
     self.home_bt.pack(anchor=tk.CENTER, expand=True)
     self.shuffle_bt = tk.Button(self.window, text="Shuffle Cubes", width=50, command=move_cubes_sim)
     self.shuffle_bt.pack(anchor=tk.CENTER, expand=True)
     self.delay = 30
     self.cv2_img = None
     self.is_start = False  # set before update() so the loop can read it
     self.update()
     if cnn_infer == "infer":
         self.apcNet = APCNetSiam()
         self.apcNet.model_fld = 's2r_test02_2048_dense'
         self.apcNet.load_ckpt(load_best=False)
     else:
         self.apcNet = None  # avoid AttributeError when running without inference
     self.preprocess = transforms.Compose([transforms.ToPILImage(),
                                           transforms.CenterCrop(480),
                                           transforms.Resize((224, 224)),
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                std=[0.229, 0.224, 0.225])])
     self.ur5_kin = UR5Kin(self.t_scale)
     self.lyap_func = LyapEH()
     self.apcNet_infer = cnn_infer
     self.image_topic = "/camera/rgb/image_raw"
     self.joint_names = ['shoulder_pan_joint', 'shoulder_lift_joint',
             'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
     # Init msg for publishing
     self.joint_traj = JointTrajectory()
     self.joint_traj.joint_names = self.joint_names
     self.real_joint_traj = JointTrajectory()
     self.real_joint_traj.joint_names = self.joint_names
     self.old_time = time.time()
     self.lyap_vctrl_old = np.zeros([6,1])
     self.real_joint_traj.points = [JointTrajectoryPoint(positions=[0]*6,
                                                         velocities=[0]*6,
                                                         time_from_start=rospy.Duration(0.1))]
     # Approximated Synchronised Topics
     self.image_capturer_sub = message_filters.Subscriber(
         self.image_topic, Image)
     self.q_sub = message_filters.Subscriber(
         "/arm_controller/state", JointTrajectoryControllerState)
     self.synced_msgs = message_filters.ApproximateTimeSynchronizer(
         [self.image_capturer_sub, self.q_sub], 10, 0.005)
     self.synced_msgs.registerCallback(self.image_callback)
     self.sim_joint_command_pub = rospy.Publisher("/arm_controller/command",
                                                 JointTrajectory, queue_size=10)
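A minimal launch sketch for the demo above. The class name, node name and t_scale value are assumptions (t_scale=6 is borrowed from Code Example #7).

 # Hypothetical entry point; names are assumptions, not the source's.
 if __name__ == '__main__':
     rospy.init_node('apcnet_demo')  # subscribers/publishers need a node
     demo = APCNetDemo(cnn_infer='infer', t_scale=6)
     demo.window.mainloop()          # hand control to the Tkinter loop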
Code Example #5
 def __init__(self, data_path, output_path, dataset_focus):
     self.data_path = data_path
     self.output_path = output_path
     self.output_needs_header = True
     self.dataset_focus = dataset_focus
     config_metadata = self.load_metadata()
     self.table_height = float(config_metadata['table_height'])
     self.low_lyap_threshold = float(config_metadata['low_lyap_threshold'])
     self.grasp_offset = float(config_metadata['grasp_offset'])
     self.t_scale = float(config_metadata['t_scale'])
     self.ur5_kin = UR5Kin(self.t_scale)
     self.lyap_func = LyapEH()
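A sketch of the mapping load_metadata() is expected to return, inferred from the keys read in the constructor above; the values here are placeholders, not the source's.

 # Placeholder metadata; only the keys come from the constructor above.
 config_metadata = {
     'table_height': '0.0',
     'low_lyap_threshold': '0.1',
     'grasp_offset': '0.05',
     't_scale': '6',
 }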
Code Example #6
 def __init__(self, cnn_infer):
     # load metadata
     config_metadata = self.load_metadata()
     self.t_scale = float(config_metadata['t_scale'])
     self.grasp_offset = float(config_metadata['grasp_offset'])
     self.table_height = float(config_metadata['table_height'])
     self.lyap_switch_threshold = 0.0
     self.switch_control = False
     # Load Local and Global Net
     if cnn_infer == "infer":
         # Configure the local (zoomed) network
         self.apcNet_local = DirectSiamese()
         self.apcNet_local.model_fld = 'direct_siamese_e2e_zoom_01'
         self.apcNet_local.load_ckpt(load_best=False)
         # Configure the global network
         self.apcNet_global = DirectSiamese()
         self.apcNet_global.model_fld = 'direct_siamese_e2e_01'
         self.apcNet_global.load_ckpt(load_best=False)
     else:
         self.apcNet_local = None
         self.apcNet_global = None  # running without inference
     self.ur5_kin = UR5Kin(self.t_scale)
     self.lyap_func = LyapEH()
     self.apcNet_infer = cnn_infer
     self.image_topic = "/sim_camera/image_raw"
     self.joint_names = [
         'shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
         'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint'
     ]
     # Init msg for publishing
     self.joint_traj = JointTrajectory()
     self.joint_traj.joint_names = self.joint_names
     self.vel_ctrl_old = np.zeros([6, 1])
     # Approximated Synchronised Topics
     self.image_capturer_sub = message_filters.Subscriber(
         self.image_topic, Image)
     self.q_sub = message_filters.Subscriber(
         "/arm_controller/state", JointTrajectoryControllerState)
     self.synced_msgs = message_filters.ApproximateTimeSynchronizer(
         [self.image_capturer_sub, self.q_sub], 10, 0.005)
     self.synced_msgs.registerCallback(self.image_callback)
     self.sim_joint_command_pub = rospy.Publisher("/arm_controller/command",
                                                  JointTrajectory,
                                                  queue_size=10)
     self.real_joint_command_pub = rospy.Publisher("/ur_driver/joint_speed",
                                                   JointTrajectory,
                                                   queue_size=10)
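A minimal launch sketch for the controller above; the class and node names are assumptions.

 # Hypothetical entry point; names are assumptions, not the source's.
 if __name__ == '__main__':
     rospy.init_node('apcnet_sim_controller')
     controller = APCNetController(cnn_infer='infer')
     rospy.spin()  # the synced image/joint-state callback drives control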
Code Example #7
 def __init__(self, folder_name, dataset_focus):
     """
     Args:
         root_dir (string): Directory with all the images.
         transform (callable, optional): Optional transform to be applied
             on a sample.
     """
     dir_path = os.path.expanduser('~/apcNet_dataset/dataset')
     self.root_dir = os.path.join(dir_path, folder_name)
     self.img_zoomer = ZoomAtHand()
     self.csv_file = 'imdb_one_shot.csv'
     self.file_path = os.path.join(self.root_dir, self.csv_file)
     self.imdb = pd.read_csv(self.file_path, skip_blank_lines=True)
     self.ur5_kin = UR5Kin(t_scale=6)
     self.n_samples = self.imdb.shape[0]
     self.d_theta = 0.05
     hf_path = os.path.join(dir_path, folder_name + '_imdb' + '.hdf5')
     self.hf = h5py.File(hf_path, 'w')
     self.dataset_focus = dataset_focus
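A hedged usage sketch for the dataset packer above; the class and folder names are assumptions.

 # Hypothetical usage; OneShotPacker and the folder name are assumptions.
 packer = OneShotPacker(folder_name='sim_cubes', dataset_focus='lyap')
 print('%i samples indexed' % packer.n_samples)
 packer.hf.close()  # release the HDF5 file handle when done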
Code Example #8
 def get_zoomed_image(self, cv2_img, joint_config, offset=65):
     ur5_kin = UR5Kin()
     C, R_t = self.get_cam_porj_matrix()
     X_H = ur5_kin.get_ee_pose(joint_config)
     # homogeneous end-effector position [X, Y, Z, 1]^T
     hand_XYZ1 = X_H[:, 3]
     # transform into the camera frame and project to pixel coordinates
     c_p_gazebo = R_t.dot(hand_XYZ1)
     c_p_cv = self.gazebo_cam_coor_conversion(c_p_gazebo)
     c_p_cv = c_p_cv / c_p_cv[-1]  # normalise by depth
     img_coor_uv1 = (C.dot(c_p_cv)).astype(int)
     u = img_coor_uv1[0]
     v = img_coor_uv1[1]
     # crop a (2*offset)x(2*offset) window centred near the projected hand
     zoomed_img_bgr = cv2_img[v - offset + 10:v + offset + 10,
                              u - offset:u + offset, :]
     return zoomed_img_bgr
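A hedged call-site sketch for the zoom crop above; ZoomAtHand comes from Code Example #7, and the BGR-to-RGB flip reflects OpenCV's channel order.

 # Hypothetical usage; only ZoomAtHand and get_zoomed_image come from the source.
 zoomer = ZoomAtHand()
 patch_bgr = zoomer.get_zoomed_image(cv2_img, joint_config)
 patch_rgb = patch_bgr[:, :, ::-1]  # OpenCV images are BGR; networks expect RGB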