def __init__(self, arms=['L','R'], calcPosePostAdjustment=None, adjustmentInterpolation=True, errorCorrectionMode='NONE'):
    self.arms = arms
    self.errorCorrectionMode = errorCorrectionMode
    self.truthPose = {}
    self.calcPose = {}
    self.calcPoseAtTruth = {}
    self.sys_error = {}
    self.est_pose_pub = {}
    self.pose_error_pub = {}
    self.estimateFromImage = dict()
    self.estimatedPose = defaultdict(lambda: (None, None))

    if self.errorCorrectionMode == 'SYS':
        trained_data_R = pickle.load(open(os.path.join(roslib.packages.get_pkg_subdir('raven_2_params', 'data/right_arm'), 'right_train_data.pkl')))
        trained_data_L = pickle.load(open(os.path.join(roslib.packages.get_pkg_subdir('raven_2_params', 'data/left_arm'), 'left_train_data.pkl')))
        self.sys_error['R'] = tfx.transform(trained_data_R["sys_robot_tf"]['R'])
        self.sys_error['L'] = tfx.transform(trained_data_L["sys_robot_tf"]['L'])
        print self.sys_error['R']
        print self.sys_error['L']
    elif self.errorCorrectionMode == 'PRE_POST':
        self.pre_adjustment = dict((arm, tfx.identity_tf()) for arm in self.arms)
        self.post_adjustment = dict((arm, tfx.identity_tf()) for arm in self.arms)
        self.adjustment_side = dict((arm, 'post') for arm in self.arms)
        self.switch_adjustment_update = True

        self.adjustmentInterpolation = {'pre': 1, 'post': 1}
        if adjustmentInterpolation is True:
            self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = 0.5
        elif isinstance(adjustmentInterpolation, dict):
            self.adjustmentInterpolation['pre'] = adjustmentInterpolation.get('pre', 0.5)
            self.adjustmentInterpolation['post'] = adjustmentInterpolation.get('post', 0.5)
        elif isinstance(adjustmentInterpolation, (list, tuple)):
            self.adjustmentInterpolation['pre'] = adjustmentInterpolation[0]
            self.adjustmentInterpolation['post'] = adjustmentInterpolation[1]
        else:
            self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = adjustmentInterpolation

        self.calcPosePostAdjustment = dict((arm, tfx.identity_tf()) for arm in self.arms)
        if calcPosePostAdjustment:
            # accept either a dict or an iterable of (arm, transform) pairs
            for k, v in dict(calcPosePostAdjustment).items():
                self.calcPosePostAdjustment[k] = tfx.transform(v, copy=True)

        self.pre_adj_pub = {}
        self.post_adj_pub = {}
        for arm in self.arms:
            rospy.Subscriber(raven_constants.GripperTape.Topic + '_' + arm, PoseStamped, partial(self._truthCallback, arm))
            self.pre_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_pre_adjument_%s' % arm, TransformStamped)
            self.post_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_post_adjument_%s' % arm, TransformStamped)

    for arm in self.arms:
        self.est_pose_pub[arm] = rospy.Publisher('estimated_gripper_pose_%s' % arm, PoseStamped)
        self.pose_error_pub[arm] = rospy.Publisher('estimated_gripper_pose_error_%s' % arm, PoseStamped)

    rospy.Subscriber(raven_constants.RavenTopics.RavenState, RavenState, self._ravenStateCallback)
def __init__(self, grasp_center, grasp_axis, grasp_width, jaw_width=0, grasp_angle=0,
             tf=stf.SimilarityTransform3D(tfx.identity_tf(from_frame='world'), 1.0)):
    """
    Create a point grasp for parallel jaws with given center and width (relative to object).

    Params: (Note: all in meters!)
        grasp_center - numpy 3-array for center of grasp
        grasp_axis   - numpy 3-array for grasp direction (should be normalized)
        grasp_width  - how wide the jaws open in meters
        jaw_width    - the width of the jaws tangent to the axis (0 means classical point grasp)
        grasp_angle  - the angle of approach for parallel-jaw grippers (the 6th DOF for point grasps)
                       wrt the orthogonal dir to the axis in the XY plane
        tf           - the similarity tf of the grasp wrt the world
    """
    if not isinstance(grasp_center, np.ndarray) or grasp_center.shape[0] != 3:
        raise ValueError('Center must be numpy ndarray of size 3')
    if not isinstance(grasp_axis, np.ndarray) or grasp_axis.shape[0] != 3:
        raise ValueError('Axis must be numpy ndarray of size 3')
    if jaw_width != 0:
        raise ValueError('Nonzero jaw width not yet supported')

    self.center_ = grasp_center
    self.axis_ = grasp_axis / np.linalg.norm(grasp_axis)
    self.grasp_width_ = grasp_width
    self.jaw_width_ = jaw_width
    self.approach_angle_ = grasp_angle
    self.tf_ = tf
    self.quality_ = None
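# Usage sketch (not from the original source): the figure-generation function later in this
# section constructs this grasp as g.ParallelJawPtGrasp3D(center, axis, width), so the same
# call pattern is shown here; the module alias `g` and its import path are assumptions.
import numpy as np

center = np.array([0.0, 0.01, 0.0])   # grasp center in meters, in the object frame
axis = np.array([1.0, 0.0, 0.0])      # grasp axis; the constructor normalizes it
width = 0.1                           # jaw opening width in meters

grasp = g.ParallelJawPtGrasp3D(center, axis, width)  # jaw_width=0, grasp_angle=0, identity tf by default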
def __init__(self, vertices, triangles, normals=None, metadata=None, pose=tfx.identity_tf(), scale=1.0,
             density=1.0, category='', component=0):
    self.vertices_ = vertices
    self.triangles_ = triangles
    self.normals_ = normals
    self.metadata_ = metadata
    self.pose_ = pose
    self.scale_ = scale
    self.density_ = density
    self.category_ = category
    self.component_ = component

    # compute mesh properties
    self._compute_bb_center()
    self._compute_centroid()
    self._compute_com_uniform()
    self._compute_mass()
def __init__(self, sdf, mesh=None, features=None, tf=stf.SimilarityTransform3D(tfx.identity_tf(), 1.0),
             key='', category='', model_name='', mass=1.0):
    """ 3D objects are initialized with a 3D sdf and, optionally, a 3D mesh """
    if not isinstance(sdf, s.Sdf3D):
        raise ValueError('Must initialize graspable object 3D with 3D sdf')
    if mesh is not None and not (isinstance(mesh, m.Mesh3D) or isinstance(mesh, mx.Mesh)):
        raise ValueError('Must initialize graspable object 3D with 3D mesh')

    self.center_of_mass_ = sdf.center_world()  # use SDF bb center for now

    GraspableObject.__init__(self, sdf, mesh=mesh, features=features, tf=tf, key=key,
                             category=category, model_name=model_name, mass=mass)
def __init__(self, points, sdf_meas, dims, meas_variance=0.1, kernel_name='rbf', kernel_hyps=[1., 3.],
             origin=np.array([0, 0]), resolution=1.0, pose=tfx.identity_tf(frame="world")):
    if len(dims) != 3:
        raise ValueError('Dimensions must be 3D!')

    # store params
    self.pts_ = points
    self.data_ = sdf_meas
    self.var_ = meas_variance
    self.dims_ = dims
    self.origin_ = origin
    self.resolution_ = resolution
    self.pose_ = pose
    self._compute_flat_indices()

    # set up gp
    self.set_kernel(kernel_name, kernel_hyps)
    self.gp_ = gpy.models.GPRegression(points, sdf_meas, self.kernel_, Y_variance=meas_variance)
    self.gp_.optimize()
    print(self.gp_)

    # predict mean and variance of grid
    (self.mean_pred_, self.var_pred_) = self.predict_locations(self.grid_pts_, full_cov=False)

    self.feature_vector_ = None  # Kmeans feature representation
def grasp_model_figure():
    np.random.seed(100)

    h = plt.figure()
    ax = h.add_subplot(111, projection='3d')

    sdf_3d_file_name = '/mnt/terastation/shape_data/MASTER_DB_v2/Cat50_ModelDatabase/5c7bf45b0f847489181be2d6e974dccd.sdf'
    sf = sdf_file.SdfFile(sdf_3d_file_name)
    sdf_3d = sf.read()

    mesh_name = '/mnt/terastation/shape_data/MASTER_DB_v2/Cat50_ModelDatabase/5c7bf45b0f847489181be2d6e974dccd.obj'
    of = obj_file.ObjFile(mesh_name)
    m = of.read()

    clean_mesh_name = 'data/test/meshes/flashlight.obj'
    mc = mesh_cleaner.MeshCleaner(m)
    mc.rescale_vertices(0.07)
    oof = obj_file.ObjFile(clean_mesh_name)
    oof.write(mc.mesh())

    graspable = graspable_object.GraspableObject3D(sdf_3d, mesh=m, model_name=clean_mesh_name,
                                                   tf=stf.SimilarityTransform3D(tfx.identity_tf(), 0.75))

    rave.raveSetDebugLevel(rave.DebugLevel.Error)
    grasp_checker = OpenRaveGraspChecker()

    center = np.array([0, 0.01, 0])
    axis = np.array([1, 0, 0])
    axis = axis / np.linalg.norm(axis)
    width = 0.1
    grasp = g.ParallelJawPtGrasp3D(center, axis, width)

    rotated_grasps = grasp.transform(graspable.tf, 2.0 * np.pi / 20.0)
    print len(rotated_grasps)

    grasp_checker.prune_grasps_in_collision(graspable, rotated_grasps, auto_step=False, close_fingers=True, delay=1)
def __init__(self, height, width, fx, fy=None, cx=None, cy=None, pose=tfx.identity_tf()):
    '''
    Init camera parameters

    Params:
       height: (int or float) height of image
       width: (int or float) width of image
       fx: (float) x focal length of camera in pixels
       fy: (float) y focal length of camera in pixels
       cx: (float) optical center of camera in pixels along x axis
       cy: (float) optical center of camera in pixels along y axis
    '''
    self.height_ = height
    self.width_ = width
    self.fx_ = fx
    self.pose_ = pose

    # set focal, camera center automatically if under specified
    if fy is None:
        self.fy_ = fx
    else:
        self.fy_ = fy

    if cx is None:
        self.cx_ = float(width) / 2
    else:
        self.cx_ = cx

    if cy is None:
        self.cy_ = float(height) / 2
    else:
        self.cy_ = cy

    # set camera projection matrix
    self.K_ = np.array([[self.fx_,        0, self.cx_],
                        [       0, self.fy_, self.cy_],
                        [       0,        0,        1]])
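# Usage sketch (not from the original source): the class name CameraParams below is hypothetical,
# since only the constructor is given above. It illustrates how the intrinsics matrix K_ built
# there projects a camera-frame point to pixel coordinates.
import numpy as np

cam = CameraParams(height=480, width=640, fx=525.0)  # fy defaults to fx; cx, cy default to the image center

point_camera = np.array([0.1, -0.05, 1.0])   # 3D point in the camera frame (meters)
uvw = cam.K_.dot(point_camera)               # homogeneous pixel coordinates
u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]
print('pixel: (%.1f, %.1f)' % (u, v))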
def __init__(self, sdf_data, origin, resolution, tf=stf.SimilarityTransform3D(tfx.identity_tf(), scale=1.0),
             frame=None, use_abs=True):
    self.data_ = sdf_data
    self.origin_ = origin
    self.resolution_ = resolution
    self.dims_ = self.data_.shape

    # set up surface params
    self.surface_thresh_ = self.resolution_ * np.sqrt(2) / 2  # resolution is max dist from surface when surf is orthogonal to diagonal grid cells
    self.center_ = np.array([(self.dims_[0] - 1) / 2, (self.dims_[1] - 1) / 2, (self.dims_[2] - 1) / 2])

    self.points_buf_ = np.zeros([Sdf3D.num_interpolants, 3], dtype=np.int)
    self.coords_buf_ = np.zeros([3,])

    # set up tf
    self.tf_ = tf

    # transform sdf basis to grid (X and Z axes are flipped!)
    R_sdf_mesh = np.eye(3)
    self.tf_grid_sdf_ = stf.SimilarityTransform3D(tfx.canonical.CanonicalTransform(R_sdf_mesh, -R_sdf_mesh.T.dot(self.center_)),
                                                  1.0 / self.resolution_)
    self.tf_sdf_grid_ = self.tf_grid_sdf_.inverse()

    # optionally use only the absolute values (useful for non-closed meshes in 3D)
    if use_abs:
        self.data_ = np.abs(self.data_)

    self._compute_flat_indices()
    self._compute_gradients()

    self.feature_vector_ = None  # Kmeans feature representation
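# Usage sketch (not from the original source): the HDF5 loader later in this section calls
# sdf.Sdf3D(sdf_data, origin, resolution, tf, frame), so the constructor above is assumed to be
# Sdf3D in an `sdf` module; the synthetic grid below is purely illustrative.
import numpy as np

sdf_data = np.ones((3, 3, 3))    # signed distances on a 3x3x3 grid: positive outside the surface
sdf_data[1, 1, 1] = -1.0         # one interior cell

origin = np.array([0.0, 0.0, 0.0])   # world-frame location of grid cell (0, 0, 0)
resolution = 0.01                    # meters per grid cell

sdf_obj = sdf.Sdf3D(sdf_data, origin, resolution)  # default identity tf, frame=None, use_abs=True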
def __init__(self, sdf, tf=stf.SimilarityTransform3D(tfx.identity_tf(), 1.0)):
    """ 2D objects are initialized with sdfs only """
    if not isinstance(sdf, s.Sdf2D):
        raise ValueError('Must initialize graspable object 2D with 2D sdf')

    GraspableObject.__init__(self, sdf, tf=tf)
def __init__(self, sdf, mesh=None, features=None, tf=stf.SimilarityTransform3D(tfx.identity_tf(), 1.0),
             key='', model_name='', category='', mass=1.0):
    if not isinstance(tf, stf.SimilarityTransform3D):
        raise ValueError('Must initialize graspable objects with 3D similarity transform')

    self.sdf_ = sdf
    self.mesh_ = mesh
    self.tf_ = tf

    self.key_ = key
    self.model_name_ = model_name  # for OpenRave usage, gross!
    self.category_ = category
    self.mass_ = mass

    self.features_ = features  # shot features

    # make consistent poses, scales
    self.sdf_.tf = self.tf_
    if self.mesh_ is not None:
        self.mesh_.tf = self.tf_
def __init__(self, arms=['L','R'], calcPosePostAdjustment=None, adjustmentInterpolation=True):
    self.arms = arms
    self.truthPose = {}
    self.calcPose = {}
    self.calcPoseAtTruth = {}
    self.estimatedPose = defaultdict(lambda: (None, None))

    self.pre_adjustment = dict((arm, tfx.identity_tf()) for arm in self.arms)
    self.post_adjustment = dict((arm, tfx.identity_tf()) for arm in self.arms)
    self.adjustment_side = dict((arm, 'post') for arm in self.arms)
    self.switch_adjustment_update = True

    self.adjustmentInterpolation = {'pre': 1, 'post': 1}
    if adjustmentInterpolation is True:
        self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = 0.5
    elif isinstance(adjustmentInterpolation, dict):
        self.adjustmentInterpolation['pre'] = adjustmentInterpolation.get('pre', 0.5)
        self.adjustmentInterpolation['post'] = adjustmentInterpolation.get('post', 0.5)
    elif isinstance(adjustmentInterpolation, (list, tuple)):
        self.adjustmentInterpolation['pre'] = adjustmentInterpolation[0]
        self.adjustmentInterpolation['post'] = adjustmentInterpolation[1]
    else:
        self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = adjustmentInterpolation

    self.estimateFromImage = dict()

    self.calcPosePostAdjustment = dict((arm, tfx.identity_tf()) for arm in self.arms)
    if calcPosePostAdjustment:
        # accept either a dict or an iterable of (arm, transform) pairs
        for k, v in dict(calcPosePostAdjustment).items():
            self.calcPosePostAdjustment[k] = tfx.transform(v, copy=True)

    self.est_pose_pub = {}
    self.pose_error_pub = {}
    self.pre_adj_pub = {}
    self.post_adj_pub = {}
    for arm in self.arms:
        rospy.Subscriber(Constants.GripperTape.Topic + '_' + arm, PoseStamped, partial(self._truthCallback, arm))
        self.est_pose_pub[arm] = rospy.Publisher('estimated_gripper_pose_%s' % arm, PoseStamped)
        self.pose_error_pub[arm] = rospy.Publisher('estimated_gripper_pose_error_%s' % arm, PoseStamped)
        self.pre_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_pre_adjument_%s' % arm, TransformStamped)
        self.post_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_post_adjument_%s' % arm, TransformStamped)

    rospy.Subscriber(Constants.RavenTopics.RavenState, RavenState, self._ravenStateCallback)
def __init__(self, sdf_data, origin=np.array([0, 0]), resolution=1.0, pose=tfx.identity_tf(from_frame="world"), scale=1.0):
    self.data_ = sdf_data
    self.origin_ = origin
    self.resolution_ = resolution
    self.dims_ = self.data_.shape
    self.pose_ = pose
    self.scale_ = scale

    self.surface_thresh_ = self.resolution_ * np.sqrt(2) / 2  # resolution is max dist from surface when surf is orthogonal to diagonal grid cells
    self.center_ = np.array([self.dims_[0] / 2, self.dims_[1] / 2])

    self._compute_flat_indices()
    self._compute_gradients()

    self.feature_vector_ = None  # Kmeans feature representation
def __init__(self, vertices, triangles, normals=None, metadata=None, pose=tfx.identity_tf(), scale=1.0,
             density=1.0, category='', component=0):
    self.vertices_ = vertices
    self.triangles_ = triangles
    self.normals_ = normals
    self.metadata_ = metadata
    self.pose_ = pose
    self.scale_ = scale
    self.density_ = density
    self.category_ = category
    self.component_ = component

    # compute mesh properties
    self._compute_bb_center()
    self._compute_centroid()
    self._compute_com_uniform()
    self._compute_mass()
def __init__(self, sdf, mesh=None, features=None, tf=stf.SimilarityTransform3D(tfx.identity_tf(), 1.0),
             key='', model_name='', category=''):
    if not isinstance(tf, stf.SimilarityTransform3D):
        raise ValueError('Must initialize graspable objects with 3D similarity transform')

    self.sdf_ = sdf
    self.mesh_ = mesh
    self.tf_ = tf

    self.key_ = key
    self.model_name_ = model_name  # for OpenRave usage, gross!
    self.category_ = category

    self.features_ = features  # shot features

    # make consistent poses, scales
    self.sdf_.tf = self.tf_
    if self.mesh_ is not None:
        self.mesh_.tf = self.tf_
def __init__(self, sdf_data, origin, resolution, tf=stf.SimilarityTransform3D(tfx.identity_tf(), scale=1.0),
             frame=None, use_abs=True):
    self.data_ = sdf_data
    self.origin_ = origin
    self.resolution_ = resolution
    self.dims_ = self.data_.shape

    # set up surface params
    self.surface_thresh_ = self.resolution_ * np.sqrt(2) / 2  # resolution is max dist from surface when surf is orthogonal to diagonal grid cells
    self.center_ = np.array([(self.dims_[0] - 1) / 2, (self.dims_[1] - 1) / 2, (self.dims_[2] - 1) / 2])

    self.points_buf_ = np.zeros([Sdf3D.num_interpolants, 3], dtype=np.int)
    self.coords_buf_ = np.zeros([3,])

    # set up tf
    self.tf_ = tf

    # transform sdf basis to grid (X and Z axes are flipped!)
    R_sdf_mesh = np.eye(3)
    self.tf_grid_sdf_ = stf.SimilarityTransform3D(tfx.canonical.CanonicalTransform(R_sdf_mesh, -R_sdf_mesh.T.dot(self.center_)),
                                                  1.0 / self.resolution_)
    self.tf_sdf_grid_ = self.tf_grid_sdf_.inverse()

    # optionally use only the absolute values (useful for non-closed meshes in 3D)
    if use_abs:
        self.data_ = np.abs(self.data_)

    self._compute_flat_indices()
    self._compute_gradients()

    self.feature_vector_ = None  # Kmeans feature representation
def grasp_model_figure():
    np.random.seed(100)

    h = plt.figure()
    ax = h.add_subplot(111, projection='3d')

    sdf_3d_file_name = '/mnt/terastation/shape_data/MASTER_DB_v2/Cat50_ModelDatabase/5c7bf45b0f847489181be2d6e974dccd.sdf'
    sf = sdf_file.SdfFile(sdf_3d_file_name)
    sdf_3d = sf.read()

    mesh_name = '/mnt/terastation/shape_data/MASTER_DB_v2/Cat50_ModelDatabase/5c7bf45b0f847489181be2d6e974dccd.obj'
    of = obj_file.ObjFile(mesh_name)
    m = of.read()

    clean_mesh_name = 'data/test/meshes/flashlight.obj'
    mc = mesh_cleaner.MeshCleaner(m)
    mc.rescale_vertices(0.07)
    oof = obj_file.ObjFile(clean_mesh_name)
    oof.write(mc.mesh())

    graspable = graspable_object.GraspableObject3D(sdf_3d, mesh=m, model_name=clean_mesh_name,
                                                   tf=stf.SimilarityTransform3D(tfx.identity_tf(), 0.75))

    rave.raveSetDebugLevel(rave.DebugLevel.Error)
    grasp_checker = OpenRaveGraspChecker()

    center = np.array([0, 0.01, 0])
    axis = np.array([1, 0, 0])
    axis = axis / np.linalg.norm(axis)
    width = 0.1
    grasp = g.ParallelJawPtGrasp3D(center, axis, width)

    rotated_grasps = grasp.transform(graspable.tf, 2.0 * np.pi / 20.0)
    print len(rotated_grasps)

    grasp_checker.prune_grasps_in_collision(graspable, rotated_grasps, auto_step=False, close_fingers=True, delay=1)
def sdf_3d(data):
    """ Converts HDF5 data provided in dictionary |data| to an SDF object """
    sdf_data = np.array(data[SDF_DATA_KEY])
    origin = np.array(data.attrs[SDF_ORIGIN_KEY])
    resolution = data.attrs[SDF_RES_KEY]

    # should never be used, really
    pose = tfx.identity_tf()
    scale = 1.0
    if SDF_POSE_KEY in data.attrs.keys():
        pose = data.attrs[SDF_POSE_KEY]
    if SDF_SCALE_KEY in data.attrs.keys():
        scale = data.attrs[SDF_SCALE_KEY]
    tf = stf.SimilarityTransform3D(pose, scale)

    frame = None
    if SDF_FRAME_KEY in data.attrs.keys():
        frame = data.attrs[SDF_FRAME_KEY]

    return sdf.Sdf3D(sdf_data, origin, resolution, tf, frame)
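# Usage sketch (not from the original source): reading a stored SDF back with h5py and handing
# the group to the loader above. The file name and group path are hypothetical; the key
# constants (SDF_DATA_KEY, SDF_ORIGIN_KEY, ...) are the module-level names referenced above.
import h5py

with h5py.File('dataset.hdf5', 'r') as f:
    sdf_group = f['my_object/sdf']   # group holding the SDF dataset and its attrs
    sdf_obj = sdf_3d(sdf_group)      # returns an sdf.Sdf3D built from the stored data
    print(sdf_obj)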
def __init__(self, sdf_data, origin=np.array([0, 0]), resolution=1.0, pose=tfx.identity_tf(from_frame="world"), scale=1.0):
    self.data_ = sdf_data
    self.origin_ = origin
    self.resolution_ = resolution
    self.dims_ = self.data_.shape
    self.pose_ = pose
    self.scale_ = scale

    self.surface_thresh_ = self.resolution_ * np.sqrt(2) / 2  # resolution is max dist from surface when surf is orthogonal to diagonal grid cells
    self.center_ = np.array([self.dims_[0] / 2, self.dims_[1] / 2])

    self._compute_flat_indices()
    self._compute_gradients()

    self.feature_vector_ = None  # Kmeans feature representation
def _publish_state(self):
    with self.lock:
        ma = MarkerArray()

        all_centers_marker = Marker()
        all_centers_marker.ns = 'all_centers'
        all_centers_marker.id = 0
        all_centers_marker.type = Marker.POINTS
        all_centers_marker.action = Marker.ADD
        all_centers_marker.pose = tfx.identity_tf().msg.Pose()
        all_centers_marker.scale.x = 0.002
        all_centers_marker.scale.y = 0.002
        all_centers_marker.lifetime = rospy.Duration(1)
        all_centers_marker.color.r = 1
        all_centers_marker.color.g = 1
        all_centers_marker.color.b = 0
        all_centers_marker.color.a = 0.5
        for center in self._getAllCenters():
            all_centers_marker.header = center.msg.Header()
            all_centers_marker.points.append(center.msg.Point())
        ma.markers.append(all_centers_marker)

        curr_centers_marker = Marker()
        curr_centers_marker.ns = 'current_centers'
        curr_centers_marker.id = 0
        curr_centers_marker.type = Marker.POINTS
        curr_centers_marker.action = Marker.ADD
        curr_centers_marker.pose = tfx.identity_tf().msg.Pose()
        curr_centers_marker.scale.x = 0.001
        curr_centers_marker.scale.y = 0.001
        curr_centers_marker.lifetime = rospy.Duration(1)
        curr_centers_marker.color.r = 0
        curr_centers_marker.color.g = 0
        curr_centers_marker.color.b = 1
        curr_centers_marker.color.a = 1
        for center in self.currentCenters:
            curr_centers_marker.header = center.msg.Header()
            curr_centers_marker.points.append(center.msg.Point())
        ma.markers.append(curr_centers_marker)

        for id, arm in enumerate('LR'):
            if self.allocations.get(arm) is None:
                continue
            alloc = self.allocations[arm]
            alloc_marker = Marker()
            alloc_marker.ns = 'alloc'
            alloc_marker.id = id
            alloc_marker.type = Marker.CYLINDER
            alloc_marker.action = Marker.ADD
            alloc_marker.header = alloc.msg.Header()
            alloc_marker.pose = tfx.pose(alloc).msg.Pose()
            alloc_marker.scale.x = self.allocationRadius
            alloc_marker.scale.y = self.allocationRadius
            alloc_marker.scale.z = 0.01
            alloc_marker.lifetime = rospy.Duration(1)
            alloc_marker.color.r = 0
            alloc_marker.color.g = 1
            alloc_marker.color.b = 1
            alloc_marker.color.a = 0.25
            ma.markers.append(alloc_marker)

        self.marker_pub.publish(ma)
def __init__(self, sdf, mesh=None, features=None, tf=stf.SimilarityTransform3D(tfx.identity_tf(), 1.0),
             key='', category='', model_name=''):
    """ 3D objects are initialized with a 3D sdf and, optionally, a 3D mesh """
    if not isinstance(sdf, s.Sdf3D):
        raise ValueError('Must initialize graspable object 3D with 3D sdf')
    if mesh is not None and not (isinstance(mesh, m.Mesh3D) or isinstance(mesh, mx.Mesh)):
        raise ValueError('Must initialize graspable object 3D with 3D mesh')

    self.center_of_mass_ = sdf.center_world()  # use SDF bb center for now

    GraspableObject.__init__(self, sdf, mesh=mesh, features=features, tf=tf, key=key,
                             category=category, model_name=model_name)
def __init__(self, sdf, tf=stf.SimilarityTransform3D(tfx.identity_tf(), 1.0)):
    """ 2D objects are initialized with sdfs only """
    if not isinstance(sdf, s.Sdf2D):
        raise ValueError('Must initialize graspable object 2D with 2D sdf')

    GraspableObject.__init__(self, sdf, tf=tf)