def makeConf(self, x, y, th, g=0.07, vertical=False, left=None, right=None,
             ignoreStandard=False):
    # Build a configuration at base pose (x, y, th).  A standard arm posture
    # (vertical or horizontal tool frame) is cached in module globals and reused
    # unless ignoreStandard is set.
    global standardVerticalConf, standardHorizontalConf
    if left is None: left = self.useLeft
    if right is None: right = self.useRight
    dx = dy = dz = 0
    dt = 0.0 if vertical else TORSO_Z - 0.3
    if vertical and standardVerticalConf and (not ignoreStandard):
        c = standardVerticalConf.copy()
        c.conf['pr2Base'] = [x, y, th]
        if self.useLeft:
            c.conf['pr2LeftGripper'] = [g]
        if self.useRight:
            c.conf['pr2RightGripper'] = [g]
        return c
    elif (not vertical) and standardHorizontalConf and (not ignoreStandard):
        c = standardHorizontalConf.copy()
        c.conf['pr2Base'] = [x, y, th]
        if self.useLeft:
            c.conf['pr2LeftGripper'] = [g]
        if self.useRight:
            c.conf['pr2RightGripper'] = [g]
        return c
    else:
        c = JointConf(pr2Init.copy(), self)
        c = c.set('pr2Base', [x, y, th])
        if self.useLeft:
            c = c.set('pr2LeftGripper', [g])
        if self.useRight:
            c = c.set('pr2RightGripper', [g])
        cart = c.cartConf()
        base = cart['pr2Base']
        if vertical:
            # Quaternion for the standard vertical hand orientation
            q = np.array([0.0, 0.7071067811865475, 0.0, 0.7071067811865475])
            if left and not right:
                h = hu.Transform(p=np.array([[a] for a in [0.4+dx, 0.2+dy, 1.1+dt, 1.]]), q=q)
                cart = cart.set('pr2LeftArm', base.compose(h))
            else:
                cart = cart.rem('pr2LeftArm')
            if right:
                hr = hu.Transform(p=np.array([[a] for a in [0.4+dx, -(0.2+dy), 1.1+dt, 1.]]), q=q)
                cart = cart.set('pr2RightArm', base.compose(hr))
        else:
            if left and not right:
                h = hu.Pose(0.3+dx, 0.3+dy, 0.9+dz+dt, -math.pi/2)
                cart = cart.set('pr2LeftArm', base.compose(h))
            else:
                cart = cart.rem('pr2LeftArm')
            if right:
                hr = hu.Pose(0.3+dx, -(0.3+dy), 0.9+dz+dt, math.pi/2)
                cart = cart.set('pr2RightArm', base.compose(hr))
        c = self.inverseKin(cart, c)
        c.conf['pr2Head'] = [0., 0.]
        assert all(c.values())          # IK must have succeeded for every chain
        if vertical:
            standardVerticalConf = c
        else:
            standardHorizontalConf = c
        return c
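# Usage sketch (hypothetical helper; makeRobot and the workspace bounds are borrowed
# from dantam2 further down): build configurations at a base pose with the cached
# horizontal arm posture and with the vertical one.
def _makeConf_example():
    workspace = np.array([[-2.0, -1.5, 0.0], [1.0, 1.5, 2.0]])
    robot = makeRobot(workspace, useLeft=True, useRight=False)
    c_horiz = robot.makeConf(-0.75, 0.2, -math.pi / 2)
    c_vert = robot.makeConf(-0.75, 0.2, -math.pi / 2, vertical=True)
    return c_horiz, c_vert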
def get_grasp_frame_origin(grasp_frame):
    # Find the robot wrist frame corresponding to the grasp at the placement
    objFrame = hu.Pose(0, 0, 0, 0)   # objFrame is the origin of the object
    faceFrame = grasp_frame.compose(GRASP_FRAME)
    centerFrame = faceFrame.compose(CENTER_FRAME)
    graspFrame = objFrame.compose(centerFrame)
    return graspFrame
def stepTowardsTarget(chainName, c_f, q_f, c_i, q_i, stepSize, zthr=0.1):
    # diffPose must be a Pose (x,y,z,t)
    # diffPose = c_f[chainName].compose(c_i[chainName].inverse())
    diffPose = c_i[chainName].inverse().compose(c_f[chainName])
    diffs = diffPose.pose(zthr=zthr).xyztTuple()
    length = sum([d**2 for d in diffs])**0.5
    if length == 0.:
        return q_f[chainName]
    step = stepSize / length
    # print 'step', step, 'length', length
    if step >= 1.:
        return q_f[chainName]
    # We're going to interpolate diffPose and apply it to c_i, which need not be a pose
    vals = [d*step for d in diffs]
    cc = q_i.robot.makeCartConf({})
    cc = cc.set(chainName, c_i[chainName].compose(hu.Pose(*vals)))
    q = q_i.robot.inverseKin(cc, q_f)  # may have Nones in values
    return q[chainName]
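# Worked sketch (assumed numbers, no robot objects needed): just the interpolation
# fraction that stepTowardsTarget computes from the Cartesian difference.
def _stepTowardsTarget_fraction_example():
    diffs = (0.10, 0.0, 0.0, 0.0)            # assumed (x, y, z, theta) difference
    stepSize = 0.02
    length = sum([d**2 for d in diffs])**0.5
    step = stepSize / length                 # 0.2: move 20% of the way this call
    return [d*step for d in diffs]           # [0.02, 0.0, 0.0, 0.0]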
def confFromBaseAndWrist(self, basePose, hand, wrist, defaultConf, counts=None,
                         grip=params['gripMax'], freeBase=False):
    ans = self.confFromBaseAndWristAux(basePose, hand, wrist, defaultConf,
                                       counts=counts, grip=grip)
    if ans:
        return ans
    # Not sure this is useful...  This is where optimization would be useful...
    if freeBase:
        # Retry IK from a 3 x 3 x 3 grid of slightly perturbed base poses.
        count = 0
        (x0, y0, z, t0) = basePose.pose().xyztTuple()
        for x in (x0, x0-0.01, x0+0.01):
            for y in (y0, y0-0.01, y0+0.01):
                for t in (t0, t0-0.02, t0+0.02):
                    bp = hu.Pose(x, y, z, t)
                    ans = self.confFromBaseAndWristAux(bp, hand, wrist, defaultConf,
                                                       counts=counts, grip=grip)
                    if ans:
                        if count > 0:
                            print 'invKin recovery in freeBase', count
                        return ans
                    count += 1
        print 'Tried', count, 'invKins during freeBase'
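# Sketch of the freeBase fallback above (hypothetical helper): it enumerates the
# 3 x 3 x 3 = 27 base poses perturbed by +/-1 cm in x, y and +/-0.02 rad in theta,
# with the unperturbed pose tried first.
def _freeBase_grid_example(x0=0.0, y0=0.0, t0=0.0):
    return [(x, y, t)
            for x in (x0, x0 - 0.01, x0 + 0.01)
            for y in (y0, y0 - 0.01, y0 + 0.01)
            for t in (t0, t0 - 0.02, t0 + 0.02)]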
def get_color(shape):
    return shape.properties['color']

def get_box_body(length, width, height, name='box', color='black'):
    return Shape([BoxAligned(np.array([(-length / 2., -width / 2., -height / 2.),
                                       (length / 2., width / 2., height / 2.)]),
                             None)],
                 None, name=name, color=color)

GRASP_FRAME = hu.Pose(0, -0.025, 0, 0)
CENTER_FRAME = hu.Pose(0, 0, 0.025, 0)

def get_grasp_frame_origin(grasp_frame):
    objFrame = hu.Pose(0, 0, 0, 0)
    faceFrame = grasp_frame.compose(GRASP_FRAME)
    centerFrame = faceFrame.compose(CENTER_FRAME)
    graspFrame = objFrame.compose(centerFrame)
    return graspFrame

Z_OFFSET = 0.02

gMat0 = hu.Transform(
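# Worked sketch (hypothetical helper): with an identity grasp_frame, the result of
# get_grasp_frame_origin is just GRASP_FRAME followed by CENTER_FRAME, i.e. a pure
# translation of -0.025 in y and +0.025 in z from the object origin.
def _grasp_frame_origin_example():
    g = get_grasp_frame_origin(hu.Pose(0, 0, 0, 0))
    # translation component should be (0.0, -0.025, 0.025)
    return g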
def basePose(self):
    # type: () -> object
    base = self.conf[self.baseName]
    return hu.Pose(base[0], base[1], 0.0, base[2])
def dantam2():
    # Tabletop rearrangement benchmark: an m x n grid of candidate placements on a
    # table; the blue 'goal' block must end up at the center cell, the red blocks
    # are movable clutter.
    m, n = 3, 3
    n_obj = 2
    side_dim = .07
    height_dim = .1
    box_dims = (side_dim, side_dim, height_dim)
    separation = (side_dim, side_dim)

    workspace = np.array([[-2.0, -1.5, 0.0],
                          [1.0, 1.5, 2.0]])
    robot = makeRobot(workspace, useLeft=True, useRight=False)
    base_conf = (-.75, .2, -math.pi / 2)
    initial_conf = robot.makeConf(*base_conf, g=robot.gripMax, ignoreStandard=True)
    initial_conf = initial_conf.set('pr2LeftArm', TOP_HOLDING_LEFT_ARM)

    initial_poses = []
    goal_poses = {}

    length = m * (box_dims[0] + separation[0])
    width = n * (box_dims[1] + separation[1])
    height = .7
    table = get_box_body(length, width, height, name='table', color='brown')
    initial_poses.append((table, hu.Pose(0, 0, height / 2, 0)))

    poses = []
    z = height + height_dim / 2 + BODY_PLACEMENT_Z_OFFSET
    theta = 0
    for r in range(m):
        row = []
        x = -length / 2 + (r + .5) * (box_dims[0] + separation[0])
        for c in range(n):
            y = -width / 2 + (c + .5) * (box_dims[1] + separation[1])
            row.append(hu.Pose(x, y, z, theta))
        poses.append(row)

    coordinates = list(product(range(m), range(n)))
    assert n_obj <= len(coordinates)
    obj_coordinates = sample(coordinates, n_obj)

    movable_names = []
    for i, (r, c) in enumerate(obj_coordinates):
        row_color = np.zeros(4)   # computed but not used below
        row_color[2 - r] = 1.
        if i == 0:
            name = 'goal%d-%d' % (r, c)
            color = 'blue'
            goal_poses[name] = poses[m / 2][n / 2]   # center cell of the grid
        else:
            name = 'block%d-%d' % (r, c)
            color = 'red'
        obj = get_box_body(*box_dims, name=name, color=color)
        initial_poses.append((obj, poses[r][c]))
        movable_names.append(name)

    known_poses = [pose for col in poses for pose in col]

    return ManipulationProblem(workspace,
                               initial_conf, initial_poses=initial_poses,
                               movable_names=movable_names, known_poses=known_poses,
                               goal_poses=goal_poses, goal_conf=initial_conf)
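# Worked numbers for the grid above (hypothetical helper, constants copied from
# dantam2): the table top is 0.42 m on a side and the placement centers are 0.14 m
# apart, symmetric about the table center.
def _dantam2_grid_example():
    m = 3
    side_dim = separation = 0.07
    length = m * (side_dim + separation)      # 0.42
    return [-length / 2 + (r + .5) * (side_dim + separation)
            for r in range(m)]                # [-0.14, 0.0, 0.14]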
def approach_from_manip(manip_trans):
    # Ad-hoc backoff strategy.  matrix[2, 0] is the world-z component of the
    # manip frame's x axis: near zero means the hand approaches horizontally.
    if abs(manip_trans.matrix[2, 0]) < 0.1:
        return manip_trans.compose(hu.Pose(-BACKOFF, 0., PERP_BACKOFF, 0.))  # horizontal
    return manip_trans.compose(hu.Pose(-BACKOFF, 0., 0., 0.))  # vertical
def ground(pose):
    params = list(pose.xyztTuple())
    params[2] = 0.0
    return hu.Pose(*params)
def fliph(pose):
    params = list(pose.pose().xyztTuple())
    params[1] = -params[1]
    return hu.Pose(*params)
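# Usage sketch (hypothetical helper, assuming hu.Pose(x, y, z, theta) and that
# Pose.pose() returns the pose itself, as the helpers above rely on):
# ground() projects a pose onto the z = 0 plane, fliph() mirrors it across y = 0.
def _ground_fliph_example():
    p = hu.Pose(0.5, 0.2, 0.8, math.pi / 2)
    p_ground = ground(p)   # -> (0.5, 0.2, 0.0, pi/2)
    p_mirror = fliph(p)    # -> (0.5, -0.2, 0.8, pi/2)
    return p_ground, p_mirror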
    fing_dy = params['fingerWidth']
    fing_dz = params['fingerThick']
    return [Rigid(arm+'_palm',
                  (r_forceSensorOffset if arm == 'r' else l_forceSensorOffset) *
                  hu.Transform(transf.translation_matrix([o + palm_dx/2., 0.0, 0.0])),
                  None, None),
            Prismatic(arm+'_finger1',
                      hu.Transform(transf.translation_matrix([palm_dx/2 + fing_dx/2., fing_dy/2., 0.0])),
                      (0.0, params['gripMax']/2.), (0., 1., 0.)),
            Prismatic(arm+'_finger2',
                      hu.Transform(transf.translation_matrix([0.0, -fing_dy, 0.0])),
                      (0.0, 0.081), (0., -1., 0.))]

# Transforms from wrist frame to hand frame
# This leaves the X axis unchanged
left_gripperToolOffsetX = hu.Pose(0.18, 0.0, 0.0, 0.0)
right_gripperToolOffsetX = hu.Transform(np.dot(r_forceSensorOffset.matrix,
                                               left_gripperToolOffsetX.matrix))
# This rotates around the Y axis... so Z' points along X and X' points along -Z
left_gripperToolOffsetZ = hu.Transform(np.dot(left_gripperToolOffsetX.matrix,
                                              transf.rotation_matrix(math.pi/2, (0, 1, 0))))
right_gripperToolOffsetZ = hu.Transform(np.dot(right_gripperToolOffsetX.matrix,
                                               transf.rotation_matrix(math.pi/2, (0, 1, 0))))

# Rotates wrist to grasp face frame
gFaceFrame = hu.Transform(np.array([(0., 1., 0., 0.18),
                                    (0., 0., 1., 0.),
                                    (1., 0., 0., 0.),
                                    (0., 0., 0., 1.)]))

gripperFaceFrame = {'left': gFaceFrame, 'right': r_forceSensorOffset*gFaceFrame}
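# Quick check (hypothetical helper, assuming transf is the standard transformations
# module used above): the pi/2 rotation about Y used for gripperToolOffsetZ sends
# the tool Z axis onto the wrist X axis.
def _toolOffsetZ_axis_example():
    R = transf.rotation_matrix(math.pi / 2, (0, 1, 0))
    z_axis = np.array([0., 0., 1., 0.])
    return np.dot(R, z_axis)    # ~ [1, 0, 0, 0]: Z' points along X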