Example #1
    def check_collision_free_with_object(self,limb,object,grasp):
        """Checks whether the given limb, holding an object at the given
        grasp, is collision free at the robot's current configuration"""
        objToGripperXForm = se3.mul(baxter.left_gripper_center_xform,se3.inv(grasp))
        #assume robot configuration is updated
        if limb=='left':
            gripperLink = self.robot.getLink(baxter.left_gripper_link_name)
        else:
            gripperLink = self.robot.getLink(baxter.right_gripper_link_name)

        if not isinstance(object,list):
            if object==None:
                object = []
            else:
                object = [object]

        Tgripper = gripperLink.getTransform()
        Tobj = se3.mul(Tgripper,objToGripperXForm)
        for o in object:
            o.setTransform(*Tobj)
        
        excludeids = [ o.getID() for o in object]
        if not self.check_collision_free(limb,exclude=excludeids):
            #print 'Limb is not collision free'
            return False

        for t in xrange(self.world.numRigidObjects()):
            other = self.world.rigidObject(t)
            if(other.getID() not in excludeids):
                if any(other.geometry().collides(o.geometry()) for o in object):
                    #visualization.debug(self.world)
                    #print "Held object-shelf collision"
                    return False
        return True
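A minimal sketch (not from the original project) of the transform algebra used above: if grasp gives the gripper-center pose in the object's local frame, then left_gripper_center_xform * inv(grasp) is the object's pose in the gripper-link frame, and composing that with the link's world transform re-poses the held object in world coordinates. All values below are made up, and the import assumes the Python 2-era Klamp't layout where se3/so3 are top-level modules (newer Klamp't moves them to klampt.math):

from klampt import se3, so3
import math

grasp = (so3.rotation((0,0,1), math.pi/2), [0, 0, 0.05])   # gripper center in object frame (made up)
gripper_center_xform = (so3.identity(), [0, 0, 0.1])       # gripper center in link frame (made up)
Tgripper = (so3.identity(), [0.5, 0.2, 1.0])               # gripper link in world frame (made up)

objToGripperXForm = se3.mul(gripper_center_xform, se3.inv(grasp))  # object pose in link frame
Tobj = se3.mul(Tgripper, objToGripperXForm)                        # object pose in world frame
print Tobj[1]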
Example #2
def load_apc_world():
    """Produces a world with only the Baxter, shelf, and ground plane in it."""
    world = robotsim.WorldModel()
    #uncomment these lines and comment out the next 2 if you want to use the
    #full Baxter model
    # print "Loading full Baxter model (be patient, this will take a minute)..."
    # world.loadElement(os.path.join(model_dir,"baxter.rob"))
    print "Loading simplified Baxter model..."
    world.loadElement(os.path.join(model_dir,"baxter_with_parallel_gripper_col.rob"))
    print "Loading Kiva pod model..."
    world.loadElement(os.path.join(model_dir,"kiva_pod/meshes/pod_lowres.stl"))
    print "Loading plane model..."
    world.loadElement(os.path.join(model_dir,"plane.env"))

    #shift the Baxter up a bit (95cm)
    Rbase,tbase = world.robot(0).link(0).getParentTransform()
    world.robot(0).link(0).setParentTransform(Rbase,(0,0,0.95))
    world.robot(0).setConfig(world.robot(0).getConfig())

    #translate pod to be in front of the robot, and rotate the pod by 90 degrees
    reorient = ([1,0,0,0,0,1,0,-1,0],[0,0,0.01])
    Trel = (so3.rotation((0,0,1),-math.pi/2),[1.4,0,0])
    T = reorient
    world.terrain(0).geometry().transform(*se3.mul(Trel,T))
    # print se3.mul(Trel,T)

    #initialize the shelf xform for the visualizer and object
    #xform initialization
    global ground_truth_shelf_xform
    ground_truth_shelf_xform = se3.mul(Trel,T)
    return world
Example #3
	def drawGL(self,q):
		x = self.getSensedValue(q)
		xdes = self.xdes
		if self.baseLinkNo >= 0:
			Tb = self.baseLink.getTransform()
			if self.taskType == "position":
				x = se3.apply(Tb,x)
				xdes = se3.apply(Tb,xdes)
			elif self.taskType == "po":
				x = se3.mul(Tb,x)
				xdes = se3.mul(Tb,xdes)
		glPointSize(6)
		glEnable(GL_POINT_SMOOTH)
		glBegin(GL_POINTS)
		if self.taskType == "position":
			glColor3f(1,0,0)	#red 
			glVertex3fv(xdes)
			glColor3f(1,0.5,0)	#orange
			glVertex3fv(x)
		elif self.taskType == "po":
			glColor3f(1,0,0)	#red 
			glVertex3fv(xdes[1])
			glColor3f(1,0.5,0)	#orange
			glVertex3fv(x[1])
		glEnd()
Example #4
 def drawGL(self, q):
     x = self.getSensedValue(q)
     xdes = self.xdes
     if self.baseLinkNo >= 0:
         Tb = self.baseLink.getTransform()
         if self.taskType == "position":
             x = se3.apply(Tb, x)
             xdes = se3.apply(Tb, xdes)
         elif self.taskType == "po":
             x = se3.mul(Tb, x)
             xdes = se3.mul(Tb, xdes)
     glPointSize(6)
     glEnable(GL_POINT_SMOOTH)
     glBegin(GL_POINTS)
     if self.taskType == "position":
         glColor3f(1, 0, 0)  #red
         glVertex3fv(xdes)
         glColor3f(1, 0.5, 0)  #orange
         glVertex3fv(x)
     elif self.taskType == "po":
         glColor3f(1, 0, 0)  #red
         glVertex3fv(xdes[1])
         glColor3f(1, 0.5, 0)  #orange
         glVertex3fv(x[1])
     glEnd()
Example #5
 def grasp_xforms(self,object):
     if object.xform == None: return None
     res = []
     for g in object.info.grasps:
         grasp_xform_world = se3.mul(object.xform,g.grasp_xform)
         pregrasp_xform_world = se3.mul(object.xform,g.pregrasp)
         res.append((g,grasp_xform_world,pregrasp_xform_world))
     return res
Example #6
 def __init__(self):
     threading.Thread.__init__(self)
     self.writer = HapticCartesianTaskService(system_state_addr)
     self.reader = HapticWidgetUIService(haptic_service_addr)
     getter1 = TopicListener(system_state_addr,'.robot.endEffectors.0.dest_xform')
     getter2 = TopicListener(system_state_addr,'.robot.endEffectors.1.dest_xform')
     self.reader.widgetGetters = [lambda :se3.mul(getter1.get(),endEffectorLocalTransforms[0]),
                                  lambda :se3.mul(getter2.get(),endEffectorLocalTransforms[1])]
Example #7
    def move_to_grasp_object(self,object):
        # return True
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.

        If successful, changes self.config and returns True.
        Otherwise, does not change self.config and returns False.
        """
        #TODO: put my code here
        if self.active_limb=="LEFT":
            first_gripper = self.left_gripper_link
            second_gripper = self.right_gripper_link
            first_offset = left_gripper_center_xform
            second_offset = right_gripper_center_xform
            first_limb = "LEFT"
            second_limb = "RIGHT"
        elif self.active_limb=="RIGHT":
            first_gripper = self.right_gripper_link
            second_gripper = self.left_gripper_link
            first_offset = right_gripper_center_xform
            second_offset = left_gripper_center_xform
            first_limb = "RIGHT"
            second_limb = "LEFT"
        else:
            print "Grasping... Unrecognized active limb"
            return False
        item_to_world = object.xform
        for grasp in object.info.grasps:
            gripper_to_item = grasp.grasp_xform
            for _ in xrange(100):
                self.robot.setConfig(self.random_init())
                (R_net, t_net) = se3.mul(item_to_world, se3.mul(gripper_to_item, se3.inv(first_offset)))
                goal = ik.objective(first_gripper, R=R_net, t=t_net)
                res = ik.solve(goal)
                if res==True:
                    self.config = self.robot.getConfig()
                    #print "Grasp succeeded"
                    self.active_limb=first_limb
                    return True
                else:
                    pass
                    #self.config = self.robot.getConfig()
                
                self.robot.setConfig(self.random_init())
                (R_net, t_net) = se3.mul(item_to_world, se3.mul(gripper_to_item, se3.inv(second_offset)))
                goal = ik.objective(second_gripper, R=R_net, t=t_net)
                res = ik.solve(goal)
                if res==True:
                    self.config = self.robot.getConfig()
                    #print "Grasp succeeded"
                    self.active_limb=second_limb
                    return True
                else:
                    #self.config = self.robot.getConfig()
                    pass
        return False
Example #8
def graspedObjectTransform(robot, hand, qrobot0, Tobj0, qrobot):
    """Given initial robot configuration qrobot0 and object transform Tobj0,
    returns the object transformation corresponding to new configuration
    qrobot assuming the object is rigidly attached to the hand"""
    robot.setConfig(qrobot0)
    Thand0 = robot.link(hand.link).getTransform()
    Tgrasp = se3.mul(se3.inv(Thand0), Tobj0)
    robot.setConfig(qrobot)
    Thand = robot.link(hand.link).getTransform()
    return se3.mul(Thand, Tgrasp)
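A quick sanity sketch of the invariant this function encodes (hypothetical poses, not from the source): Tgrasp = inv(Thand0) * Tobj0 is constant while the object is rigidly held, so composing it back with the initial hand pose recovers Tobj0 exactly:

from klampt import se3, so3, vectorops
import math

Thand0 = (so3.rotation((1,0,0), 0.3), [0.4, 0.1, 0.9])  # made-up initial hand pose
Tobj0 = (so3.rotation((0,0,1), 1.2), [0.5, 0.1, 0.8])   # made-up initial object pose
Tgrasp = se3.mul(se3.inv(Thand0), Tobj0)
print vectorops.norm(se3.error(se3.mul(Thand0, Tgrasp), Tobj0))  # ~0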
Example #9
File: ex.py  Project: arocchi/Klampt
def graspedObjectTransform(robot,hand,qrobot0,Tobj0,qrobot):
    """Given initial robot configuration qrobot0 and object transform Tobj0,
    returns the object transformation corresponding to new configuration
    qrobot assuming the object is rigidly attached to the hand"""
    robot.setConfig(qrobot0)
    Thand0 = robot.link(hand.link).getTransform()
    Tgrasp = se3.mul(se3.inv(Thand0),Tobj0)
    robot.setConfig(qrobot)
    Thand = robot.link(hand.link).getTransform()
    return se3.mul(Thand,Tgrasp)
Example #10
def load_apc_world():
    """Produces a world with only the Baxter, shelf, and ground plane in it."""
    world = WorldModel()
    #uncomment these lines and comment out the next 2 if you want to use the
    #full Baxter model
    #print "Loading full Baxter model (be patient, this will take a minute)..."
    #world.loadElement(os.path.join(model_dir,"baxter.rob"))
    print "Loading simplified Baxter model..."
    world.loadElement(os.path.join(model_dir,baxter.klampt_model_name))
    
    #print "Loading Kiva pod model..."
    #world.loadElement(os.path.join(model_dir,"kiva_pod/meshes/pod_lowres.stl"))
    
    print 'Loading north hall shelf...'
    world.loadElement(os.path.join(model_dir,"north_shelf/mesh/shelf.stl"))
    
    print "Loading plane model..."
    world.loadElement(os.path.join(model_dir,"plane.env"))
    
    #shift the Baxter up a bit (95cm)
    Rbase,tbase = world.robot(0).getLink(0).getParentTransform()
    world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))
    world.robot(0).setConfig(world.robot(0).getConfig())
    
    #translate pod to be in front of the robot, and rotate the pod by 90 degrees 
    reorient = ([1,0,0,0,0,1,0,-1,0],[0,0,0])
    
    #kiva
    #Trel = (so3.rotation((0,0,1),-math.pi/2),[1.3,0,0])
    
    #north shelf
    Trel = (so3.rotation((0,0,1),-3*math.pi/2),[1.,0,0])
    Trel = se3.mul(Trel,(so3.rotation((1,0,0),-math.pi/2),[0,0,0]))
    
    T = reorient
    #world.terrain(0).geometry().transform(*Trel)
    world.terrain(0).geometry().transform(*se3.mul(Trel,T))
    

    #initialize the shelf xform for the visualizer and object
    #xform initialization
    global ground_truth_shelf_xform
    ground_truth_shelf_xform = se3.mul(Trel,T)

#####################################test#################################################
    # world.loadElement('../klampt_models/kiva_pod/meshes/pod_lowres.stl')
    # reorient = ([1,0,0,0,0,1,0,-1,0],[0,0,0.01])
    # #kiva
    # Trel = (so3.rotation((0,0,1),-math.pi/2),[.3,0,0])
    
    # T = reorient
    # world.terrain(2).geometry().transform(*se3.mul(Trel,T))

##########################################################################################
    return world
Example #11
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.  Must change self.active_grasp to the
        selected grasp.

        If successful, sends the motion to the low-level controller and
        returns True.
        
        Otherwise, does not modify the low-level controller and returns False.
        """
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()
        qcmd = self.controller.getCommandedConfig()
        #phase 1: init IK from the commanded config, search among grasps
        
        for (grasp,gxform) in grasps:
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            self.robot.setConfig(qcmd)
            if ik.solve(goal):
                #self.controller.setMilestone(self.robot.getConfig())
                #self.active_grasp = grasp
                no_collision = LimbPlanner.check_collision_free(self.planner, self.active_limb) ###
                if no_collision:
                    self.active_grasp = grasp
                    self.controller.setMilestone(self.robot.getConfig())
                    return True
                    
        #Phase 2: that didn't work, now try random sampling
        for i in range(100):
            #pick a config at random
            self.randomize_limb_position(self.active_limb)
            #pick a grasp at random
            (grasp,gxform) = random.choice(grasps)
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            if ik.solve(goal):
                # self.active_grasp = grasp
                #TODO: plan a path
                no_collision = LimbPlanner.check_collision_free(self.planner, self.active_limb)
                if no_collision:
                    self.active_grasp = grasp
                    self.controller.setMilestone(self.robot.getConfig())
                    return True
        return False
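Both phases above follow a common seed-then-restart IK pattern. Below is a hedged, generic sketch of that pattern as a standalone helper (a hypothetical function, assuming a Klamp't RobotModel and an ik objective; a real limb planner would restrict sampling to the active limb's joints and add collision checks):

from klampt import ik
import random

def solve_with_restarts(robot, goal, qseed, restarts=100):
    """Try IK from qseed first, then from random configurations."""
    robot.setConfig(qseed)
    if ik.solve(goal):
        return robot.getConfig()          # phase 1: seed from the commanded config
    qmin, qmax = robot.getJointLimits()
    for _ in xrange(restarts):            # phase 2: random restarts
        # assumes finite joint limits; continuous joints need special handling
        robot.setConfig([random.uniform(a, b) for a, b in zip(qmin, qmax)])
        if ik.solve(goal):
            return robot.getConfig()
    return None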
Example #12
    def __init__(self, knowledge_base):
        PerceptionInterface.__init__(self, knowledge_base)

        world = WorldModel()
        robot = world.loadRobot(os.path.join('klampt_models', baxter.klampt_model_name))
        robot.setConfig(self.knowledge_base.robot_state.sensed_config)

        self.base_xform = robot.getLink('{}_gripper'.format(self.knowledge_base.active_limb)).getTransform()
        if self.knowledge_base.active_limb == 'left':
            self.camera_xform = se3.mul(self.base_xform, self.knowledge_base.left_camera_offset_xform)
        else:
            self.camera_xform = se3.mul(self.base_xform, self.knowledge_base.right_camera_offset_xform)
Example #13
    def set_in_bin_xform(self,shelf_xform,ux,uy,theta):
        """A utility convenience function for setting up virtual shelves.

        Sets self.xform assuming the object is resting in the bin
        with its center's translational parameters (ux,uy) in the
        range [0,1]x[0,1] and orientation theta about the z axis.
        ux goes from left to right and uy goes from front to back."""
        global bin_bounds
        bmin,bmax = bin_bounds[self.bin_name]
        tlocal = [bmin[0]+ux*(bmax[0]-bmin[0]),bmin[1]-self.info.bmin[2],bmax[2]+uy*(bmin[2]-bmax[2])]
        Rlocal = so3.from_axis_angle(([0,1,0],theta))
        toShelfCoords = (so3.from_axis_angle(([1,0,0],math.pi/2)),[0,0,0])
        self.xform = se3.mul(shelf_xform,se3.mul((Rlocal,tlocal),toShelfCoords))
Example #14
def main():
    """The main loop that loads the models and starts the OpenGL visualizer"""
    
    world = WorldModel()
    #uncomment these lines and comment out the next 2 if you want to use the
    #full Baxter model
    #print "Loading full Baxter model (be patient, this will take a minute)..."
    #world.loadElement(os.path.join(model_dir,"baxter.rob"))
    print "Loading simplified Baxter model..."
    world.loadElement(os.path.join(model_dir,"baxter_col.rob"))
    print "Loading Kiva pod model..."
    world.loadElement(os.path.join(model_dir,"kiva_pod/model.obj"))
    print "Loading plane model..."
    world.loadElement(os.path.join(model_dir,"plane.env"))
    
    #shift the Baxter up a bit (95cm)
    Rbase,tbase = world.robot(0).getLink(0).getParentTransform()
    world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))
    world.robot(0).setConfig(world.robot(0).getConfig())
    
    #translate pod to be in front of the robot, and rotate the pod by 90 degrees 
    Trel = (so3.rotation((0,0,1),-math.pi/2),[1.2,0,0])
    T = world.rigidObject(0).getTransform()
    world.rigidObject(0).setTransform(*se3.mul(Trel,T))

    #load the resting configuration from klampt_models/baxter_rest.config
    global baxter_rest_config
    f = open(model_dir+'baxter_rest.config','r')
    baxter_rest_config = loader.readVector(f.readline())
    f.close()
    world.robot(0).setConfig(baxter_rest_config)
    
    #run the visualizer
    visualizer = MyGLViewer(world)
    visualizer.run()
Example #15
    def grasp_xforms(self,object):
        #you may want to implement this.  Returns a list of world transformations
        #for the grasps defined for the given object (an ItemInBin instance).
        #The visualizer will draw these transforms if 'draw_grasps' is turned to True.
        #TODO:
        objGrasps = object.info.grasps
        return [se3.mul(object.xform,grasp.grasp_xform) for grasp in objGrasps]
Example #16
 def objectTransform(self, x):
     """Given an arm configuration x, returns the object transformation"""
     q = self.q0[:]
     for i, xi in zip(self.hand.armIndices, x):
         q[i] = xi
     self.robot.setConfig(q)
     Thand = self.robot.link(self.hand.link).getTransform()
     return se3.mul(Thand, self.Tgrasp)
Example #17
File: ex.py  Project: arocchi/Klampt
 def objectTransform(self,x):
     """Given an arm configuration x, returns the object transformation"""
     q = self.q0[:]
     for i,xi in zip(self.hand.armIndices,x):
         q[i] = xi
     self.robot.setConfig(q)
     Thand = self.robot.link(self.hand.link).getTransform()
     return se3.mul(Thand,self.Tgrasp)
Example #18
 def spawn_item_test(self):
     self.world.loadElement('../klampt_models/kiva_pod/meshes/pod_lowres.stl')
     reorient = ([1,0,0,0,0,1,0,-1,0],[0,0,0.01])
     #kiva
     Trel = (so3.rotation((0,0,1),-math.pi/2),[.3,0,0])
     
     T = reorient
     self.world.terrain(2).geometry().transform(*se3.mul(Trel,T))
     self.planner.updateWorld(self.world)
Example #19
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.

        If successful, changes self.config and returns True.
        Otherwise, does not change self.config and returns False.
        """
        print
        print 'Trying to grasp ' + object.info.name
        grasp_transforms = self.knowledge.grasp_xforms(object);

        
    #try all of the grasps and stop if one is successful
        for grasp_t in grasp_transforms:
           
           #get the world point goal which is offset from the grasp_t

            rot_xform = se3.mul(grasp_t,se3.inv(left_gripper_center_xform))
            #rot = so3.rotation([0,0,1],math.pi);
            #rot_t = [rot,[0,0,0]];
            world_goal_xyz = rot_xform[1];

            cnt = 0;
            while(cnt < 100):
        #pick a random starting config for the left link and try to solve the ik problem
                config = self.robot.getConfig()
                for ind in self.left_arm_indices:
                    config[ind] = random.uniform(0,3);
        
                self.robot.setConfig(config);
                
                goal = ik.objective(self.left_gripper_link,R=se3.mul(grasp_t,se3.identity())[0],t=world_goal_xyz)

                if ik.solve(goal):
                    self.active_limb = 'Left'
                    self.config = self.robot.getConfig()
                    return True

                cnt = cnt + 1



        return False
Example #20
 def grasp_xforms(self,object):
     #you may want to implement this.  Returns a list of world transformations
     #for the grasps defined for the given object (an ItemInBin instance).
     #The visualizer will draw these transforms if 'draw_grasps' is turned to True.
     
     if not object.is_localized():
         return None
     
     # compose the object and graph transforms
     return [ se3.mul(object.xform, g.grasp_xform) for g in object.info.grasps ]
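The composition order matters here: se3.mul(A,B) applies B first and A second, which is why object.xform (object-to-world) multiplies grasp_xform (grasp-to-object) on the left. A tiny sanity check with made-up transforms:

from klampt import se3, so3, vectorops
import math

A = (so3.rotation((0,0,1), math.pi/3), [1.0, 0.0, 0.5])  # object -> world (made up)
B = (so3.rotation((1,0,0), 0.7), [0.0, 0.2, 0.0])        # grasp -> object (made up)
p = [0.1, 0.2, 0.3]
print vectorops.distance(se3.apply(se3.mul(A, B), p),
                         se3.apply(A, se3.apply(B, p)))  # ~0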
Example #21
    def localizeSpecificObject_old(self, bin, object):
        (xform, cloud) = self._localize_task('SpecificObject', [ bin, object ])

        # apply the camera transform
        xform = se3.mul(self.camera_xform, xform)
        cloud = map(lambda p: se3.apply(self.camera_xform, p[:3]) + [ p[3] ], cloud)

        debug_cloud(cloud, [ self.base_xform, self.camera_xform, xform ])

        return (xform, cloud)
Example #22
    def preGraspAction(self,object):
        
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()
        qcmd = self.controller.getCommandedConfig()

        isPreGrasp = False
        #pregrasp - align the gripper with the grasp
        for i in range(1000):
            #pick a config at random
            self.randomize_limb_position(self.active_limb)
            #pick a grasp at random
            (grasp,gxform,pregrasp) = random.choice(grasps)
            if self.active_limb == 'left':
                Tg = se3.mul(pregrasp,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(pregrasp,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            if ik.solve(goal):
                if(self.check_arms_for_collisions(True)):
                    
                    #TODO: plan a path
                    finalConfig = self.robot.getConfig()
                    #self.controller.setMilestone(self.robot.getConfig())
                    if self.active_limb == 'left':
                        path = self.planner.plan(qcmd,finalConfig)
                    else:
                        path = self.planner.plan(qcmd,finalConfig,order=['right','left'])
                            
                    if path == None:
                        continue
                    else:
                        isPreGrasp = True
                        for pp in path:
                            self.controller.setMilestone(pp)
                            self.waitForMove()
                        break;

        if(not isPreGrasp):
            return False

        return True
Example #23
def icp(object,scene):
	#return se3.identity()
	"""Computes a rotation and translation of the object to the scene
	using the ICP algorithm.  Returns the transform (R,t) in Klamp't se3
	format.  
	"""
	kd = KDTree(scene)
	print "kd tree done"

	#TODO: implement me
	# Sample both maps to make the closest point computations faster

	# Compute the minimum distance between the points



	# Reject the outliers, for example, using a threshold

	# Compute the R and t matrices. The procedure can be similar as the one for the
	# 2D images.
	#ret = se3.identity()
	#return ret
	#ret[1][2]+=0.2
	#ret[1][1]+=0.3
	#return ret
	sampled = random.sample(object, 1000)
	threshold = 0.75
	net = se3.identity()
	#net =[net[0], (0.2,0.05,0)]
	#return net
	sampled = [se3.apply(net, o) for o in sampled]
	#print object[0]
	for _ in xrange(10):
		print "start finding correspondances"
		correspondings = []
		for i in sampled:
			try:
				(dist, idx) = kd.query(i)
			except:
				print i
				raise Exception()
			correspondings.append((i, scene[idx], dist))
		correspondings.sort(key=lambda x: x[2])
		correspondings = correspondings[0:int(len(correspondings)*threshold)]
		#print "\n".join(map(str,correspondings))
		print "done finding correspondances"
		object_points = [i[0] for i in correspondings]
		scene_points = [i[1] for i in correspondings]
		(R,t) = one_round_icp(scene_points, object_points, 'list')
		#(R,t) = one_round_icp(object_points, scene_points, 'matrix')
		sampled = [se3.apply((R,t),i) for i in sampled]
		net = se3.mul((R,t), net)
		#print R,t
		#print net
	return net
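The one_round_icp helper called above is not shown in this snippet. A minimal SVD-based (Kabsch) sketch of a single alignment round might look like the following; the signature mirrors the call site (scene points first, object points second, 'list' return format), but treat it as an assumption rather than the project's actual implementation:

import numpy as np

def one_round_icp(scene_points, object_points, fmt='list'):
    """One Kabsch alignment round: returns (R,t) mapping object -> scene."""
    P = np.array(object_points)              # source points
    Q = np.array(scene_points)               # target correspondences
    cp, cq = P.mean(axis=0), Q.mean(axis=0)
    H = np.dot((P - cp).T, (Q - cq))         # 3x3 cross-covariance
    U, S, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(np.dot(Vt.T, U.T)))
    R = np.dot(Vt.T, np.dot(np.diag([1.0, 1.0, d]), U.T))
    t = cq - np.dot(R, cp)
    if fmt == 'list':
        # Klamp't so3 stores rotations as column-major 9-lists
        return (R.T.flatten().tolist(), t.tolist())
    return (R, t)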
Example #24
    def update(self):
        self.objXform = self.object.getTransform()
        self.robotXform = self.robot.link(6).getTransform()
        self.relativeXform = se3.mul(se3.inv(self.objXform), self.robotXform)

        # for sanity check (checked that the relative transform output is correct)
        # print "obj", self.objXform[1], "robot", self.robotXform[1], "relative", self.relativeXform[1]

        self.robotConfig = []
        for i in self.validLinks:
            self.robotConfig.append(self.robot.getConfig()[i])
Example #25
 def grasp_xforms(self,object):
     #you may want to implement this.  Returns a list of world transformations
     #for the grasps defined for the given object (an ItemInBin instance).
     #The visualizer will draw these transforms if 'draw_grasps' is turned to True.
     l = [x.grasp_xform for x in object.info.grasps]
     r = [se3.mul(object.xform,x) for x in l]
     #if len(r) > 11:
     #    return [r[11]]
     #else:
     #    return None
     return r
Example #26
 def grasp_xforms(self,object):
     if object.xform == None: return None
     res = []
     for g in object.info.grasps:
         # NOTE: g.grasp_xform: the transformation of the gripper fingers
         #                      relative to the object's local frame
         #        object.xform: the transformation of the object center
         #                      relative to the world frame
         #   grasp_xform_world: the transformation of the gripper fingers
         #                      relative to the world frame
         grasp_xform_world = se3.mul(object.xform,g.grasp_xform)
         res.append((g,grasp_xform_world))
     return res
Example #27
 def grasp_xforms(self,object):
     #TODO: remove me
     if object.xform == None: return None
     res = []
     for g in object.info.grasps:
         grasp_xform_world = se3.mul(object.xform,g.grasp_xform)
         res.append(grasp_xform_world)
     return res
     
     #you may want to implement this.  Returns a list of world transformations
     #for the grasps defined for the given object (an ItemInBin instance).
     #The visualizer will draw these transforms if 'draw_grasps' is turned to True.
     #TODO:
     return None
Example #28
def getCameraToWorldXform(linkName=None, index=0):
    '''
    get current transformation (R, t) of camera in world frame. 
    '''

    global CAMERA_TRANSFORM, SIMWORLD

    if index >= len(CAMERA_TRANSFORM):
        print 'Error, camera index does not exist'
        return ([0,0,0,0,0,0,0,0,0],[0,0,0])

    if linkName != None:
        return se3.mul(SIMWORLD.robot(0).link(linkName).getTransform(), CAMERA_TRANSFORM[index])
    else: 
        return CAMERA_TRANSFORM[index]
Example #29
    def grasp_xforms(self,object):
        #you may want to implement this.  Returns a list of world transformations
        #for the grasps defined for the given object (an ItemInBin instance).
        #The visualizer will draw these transforms if 'draw_grasps' is turned to True.
        
        possibleGrasps = object.info.grasps
        objectToWorldXform = object.xform
        if objectToWorldXform == None:
            return None

        possibleGraspsWorld = list()
        for grasp in possibleGrasps:
            possibleGraspsWorld.append(se3.mul(objectToWorldXform, grasp.grasp_xform))

        return possibleGraspsWorld
Example #30
    def check_collision_free_with_object(self,limb,objectGeom,grasp):
        """Checks whether the given limb, holding an object at the given
        grasp, is collision free at the robot's current configuration"""
        if not self.check_collision_free(limb):
            return False
        objToGripperXForm = se3.mul(left_gripper_center_xform,se3.inv(grasp.grasp_xform))
        #assume robot configuration is updated
        if limb=='left':
            gripperLink = self.robot.link(left_gripper_link_name)
        else:
            gripperLink = self.robot.link(right_gripper_link_name)

        Tgripper = gripperLink.getTransform();
        Tobj = se3.mul(Tgripper,objToGripperXForm)
        objectGeom.setCurrentTransform(*Tobj)
        for t in xrange(self.world.numTerrains()):
            if self.world.terrain(t).geometry().collides(objectGeom):
                # print "Held object-shelf collision"
                return False
        for o in self.dynamic_objects:
            if o.info.geometry.collides(objectGeom):
                # print "Held object-object collision"
                return False
        return True
Example #31
File: ex.py  Project: arocchi/Klampt
 def __init__(self,globals,hand,object):
     CSpace.__init__(self)
     self.globals = globals
     self.robot = globals.robot
     self.hand = hand
     self.object = object
     #setup initial object-robot transform
     Thand0 = self.robot.link(hand.link).getTransform()
     Tobj0 = object.getTransform()
     self.Tgrasp = se3.mul(se3.inv(Thand0),Tobj0)
     #initial whole-body configuration
     self.q0 = self.robot.getConfig()
     #setup CSpace sampling range
     qlimits = zip(*self.robot.getJointLimits())
     self.bound = [qlimits[i] for i in self.hand.armIndices]
     #setup CSpace edge checking epsilon
     self.eps = 1e-2
Example #32
 def __init__(self, globals, hand, object):
     CSpace.__init__(self)
     self.globals = globals
     self.robot = globals.robot
     self.hand = hand
     self.object = object
     #setup initial object-robot transform
     Thand0 = self.robot.link(hand.link).getTransform()
     Tobj0 = object.getTransform()
     self.Tgrasp = se3.mul(se3.inv(Thand0), Tobj0)
     #initial whole-body configuration
     self.q0 = self.robot.getConfig()
     #setup CSpace sampling range
     qlimits = zip(*self.robot.getJointLimits())
     self.bound = [qlimits[i] for i in self.hand.armIndices]
     #setup CSpace edge checking epsilon
     self.eps = 1e-2
Example #33
    def move_gripper_upright(self,limb,moveVector,collisionchecker = None):
        vertical = [0,0,0.1]
        if self.active_limb == 'left':
            gripperlink = self.left_gripper_link
        else:
            gripperlink = self.right_gripper_link
        qcur = self.robot.getConfig()
        vertical_in_gripper_frame = so3.apply(so3.inv(gripperlink.getTransform()[0]),vertical)
        centerpos = se3.mul(gripperlink.getTransform(),left_gripper_center_xform)[1]
        move_target = vectorops.add(centerpos,moveVector)
        movegoal = ik.objective(gripperlink,local=[left_gripper_center_xform[1],vectorops.add(left_gripper_center_xform[1],vertical_in_gripper_frame)],world=[move_target,vectorops.add(move_target,vertical)])

        sortedSolutions = self.get_ik_solutions([movegoal],[self.active_limb],qcur,validity_checker=collisionchecker,maxResults=1)
        if len(sortedSolutions) == 0:
            print "No upright-movement config found"
            return False
        self.robot.setConfig(sortedSolutions[0][0])
        return True
Example #34
    def getSensedValue(self, q):
        """Get link x, which is rotation matrix and/or translation"""
        self.robot.setConfig(q)
        T = self.link.getTransform()
        #check if relative transform task, modify T to local transform
        if self.baseLinkNo >= 0:
            Tb = self.baseLink.getTransform()
            Tbinv = se3.inv(Tb)
            T = se3.mul(Tbinv, T)
        if self.taskType == 'po':
            x = (T[0], se3.apply(T, self.localPosition))
        elif self.taskType == 'position':
            x = se3.apply(T, self.localPosition)
        elif self.taskType == 'orientation':
            x = T[0]
        else:
            raise ValueError("Invalid taskType " + self.taskType)
        return x
Example #35
	def getSensedValue(self, q):
		"""Get link x, which is rotation matrix and/or translation
		"""
		self.robot.setConfig(q)
		T = self.link.getTransform()
		#check if relative transform task, modify T to local transform
		if self.baseLinkNo >= 0:
			Tb = self.baseLink.getTransform()
			Tbinv = se3.inv(Tb)
			T = se3.mul(Tbinv,T)
		if self.taskType == 'po':
			x = (T[0],se3.apply(T,self.localPosition))
		elif self.taskType == 'position':
			x = se3.apply(T,self.localPosition)
		elif self.taskType == 'orientation':
			x = T[0]
		else:
			raise ValueError("Invalid taskType "+self.taskType)
		return x
Example #36
def setupWorld():
    world = WorldModel()
    print "Loading full Baxter model (be patient, this will take a minute)..."
    world.loadElement(os.path.join(KLAMPT_MODELS_DIR, "baxter.rob"))
    # print "Loading simplified Baxter model..."
    # world.loadElement(os.path.join(KLAMPT_MODELS_DIR,"baxter_col.rob"))
    print "Loading Kiva pod model..."
    world.loadElement(os.path.join(KLAMPT_MODELS_DIR, "kiva_pod/model.obj"))
    print "Loading plane model..."
    world.loadElement(os.path.join(KLAMPT_MODELS_DIR, "plane.env"))

    Rbase, tbase = world.robot(0).link(0).getParentTransform()
    world.robot(0).link(0).setParentTransform(Rbase, (0, 0, 0.95))
    world.robot(0).setConfig(world.robot(0).getConfig())

    Trel = (so3.rotation((0, 0, 1), -math.pi / 2), SHELF_MODEL_XFORM)
    T = world.rigidObject(0).getTransform()
    world.rigidObject(0).setTransform(*se3.mul(Trel, T))

    return world
Example #37
    """
    
    import sys
    import random
    robot_fn = "../Klampt/data/robots/baxter_col.rob"
    world = WorldModel()
    if not world.readFile(robot_fn):
        exit(1)
    robot = world.robot(0)
    camera_obs_link = "head_camera"
    marker_obs_link = "left_gripper"
    lc = robot.link(camera_obs_link)
    lm = robot.link(marker_obs_link)
    pc = robot.link(lc.getParent())
    pm = robot.link(lm.getParent())
    Tc0 = se3.mul(se3.inv(pc.getTransform()),lc.getTransform())
    Tm0 = se3.mul(se3.inv(pm.getTransform()),lm.getTransform())
    print "True camera transform",Tc0
    print "True marker transform",Tm0
    print
    camera_link = pc.getName()
    marker_link = pm.getName()

    #generate synthetic data, corrupted with joint encoder and sensor measurement errors
    qmin,qmax = robot.getJointLimits()
    numObs = 10
    jointEncoderError = 1e-5
    sensorErrorRads = 1e-2
    sensorErrorMeters = 2e-3
    trueCalibrationConfigs = []
    calibrationConfigs = []
Example #38
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.
    
    Given body transforms and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    camera link as well as the marker transforms relative to their links.

    M: is the set of m markers. By default there is at most one marker per link.
       Markers can either be point markers (e.g., a mocap ball), or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O: is the set of n observations, consisting of a reading (Tc_i,Tm_i,o_i,l_i)
       where Tc_i is the camera link's transform, Tm_i is the marker link's
       transform, o_i is the reading which consists of either a point or transform
       estimate in the camera frame, and l_i is the ID of the marker (by default,
       just its link)

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's transforms Tc_1,...,Tc_n
      at each calibration configuration.
    - marker_link_transforms: a list of the marker link's transforms Tm_1,...,Tm_n
      at the same configurations that generated the marker_observations list.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).

      If o_i is a 3-vector, the marker is considered to be a point marker.
      If a se3 element (R,t) is given, the marker is considered to be
      a transform marker.  You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list.  Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).  By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.  Stops when the change of
      estimates falls below this threshold
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v,None) for v in marker_id_list)
    for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
        if len(obs)==3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 'p'
        elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: proportional to distance
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0/vectorops.norm(obs))
            else:
                observation_weights.append(1.0/vectorops.norm(obs[1]))
        observation_weights = [1.0]*len(observation_weights)
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err,'__iter__'):
                observation_weights.append([1.0/v for v in err])
            else:
                observation_weights.append(1.0/err)

    #initial guesses
    if camera_initial_guess == None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.itervalues()):
            #estimate camera rotation from point estimates because rotations are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i,obs in enumerate(marker_observations):
                if len(obs)==2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
            print "Estimated camera rotation from points:",camera_initial_guess
    if marker_initial_guess == None:
        marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
        for i,obs in enumerate(marker_observations):
            rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
        vis.add("coordinates",rgroup)
        vis.dialog()

    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i
        #camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and then optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and then optimize the camera transform.
        #finally we'll check the error to detect convergence
        #1. Estimate marker transforms from current camera transform
        new_marker_transforms = dict((l,(TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor) if marker_types[l]=='t' else VectorStats(zero=[0.0]*3,prior=regularizationFactor))) for l in marker_id_list)
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel,obs)
            else:
                estimate = se3.apply(Trel,obs)
            new_marker_transforms[marker].add(estimate,observation_weights[i])
        print "ITERATION",iters
        #print "  ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
        #2. Estimate camera transform from current marker transforms
        new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
            relative_points = []
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
        else:
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
                estimate = se3.mul(Trel,se3.inv(obs))
                new_camera_transform.add(estimate,observation_weights[i])
        #print "  ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
        #3. compute difference between last and current estimates
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker]=='t':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
            print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters)
            break
        print "  ESTIMATE CHANGE:",math.sqrt(diff)
        error = 0.0
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Tc = se3.mul(Tclink,camera_transform)
            if marker_types[marker] == 't':
                Tm = se3.mul(Tmlink,marker_transforms[marker])
                error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
            else:
                Tm = se3.apply(Tmlink,marker_transforms[marker])
                error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
        print "  OBSERVATION ERROR:",math.sqrt(error)
        #raw_input()
        if DO_VISUALIZATION:
            rgroup.setFrameCoordinates("camera estimate",camera_transform)
            rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
            for i,obs in enumerate(marker_observations):
                rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
            vis.add("coordinates",rgroup)
            vis.dialog()
    if math.sqrt(diff) >= tolerance:
        print "Max iters reached"
    error = 0.0
    for i in xrange(n):
        marker = marker_ids[i]
        Tclink = camera_link_transforms[i]
        Tmlink = marker_link_transforms[i]
        obs = marker_observations[i]
        Tc = se3.mul(Tclink,camera_transform)
        if marker_types[marker] == 't':
            Tm = se3.mul(Tmlink,marker_transforms[marker])
            error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
        else:
            Tm = se3.apply(Tmlink,marker_transforms[marker])
            error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
    return (math.sqrt(error),camera_transform,marker_transforms)
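A hedged call sketch with degenerate synthetic data, just to show the interface (assumes this function and its module-level helpers such as TransformStats are importable and that DO_VISUALIZATION is off; all values are made up): five observations of a single transform marker, ID 0, seen 1 m in front of the camera.

from klampt import se3, so3

n = 5
cam_links = [se3.identity()] * n                     # camera link pose at each observation
marker_links = [se3.identity()] * n                  # marker link pose at each observation
observations = [(so3.identity(), [0, 0, 1.0])] * n   # transform readings in the camera frame
err, Tc, markers = calibrate_xform_camera(cam_links, marker_links,
                                          observations, [0] * n,
                                          camera_initial_guess=se3.identity(),
                                          maxIters=20)
print "residual:", err
print "camera xform estimate:", Tc
print "marker 0 estimate:", markers[0]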
Example #39
    def onMessage(self, msg):
        global deviceState, defaultDeviceState
        #print "Getting haptic message"
        #print msg
        if msg['type'] != 'MultiHapticState':
            print "Strange message type", msg['type']
            return
        if len(deviceState) == 0:
            print "Adding", len(
                msg['devices']) - len(deviceState), "haptic devices"
            while len(deviceState) < len(msg['devices']):
                deviceState.append(defaultDeviceState.copy())

        if len(msg['devices']) != len(deviceState):
            print "Incorrect number of devices in message:", len(
                msg['devices'])
            return

        #change overall state depending on button 2
        if 'events' in msg:
            for e in msg['events']:
                #print e['type']
                if e['type'] == 'b2_down':
                    dstate = deviceState[e['device']]
                    toggleMode(dstate)
                    print 'Changing device', e['device'], 'to state', dstate[
                        'mode']

        for i in range(len(deviceState)):
            dmsg = msg['devices'][i]
            dstate = deviceState[i]
            if dmsg['button1'] == 1:
                #drag widget if button 1 is down
                oldButtonDown = dstate['buttonDown']
                dstate['buttonDown'] = True
                #moving
                if dstate['mode'] == 'normal':
                    if not oldButtonDown:
                        dstate['moveStartDeviceTransform'] = (so3.from_moment(
                            dmsg['rotationMoment']), dmsg['position'])
                        if self.widgetGetters == None:
                            dstate['moveStartWidgetTransform'] = dstate[
                                'widgetTransform']
                        else:
                            Twidget = self.widgetGetters[i]()
                            if Twidget != None:
                                dstate['moveStartWidgetTransform'] = Twidget
                            else:
                                dstate['moveStartWidgetTransform'] = dstate[
                                    'widgetTransform']
#================================================================================================
#                    #perform transformation of widget
#                    deviceTransform = (so3.from_moment(dmsg['rotationMoment']),dmsg['position'])
#                    #compute relative motion
#                    Tdev = se3.mul(deviceToWidgetTransform,deviceTransform)
#                    TdevStart = se3.mul(deviceToWidgetTransform,dstate['moveStartDeviceTransform'])
#                    relDeviceTransform = (so3.mul(Tdev[0],so3.inv(TdevStart[0])),vectorops.sub(Tdev[1],TdevStart[1]))
#                    #scale relative motion
#                    Rrel,trel = relDeviceTransform
#                    wrel = so3.moment(Rrel)
#                    wrel = vectorops.mul(wrel,dstate['rotationScale'])
#                    trel = vectorops.mul(trel,dstate['positionScale'])
#                    #print "Moving",trel,"rotating",wrel
#                    relDeviceTransform = (so3.from_moment(wrel),trel)
#                    #perform transform
#                    dstate['widgetTransform'] = se3.mul(dstate['moveStartWidgetTransform'],relDeviceTransform)
#================================================================================================
#- perform transformation of widget
                    deviceTransform = (so3.from_moment(dmsg['rotationMoment']),
                                       dmsg['position'])
                    # delta_device = vectorops.sub(deviceTransform[1],dstate['moveStartDeviceTransform'][1])
                    # print "haptic moves: ", delta_device
                    delta_device_R_matrix = so3.mul(
                        deviceTransform[0],
                        so3.inv(dstate['moveStartDeviceTransform'][0]))
                    #                    delta_device_R = so3.moment(delta_device_R_matrix)
                    #                    norm_delta_R = vectorops.norm(delta_device_R)
                    #                    if norm_delta_R != 0:
                    #                        vec_delta_R = vectorops.div(delta_device_R, norm_delta_R)
                    #                    else:
                    #                        vec_delta_R = [0,0,0]
                    #
                    #                    print "haptic rotate: norm = ", norm_delta_R, ", vec = ", vec_delta_R

                    #- compute relative motion
                    Tdev = se3.mul(deviceToBaxterBaseTransform,
                                   deviceTransform)
                    TdevStart = se3.mul(deviceToBaxterBaseTransform,
                                        dstate['moveStartDeviceTransform'])
                    # delta_widget = vectorops.sub(Tdev[1],TdevStart[1])
                    # print "Baxter moves: ", delta_widget
                    #                    delta_widget_R_matrix = so3.mul(Tdev[0],so3.inv(TdevStart[0]))
                    #                    delta_widget_R = so3.moment(delta_widget_R_matrix)
                    #                    norm_widget_R = vectorops.norm(delta_widget_R)
                    #                    if norm_widget_R != 0:
                    #                        vec_widget_R = vectorops.div(delta_widget_R, norm_widget_R)
                    #                    else:
                    #                        vec_widget_R = [0,0,0]

                    #                    print "Baxter rotate: norm = ", norm_widget_R, ", vec = ", vec_widget_R

                    relDeviceTransform = (so3.mul(Tdev[0],
                                                  so3.inv(TdevStart[0])),
                                          vectorops.sub(Tdev[1], TdevStart[1]))

                    #- scale relative motion
                    Rrel, trel = relDeviceTransform
                    wrel = so3.moment(Rrel)
                    wrel = vectorops.mul(wrel, dstate['rotationScale'])
                    trel = vectorops.mul(trel, dstate['positionScale'])

                    #- print "Moving",trel,"rotating",wrel
                    relDeviceTransform = (so3.from_moment(wrel), trel)

                    #- perform transform
                    # dstate['widgetTransform'] = se3.mul(dstate['moveStartWidgetTransform'],relDeviceTransform)
                    dstate['widgetTransform'] = (
                        so3.mul(dstate['moveStartWidgetTransform'][0],
                                relDeviceTransform[0]),
                        vectorops.add(dstate['moveStartWidgetTransform'][1],
                                      relDeviceTransform[1]))


#================================================================================================

                elif dstate['mode'] == 'scaling':
                    if not oldButtonDown:
                        dstate['moveStartDeviceTransform'] = (so3.from_moment(
                            dmsg['rotationMoment']), dmsg['position'])
                    #perform scaling
                    deviceTransform = (so3.from_moment(dmsg['rotationMoment']),
                                       dmsg['position'])
                    oldDeviceTransform = dstate['moveStartDeviceTransform']
                    znew = deviceTransform[1][1]
                    zold = oldDeviceTransform[1][1]
                    posscalerange = [1e-2, 20]
                    rotscalerange = [0.5, 2]
                    newposscale = min(
                        max(
                            dstate['positionScale'] * math.exp(
                                (znew - zold) * 10), posscalerange[0]),
                        posscalerange[1])
                    newrotscale = min(
                        max(
                            dstate['rotationScale'] * math.exp(
                                (znew - zold) * 4), rotscalerange[0]),
                        rotscalerange[1])
                    if any(newposscale == v for v in posscalerange) or any(
                            newrotscale == v for v in rotscalerange):
                        pass  #don't change the scale
                    else:
                        dstate['positionScale'] = newposscale
                        dstate['rotationScale'] = newrotscale
                    print "Setting position scale to", dstate[
                        'positionScale'], "rotation scale to", dstate[
                            'rotationScale']
                    #update start device transform
                    dstate['moveStartDeviceTransform'] = deviceTransform
                elif dstate['mode'] == 'gripper':
                    print "Gripper mode not available"
                    dstate['mode'] = 'normal'
                    dstate['buttonDown'] = False
            else:
                dstate['buttonDown'] = False
        if self.viewer: self.viewer.update(deviceState)
        else: print "No viewer..."
Example #40
'''Reference points'''
frwrd = [0,0,-1,0,1,0,1,0,0] # rotation relative to the global/Baxter frame that points the hands' z axis forward
ext = [0.91,-0.41,1.46]

'''Transforms'''
T_target = [frwrd,ext]
T_EEtoPPU = [[-1,0,0,0,0,1,0,1,0], [0,0,0]]

# '''Correction loop'''
# while True:
# 	T_EE = robot.right_ee.sensedTransform()
# 	R_PPU = so3.mul(T_EEtoPPU[0],T_EE[0])
# 	roundEachAndPrint(R_PPU,3)

while True:
	T_bestEE = se3.mul(se3.inv(T_EEtoPPU),T_target)
	T_EEcmd = T_bestEE
	robot.right_ee.moveTo(T_EEcmd[0],T_EEcmd[1])

# while True:
# 	transform = robot.right_ee.sensedTransform()
# 	position = transform[1]
# 	rotation = transform[0]
# 	roundEachAndPrint(rotation,3)