Example #1
def editExistingGrasp(item,object,gindex):
    global world,robot
    grasp = None
    if isinstance(gindex,int):
        grasp = item.grasps[gindex]
    else:
        for g in item.grasps:
            if g.name == gindex:
                grasp = g
                break
        if grasp is None:
            print "Grasp",gindex,"does not exist"
            raise RuntimeError("Invalid grasp")
    #place the object at the pose implied by the existing grasp
    object.setTransform(*se3.inv(grasp.grasp_xform))
    open_config = grippermodule.commandToConfig(grasp.gripper_open_command)
    closed_config = grippermodule.commandToConfig(grasp.gripper_close_command)
    robot.setConfig(closed_config)
    xform = resource.get(name=None,type='RigidTransform',default=object.getTransform(),description="Transform of "+item.name,world=world,frame=object)
    if xform==None: return None
    #set the object in the world 
    object.setTransform(*xform)
    #get the hand open configuration
    open_config = resource.get(name=None,type='Config',default=open_config,description="Hand open configuration",world=world)
    if open_config==None: return None
    closed_config = resource.get(name=None,type='Config',default=closed_config,description="Hand closed configuration",world=world)
    if closed_config==None: return None
    #return grasp
    grasp.grasp_xform = se3.inv(xform)
    grasp.gripper_close_command = grippermodule.configToCommand(closed_config)
    grasp.gripper_open_command = grippermodule.configToCommand(open_config)
    return grasp
Example #2
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.

        If successful, changes self.config and returns True.
        Otherwise, does not change self.config and returns False.
        """
        #TODO: put my code here
        if self.active_limb=="LEFT":
            first_gripper = self.left_gripper_link
            second_gripper = self.right_gripper_link
            first_offset = left_gripper_center_xform
            second_offset = right_gripper_center_xform
            first_limb = "LEFT"
            second_limb = "RIGHT"
        elif self.active_limb=="RIGHT":
            first_gripper = self.right_gripper_link
            second_gripper = self.left_gripper_link
            first_offset = right_gripper_center_xform
            second_offset = left_gripper_center_xform
            first_limb = "RIGHT"
            second_limb = "LEFT"
        else:
            print "Grasping... Unrecognized active limb"
            return False
        item_to_world = object.xform
        for grasp in object.info.grasps:
            gripper_to_item = grasp.grasp_xform
            for _ in xrange(100):
                self.robot.setConfig(self.random_init())
                (R_net, t_net) = se3.mul(item_to_world, se3.mul(gripper_to_item, se3.inv(first_offset)))
                goal = ik.objective(first_gripper, R=R_net, t=t_net)
                res = ik.solve(goal)
                if res:
                    self.config = self.robot.getConfig()
                    self.active_limb = first_limb
                    return True

                self.robot.setConfig(self.random_init())
                (R_net, t_net) = se3.mul(item_to_world, se3.mul(gripper_to_item, se3.inv(second_offset)))
                goal = ik.objective(second_gripper, R=R_net, t=t_net)
                res = ik.solve(goal)
                if res:
                    self.config = self.robot.getConfig()
                    self.active_limb = second_limb
                    return True
        return False
Example #3
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.  Must change self.active_grasp to the
        selected grasp.

        If successful, sends the motion to the low-level controller and
        returns True.
        
        Otherwise, does not modify the low-level controller and returns False.
        """
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()
        qcmd = self.controller.getCommandedConfig()
        #phase 1: init IK from the commanded config, search among grasps
        
        for (grasp,gxform) in grasps:
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            self.robot.setConfig(qcmd)
            if ik.solve(goal):
                no_collision = LimbPlanner.check_collision_free(self.planner, self.active_limb)
                if no_collision:
                    self.active_grasp = grasp
                    self.controller.setMilestone(self.robot.getConfig())
                    return True
                    
        #Phase 2: that didn't work, now try random sampling
        for i in range(100):
            #pick a config at random
            self.randomize_limb_position(self.active_limb)
            #pick a grasp at random
            (grasp,gxform) = random.choice(grasps)
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            if ik.solve(goal):
                # self.active_grasp = grasp
                #TODO: plan a path
                no_collision = LimbPlanner.check_collision_free(self.planner, self.active_limb)
                if no_collision:
                    self.active_grasp = grasp
                    self.controller.setMilestone(self.robot.getConfig())
                    return True
        return False
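A pattern that recurs throughout these examples: the IK target for the gripper link is the world grasp transform composed with the inverse of the gripper-center offset. A minimal standalone sketch of that pattern, assuming modern Klampt import paths (klampt.math.se3, klampt.model.ik) and a gripper_center_xform giving the grasp center's offset in the link frame (names are illustrative, not from the examples above):

from klampt.math import se3
from klampt.model import ik

def solve_grasp_ik(robot, gripper_link, gripper_center_xform, gxform):
    #target for the link itself: place the gripper center at the world grasp xform
    Tg = se3.mul(gxform, se3.inv(gripper_center_xform))
    goal = ik.objective(gripper_link, R=Tg[0], t=Tg[1])
    if ik.solve(goal):
        return robot.getConfig()
    return None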
Example #4
    def check_collision_free_with_object(self,limb,object,grasp):
        """Checks whether the given limb, holding an object at the given
        grasp, is collision free at the robot's current configuration"""
        objToGripperXForm = se3.mul(baxter.left_gripper_center_xform,se3.inv(grasp))
        #assume robot configuration is updated
        if limb=='left':
            gripperLink = self.robot.getLink(baxter.left_gripper_link_name)
        else:
            gripperLink = self.robot.getLink(baxter.right_gripper_link_name)

        if not isinstance(object,list):
            if object==None:
                object = []
            else:
                object = [object]

        Tgripper = gripperLink.getTransform()
        Tobj = se3.mul(Tgripper,objToGripperXForm)
        for o in object:
            o.setTransform(*Tobj)
        
        excludeids = [ o.getID() for o in object]
        if not self.check_collision_free(limb,exclude=excludeids):
            #print 'Limb is not collision free'
            return False

        for t in xrange(self.world.numRigidObjects()):
            other = self.world.rigidObject(t)
            if(other.getID() not in excludeids):
                if any(other.geometry().collides(o.geometry()) for o in object):
                    #visualization.debug(self.world)
                    #print "Held object-shelf collision"
                    return False
        return True
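The transform chain above generalizes: a held object's world pose is the gripper link's transform composed with the gripper-center offset and the inverse grasp. A sketch of that chain as a standalone helper (hypothetical name; assumes Klampt's (R,t) se3 representation):

from klampt.math import se3

def held_object_transform(Tgripper_link, gripper_center_xform, grasp_xform):
    #world pose of an object held at grasp_xform (object relative to gripper center)
    return se3.mul(Tgripper_link, se3.mul(gripper_center_xform, se3.inv(grasp_xform)))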
Example #5
 def getJacobian(self, q):
     self.robot.setConfig(q)
     J = None
     if self.taskType == 'po':
         J = self.link.getJacobian(self.localPosition)
     elif self.taskType == 'position':
         J = self.link.getPositionJacobian(self.localPosition)
     elif self.taskType == 'orientation':
         J = self.link.getOrientationJacobian()
     else:
         raise ValueError("Invalid taskType " + self.taskType)
     J = np.array(J)
     #check if relative transform task, modify Jacobian accordingly
     if self.baseLinkNo >= 0:
         T = self.link.getTransform()
         Tb = self.baseLink.getTransform()
         Tbinv = se3.inv(Tb)
         pb = se3.apply(Tbinv, se3.apply(T, self.localPosition))
         if self.taskType == 'po':
             Jb = self.baseLink.getJacobian(pb)
         elif self.taskType == 'position':
             Jb = self.baseLink.getPositionJacobian(pb)
         elif self.taskType == 'orientation':
             Jb = self.baseLink.getOrientationJacobian()
         Jb = np.array(Jb)
         #subtract out jacobian w.r.t. baseLink
         J -= Jb
     if self.activeDofs != None:
         J = select_cols(J, self.activeDofs)
     return J
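select_cols is used here but not defined in the excerpt; a plausible one-line numpy reconstruction, assuming activeDofs is a list of column indices (an assumption, since the original helper is not shown):

import numpy as np

def select_cols(J, active_dofs):
    #keep only the Jacobian columns corresponding to the active DOFs (hypothetical reconstruction)
    return np.asarray(J)[:, list(active_dofs)]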
Example #6
	def getJacobian(self, q):
		self.robot.setConfig(q)
		J = None
		if self.taskType == 'po':
			J = self.link.getJacobian(self.localPosition)
		elif self.taskType == 'position':
			J = self.link.getPositionJacobian(self.localPosition)
		elif self.taskType == 'orientation':
			J = self.link.getOrientationJacobian()
		else:
			raise ValueError("Invalid taskType "+self.taskType)
		J = np.array(J)
		#check if relative transform task, modify Jacobian accordingly
		if self.baseLinkNo >= 0:
			T = self.link.getTransform()
			Tb = self.baseLink.getTransform()
			Tbinv = se3.inv(Tb)
			pb = se3.apply(Tbinv,se3.apply(T,self.localPosition))
			if self.taskType == 'po':
				Jb = self.baseLink.getJacobian(pb)
			elif self.taskType == 'position':
				Jb = self.baseLink.getPositionJacobian(pb)
			elif self.taskType == 'orientation':
				Jb = self.baseLink.getOrientationJacobian()
			Jb = np.array(Jb)
			#subtract out jacobian w.r.t. baseLink
			J -= Jb
		if self.activeDofs!=None:
			J = select_cols(J,self.activeDofs)
		return J
Example #7
def editNewGrasp(item,object,name=None,openClose=1):
    global world,robot
    global default_open_config,default_close_config
    #get the object transform
    xform = resource.get(name=None,
                         type='RigidTransform',
                         default=object.getTransform(),
                         description="Transform of "+item.name,
                         world=world,frame=object)
    if xform==None: return None
    #set the object in the world 
    object.setTransform(*xform)

    if openClose:
        #get the hand open configuration
        open_config = resource.get(name=None,type='Config',default=default_open_config,description="Hand open configuration",world=world)
        if open_config==None: return None
        closed_config = resource.get(name=None,type='Config',default=open_config,description="Hand closed configuration",world=world)
        if closed_config==None: return None

    #return grasp
    grasp = apc.ItemGrasp()
    grasp.grasp_xform = se3.inv(xform)

    if openClose:
        grasp.gripper_close_command = grippermodule.configToCommand(closed_config)
        grasp.gripper_open_command = grippermodule.configToCommand(open_config)
    if name==None:
        #default: just number it
        grasp.name = str(len(item.grasps))
    else:
        grasp.name = name
    return grasp
Example #8
def graspedObjectTransform(robot, hand, qrobot0, Tobj0, qrobot):
    """Given initial robot configuration qrobot0 and object transform Tobj0,
    returns the object transformation corresponding to new configuration
    qrobot assuming the object is rigidly attached to the hand"""
    robot.setConfig(qrobot0)
    Thand0 = robot.link(hand.link).getTransform()
    Tgrasp = se3.mul(se3.inv(Thand0), Tobj0)
    robot.setConfig(qrobot)
    Thand = robot.link(hand.link).getTransform()
    return se3.mul(Thand, Tgrasp)
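The rigid-attachment logic reduces to Tobj = Thand * inv(Thand0) * Tobj0: the object-in-hand transform Tgrasp is captured once and stays constant while the object is held. The same computation in plain numpy with 4x4 homogeneous matrices, independent of Klampt (all names illustrative):

import numpy as np

def grasped_object_transform(Thand0, Tobj0, Thand):
    #Tgrasp = inv(Thand0) * Tobj0 is constant while the object is rigidly held
    Tgrasp = np.linalg.solve(Thand0, Tobj0)
    return Thand.dot(Tgrasp)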
Example #9
File: ex.py Project: arocchi/Klampt
def graspedObjectTransform(robot,hand,qrobot0,Tobj0,qrobot):
    """Given initial robot configuration qrobot0 and object transform Tobj0,
    returns the object transformation corresponding to new configuration
    qrobot assuming the object is rigidly attached to the hand"""
    robot.setConfig(qrobot0)
    Thand0 = robot.link(hand.link).getTransform()
    Tgrasp = se3.mul(se3.inv(Thand0),Tobj0)
    robot.setConfig(qrobot)
    Thand = robot.link(hand.link).getTransform()
    return se3.mul(Thand,Tgrasp)
Example #10
	def getSensedValue(self, q):
		"""Returns CoM position
		"""
		self.robot.setConfig(q)
		com = self.robot.getCom()
		if self.baseLinkNo >= 0:
			Tb = self.robot.getLink(self.baseLinkNo).getTransform()
			Tbinv = se3.inv(Tb)
			com = se3.apply(Tbinv,com)
		return com
Example #11
    def preGraspAction(self,object):
        
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()
        qcmd = self.controller.getCommandedConfig()

        isPreGrasp = False
        #pregrasp - align the gripper with the grasp
        for i in range(1000):
            #pick a config at random
            self.randomize_limb_position(self.active_limb)
            #pick a grasp at random
            (grasp,gxform,pregrasp) = random.choice(grasps)
            if self.active_limb == 'left':
                Tg = se3.mul(pregrasp,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(pregrasp,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            if ik.solve(goal):
                if(self.check_arms_for_collisions(True)):
                    
                    #TODO: plan a path
                    finalConfig = self.robot.getConfig()
                    #self.controller.setMilestone(self.robot.getConfig())
                    if self.active_limb == 'left':
                        path = self.planner.plan(qcmd,finalConfig)
                    else:
                        path = self.planner.plan(qcmd,finalConfig,order=['right','left'])
                            
                    if path == None:
                        continue
                    else:
                        isPreGrasp = True
                        for pp in path:
                            self.controller.setMilestone(pp)
                            self.waitForMove()
                        break

        if(not isPreGrasp):
            return False

        return True
Example #12
    def getSensedValue(self, q):
        """Returns CoM position
		"""
        self.robot.setConfig(q)
        com = self.robot.getCom()
        if self.baseLinkNo >= 0:
            Tb = self.robot.getLink(self.baseLinkNo).getTransform()
            Tbinv = se3.inv(Tb)
            com = se3.apply(Tbinv, com)
        return com
Example #13
    def update(self):
        self.objXform = self.object.getTransform()
        self.robotXform = self.robot.link(6).getTransform()
        self.relativeXform = se3.mul(se3.inv(self.objXform), self.robotXform)

        # for sanity check (checked that the relative transform output is correct)
        # print "obj", self.objXform[1], "robot", self.robotXform[1], "relative", self.relativeXform[1]

        self.robotConfig = []
        for i in self.validLinks:
            self.robotConfig.append(self.robot.getConfig()[i])
Example #14
	def getJacobian(self, q):
		"""Returns axis-weighted CoM Jacobian by averaging
		mass-weighted Jacobian of each link.
		"""
		self.robot.setConfig(q)
		numLinks = self.robot.numLinks()
		Jcom = np.array(self.robot.getComJacobian())
		#if relative positioning task, subtract out COM jacobian w.r.t. base
		if self.baseLinkNo >= 0:
			Tb = self.robot.getLink(self.baseLinkNo).getTransform()
			pb = se3.apply(se3.inv(Tb),self.robot.getCom())
			Jb = np.array(self.robot.getLink(self.baseLinkNo).getPositionJacobian(pb))
			Jcom -= Jb
		if self.activeDofs != None:
			Jcom = select_cols(Jcom,self.activeDofs)
		return Jcom
Example #15
	def getJacobian(self, q):
		"""Returns axis-weighted CoM Jacobian by averaging
		mass-weighted Jacobian of each link.
		"""
		self.robot.setConfig(q)
		numLinks = self.robot.numLinks()
		Jcom = np.array(self.robot.getComJacobian())
		#if relative positioning task, subtract out COM jacobian w.r.t. base
		if self.baseLinkNo >= 0:
			Tb = self.robot.link(self.baseLinkNo).getTransform()
			pb = se3.apply(se3.inv(Tb),self.robot.getCom())
			Jb = np.array(self.robot.link(self.baseLinkNo).getPositionJacobian(pb))
			Jcom -= Jb
		if self.activeDofs != None:
			Jcom = select_cols(Jcom,self.activeDofs)
		return Jcom
Example #16
File: ex.py Project: arocchi/Klampt
 def __init__(self,globals,hand,object):
     CSpace.__init__(self)
     self.globals = globals
     self.robot = globals.robot
     self.hand = hand
     self.object = object
     #setup initial object-robot transform
     Thand0 = self.robot.link(hand.link).getTransform()
     Tobj0 = object.getTransform()
     self.Tgrasp = se3.mul(se3.inv(Thand0),Tobj0)
     #initial whole-body configuration
     self.q0 = self.robot.getConfig()
     #setup CSpace sampling range
     qlimits = zip(*self.robot.getJointLimits())
     self.bound = [qlimits[i] for i in self.hand.armIndices]
     #setup CSpace edge checking epsilon
     self.eps = 1e-2
Example #17
 def __init__(self, globals, hand, object):
     CSpace.__init__(self)
     self.globals = globals
     self.robot = globals.robot
     self.hand = hand
     self.object = object
     #setup initial object-robot transform
     Thand0 = self.robot.link(hand.link).getTransform()
     Tobj0 = object.getTransform()
     self.Tgrasp = se3.mul(se3.inv(Thand0), Tobj0)
     #initial whole-body configuration
     self.q0 = self.robot.getConfig()
     #setup CSpace sampling range
     qlimits = zip(*self.robot.getJointLimits())
     self.bound = [qlimits[i] for i in self.hand.armIndices]
     #setup CSpace edge checking epsilon
     self.eps = 1e-2
Example #18
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.

        If successful, changes self.config and returns True.
        Otherwise, does not change self.config and returns False.
        """
        print
        print 'Trying to grasp ' + object.info.name
        grasp_transforms = self.knowledge.grasp_xforms(object)

        #try all of the grasps and stop if one is successful
        for grasp_t in grasp_transforms:

            #get the world point goal, which is offset from grasp_t
            rot_xform = se3.mul(grasp_t,se3.inv(left_gripper_center_xform))
            world_goal_xyz = rot_xform[1]

            cnt = 0
            while cnt < 100:
                #pick a random starting config for the left arm and try to solve the IK problem
                config = self.robot.getConfig()
                for ind in self.left_arm_indices:
                    config[ind] = random.uniform(0,3)

                self.robot.setConfig(config)

                goal = ik.objective(self.left_gripper_link,R=grasp_t[0],t=world_goal_xyz)

                if ik.solve(goal):
                    self.active_limb = 'Left'
                    self.config = self.robot.getConfig()
                    return True

                cnt = cnt + 1

        return False
Example #19
    def getSensedValue(self, q):
        """Get link x, which is rotation matrix and/or translation
		"""
        self.robot.setConfig(q)
        T = self.link.getTransform()
        #check if relative transform task, modify T to local transform
        if self.baseLinkNo >= 0:
            Tb = self.baseLink.getTransform()
            Tbinv = se3.inv(Tb)
            T = se3.mul(Tbinv, T)
        if self.taskType == 'po':
            x = (T[0], se3.apply(T, self.localPosition))
        elif self.taskType == 'position':
            x = se3.apply(T, self.localPosition)
        elif self.taskType == 'orientation':
            x = T[0]
        else:
            raise ValueError("Invalid taskType " + self.taskType)
        return x
Example #20
	def getSensedValue(self, q):
		"""Get link x, which is rotation matrix and/or translation
		"""
		self.robot.setConfig(q)
		T = self.link.getTransform()
		#check if relative transform task, modify T to local transform
		if self.baseLinkNo >= 0:
			Tb = self.baseLink.getTransform()
			Tbinv = se3.inv(Tb)
			T = se3.mul(Tbinv,T)
		if self.taskType == 'po':
			x = (T[0],se3.apply(T,self.localPosition))
		elif self.taskType == 'position':
			x = se3.apply(T,self.localPosition)
		elif self.taskType == 'orientation':
			x = T[0]
		else:
			raise ValueError("Invalid taskType "+self.taskType)
		return x
Example #21
 def emulate(self, sim):
     """Given a Simulator instance, emulates the sensor.
     Result is a CameraColorDetectorOutput structure."""
     global objectColors
     xscale = math.tan(math.radians(self.fov * 0.5)) * self.w / 2
     blobs = []
     for i in range(sim.world.numRigidObjects()):
         o = sim.world.rigidObject(i)
         body = sim.body(o)
         Tb = body.getTransform()
         plocal = se3.apply(se3.inv(self.Tsensor), Tb[1])
         x, y, z = plocal
         if z < self.dmin or z > self.dmax:
             continue
         c = objectColors[i % len(objectColors)]
         o.geometry().setCurrentTransform(
             so3.mul(so3.inv(self.Tsensor[0]), Tb[0]), [0] * 3)
         bmin, bmax = o.geometry().getBB()
         err = self.pixelError
         dscale = xscale / z
         xim = dscale * x + self.w / 2
         yim = dscale * y + self.h / 2
         xmin = int(xim - dscale * (bmax[0] - bmin[0]) * 0.5 +
                    random.uniform(-err, err))
         ymin = int(yim - dscale * (bmax[1] - bmin[1]) * 0.5 +
                    random.uniform(-err, err))
         xmax = int(xim + dscale * (bmax[0] - bmin[0]) * 0.5 +
                    random.uniform(-err, err))
         ymax = int(yim + dscale * (bmax[1] - bmin[1]) * 0.5 +
                    random.uniform(-err, err))
         if xmin < 0: xmin = 0
         if ymin < 0: ymin = 0
         if xmax >= self.w: xmax = self.w - 1
         if ymax >= self.h: ymax = self.h - 1
         if ymax <= ymin: continue
         if xmax <= xmin: continue
         #print "Actual x,y,z",x,y,z
         #print "blob color",c[0:3],"dimensions",xmin,xmax,"x",ymin,ymax
         blob = CameraBlob(c[0:3], 0.5 * (xmin + xmax), 0.5 * (ymin + ymax),
                           xmax - xmin, ymax - ymin)
         blobs.append(blob)
     return CameraColorDetectorOutput(blobs)
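emulate projects camera-frame points with a per-depth pixel scale: a point (x, y, z) maps to (dscale*x + w/2, dscale*y + h/2) with dscale = xscale/z. A standalone sketch of just that projection step, mirroring the scale factor the example computes:

import math

def project_point(p, fov, w, h):
    #project a camera-frame point (x, y, z), z > 0, to image coordinates
    x, y, z = p
    xscale = math.tan(math.radians(fov * 0.5)) * w / 2
    dscale = xscale / z
    return (dscale * x + w / 2, dscale * y + h / 2)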
Example #22
 def bin_vantage_point(self,bin_name):
     #you may want to implement this.  Returns the world position of the
     #vantage point for viewing the bin.  The visualizer will draw these points if
     #'draw_bins' is turned to True.
     
     # determine the vantage point by moving the front center point back from the shelf
     # start by getting the front center in world coordinates
     front_center_world = self.bin_front_center(bin_name)
     if not front_center_world:        
         return None
         
     # move back into local coordinates since the local z frame is aligned with the offset direction
     front_center_local = se3.apply(se3.inv(self.shelf_xform), front_center_world)
     
     # now offset the front center point along z to get the vantage point
     vantage_point_local = vectorops.add(front_center_local, [ 0, 0, vantage_point_offset ])
     
     # transform back into world coordinates
     vantage_point_world = se3.apply(self.shelf_xform, vantage_point_local)
     
     return vantage_point_world
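The pattern in bin_vantage_point, offsetting a world point along a frame's local z axis, can be isolated into a helper. A sketch (hypothetical function; assumes klampt.math's se3 and vectorops):

from klampt.math import se3, vectorops

def offset_along_local_z(frame_xform, point_world, offset):
    #map the point into the frame, shift along local z, and map back to world
    p_local = se3.apply(se3.inv(frame_xform), point_world)
    return se3.apply(frame_xform, vectorops.add(p_local, [0, 0, offset]))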
Example #23
	def crop_cloud(self, cloud, bin_bound, perturb_xform):
		'''
		crop the cloud to keep only those that are in the bin_bound transformed by perturb_xform

		bin_bound is a list of two lists of three numbers: [[xmin, ymin, zmin], [xmax, ymax, zmax]]
		
		solution: rather than transforming bin_bound and testing for inside-ness of a skewed box, inverse-transform
		the cloud and test for inside-ness of an axis-parallel box.
		'''
		assert len(cloud.shape)==2, 'cloud shape must be N x 3 or N x 4 (with color)'
		if bin_bound is None:
			return cloud
		
		min_list, max_list = bin_bound
		xmin, ymin, zmin = min_list
		xmax, ymax, zmax = max_list

		if perturb_xform is None:
			perturb_xform = [[1,0,0,0,1,0,0,0,1],[0,0,0]] # identity xform
		cloud_xyz = cloud[:,0:3]
		inv_perturb_R, inv_perturb_t = se3.inv(perturb_xform)
		inv_perturb_R = np.array(inv_perturb_R).reshape(3,3).T
		inv_perturb_t = np.array(inv_perturb_t)
		cloud_xyz_inv_xformed = cloud_xyz.dot(inv_perturb_R.T) + inv_perturb_t
		xmin_flag = cloud_xyz_inv_xformed[:,0] >= xmin
		xmax_flag = cloud_xyz_inv_xformed[:,0] <= xmax
		ymin_flag = cloud_xyz_inv_xformed[:,1] >= ymin
		ymax_flag = cloud_xyz_inv_xformed[:,1] <= ymax
		zmin_flag = cloud_xyz_inv_xformed[:,2] >= zmin
		zmax_flag = cloud_xyz_inv_xformed[:,2] <= zmax
		# print cloud_xyz_inv_xformed.mean(axis=0)
		# print xmin, ymin, zmin
		# print xmax, ymax, zmax
		in_flag = reduce(np.logical_and, [xmin_flag, xmax_flag, ymin_flag, ymax_flag, zmin_flag, zmax_flag])
		cropped_cloud = cloud[in_flag, :]
		print 'Cropped cloud from %i points to %i points'%(cloud.shape[0], cropped_cloud.shape[0])
		return cropped_cloud
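A minimal usage sketch for crop_cloud, assuming an instance p of the (unshown) perception class it belongs to; the perturbation here is a small translation of the nominal bin frame:

import numpy as np

cloud = np.random.uniform(-1.0, 1.0, size=(1000, 3))     #synthetic N x 3 cloud
bin_bound = [[-0.2, -0.2, -0.2], [0.2, 0.2, 0.2]]        #axis-aligned bin in the nominal frame
perturb_xform = [[1,0,0,0,1,0,0,0,1], [0.05, 0.0, 0.0]]  #identity rotation, 5 cm shift
cropped = p.crop_cloud(cloud, bin_bound, perturb_xform)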
Example #24
 def emulate(self,sim):
     """Given a Simulator instance, emulates the sensor.
     Result is a CameraColorDetectorOutput structure."""
     global objectColors
     xscale = math.tan(math.radians(self.fov*0.5))*self.w/2
     blobs = []
     for i in range(sim.world.numRigidObjects()):
         o = sim.world.rigidObject(i)
         body = sim.getBody(o)
         Tb = body.getTransform()
         plocal = se3.apply(se3.inv(self.Tsensor),Tb[1])
         x,y,z = plocal
         if z < self.dmin or z > self.dmax:
             continue
         c = objectColors[i%len(objectColors)]
         o.geometry().setCurrentTransform(*se3.identity())
         bmin,bmax = o.geometry().getBB()
         err = self.pixelError
         dscale = xscale/z
         xim = dscale * x + self.w/2 
         yim = dscale * y + self.h/2
         xmin = int(xim - dscale*(bmax[0]-bmin[0])*0.5 + random.uniform(-err,err))
         ymin = int(yim - dscale*(bmax[1]-bmin[1])*0.5 + random.uniform(-err,err))
         xmax = int(xim + dscale*(bmax[0]-bmin[0])*0.5 + random.uniform(-err,err))
         ymax = int(yim + dscale*(bmax[1]-bmin[1])*0.5 + random.uniform(-err,err))
         if xmin < 0: xmin=0
         if ymin < 0: ymin=0
         if xmax >= self.w: xmax=self.w-1
         if ymax >= self.h: ymax=self.h-1
         if ymax <= ymin: continue
         if xmax <= xmin: continue
         #print "Actual x,y,z",x,y,z
         #print "blob color",c[0:3],"dimensions",xmin,xmax,"x",ymin,ymax
         blob = CameraBlob(c[0:3],0.5*(xmin+xmax),0.5*(ymin+ymax),xmax-xmin,ymax-ymin)
         blobs.append(blob)
     return CameraColorDetectorOutput(blobs)
Example #25
    def check_collision_free_with_object(self,limb,objectGeom,grasp):
        """Checks whether the given limb, holding an object at the given
        grasp, is collision free at the robot's current configuration"""
        if not self.check_collision_free(limb):
            return False
        objToGripperXForm = se3.mul(left_gripper_center_xform,se3.inv(grasp.grasp_xform))
        #assume robot configuration is updated
        if limb=='left':
            gripperLink = self.robot.link(left_gripper_link_name)
        else:
            gripperLink = self.robot.link(right_gripper_link_name)

        Tgripper = gripperLink.getTransform()
        Tobj = se3.mul(Tgripper,objToGripperXForm)
        objectGeom.setCurrentTransform(*Tobj)
        for t in xrange(self.world.numTerrains()):
            if self.world.terrain(t).geometry().collides(objectGeom):
                # print "Held object-shelf collision"
                return False
        for o in self.dynamic_objects:
            if o.info.geometry.collides(objectGeom):
                # print "Held object-object collision"
                return False
        return True
Example #26
            #    print 'Save file', path, 'already exists. Overwrite? [y/N]',
            #    prompt = raw_input()
            #    if prompt.lower() != 'y':
            #        continue

            #while not master.manager.control.update().done: sleep(0.1)
            #while not master.manager.control.update().done: sleep(0.1)
            robot.setConfig(kb.robot_state.sensed_config)
            #debug_world(world)

            #object_xform = kb.object_xforms[kb.target_object]
            gripper_xform = se3.mul(robot.getLink('left_gripper').getTransform(), baxter.left_gripper_center_xform)
            print gripper_xform
            grasp = {
                'name': name,
                'grasp_xform': se3.mul(se3.inv(object_xform), gripper_xform),
                'gripper_close_command': [0.8, 0.8, 0.0, 0.1],
                'gripper_open_command': [0.3, 0.3, 0.0, 0.1]
            }

            print json.dumps(grasp, indent=4)

            json.dump(grasp, open(path, 'w'), indent=4)

        elif cmd == 'gripper' and len(args) == 1:
            task = None

            if args[0] == 'parallel':
                task = master.manager.control.execute([
                    ( 'left_gripper', [0.5, 0.5, 0.5, 1], 0, 0),
                    ( 'right_gripper', [0.5, 0.5, 0.5, 1], 0, 0),
Example #27
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.  Must change self.active_grasp to the
        selected grasp.

        If successful, sends the motion to the low-level controller and
        returns True.
        
        Otherwise, does not modify the low-level controller and returns False.
        """

        collisionTries = 0
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()
        qcmd = self.controller.getCommandedConfig()
        #phase 1: init IK from the commanded config, search among grasps
        for (grasp,gxform) in grasps:
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            self.robot.setConfig(qcmd)
            if ik.solve(goal):
                # check that this solution is self collision free
                # we call the self collision function of WorldCollider
                # and see how many items the generator returns. If it is 0
                # then we don't collide (if it is not zero, we don't care what
                # they are, we throw the solution away and retry with a different
                # initial arm configuration).
                collisionTries += 1
                collisions = self.collider.robotSelfCollisions(robot = self.robot)
                numCols = 0
                for cols in collisions:
                    numCols += 1

                if numCols == 0:
                    self.controller.setMilestone(self.robot.getConfig())
                    self.active_grasp = grasp
                    print "Found self collision free IK solution after " + str(collisionTries) + " tries"
                    return True

        #Phase 2: that didn't work, now try random sampling
        for i in range(100):
            #pick a config at random
            self.randomize_limb_position(self.active_limb)
            #pick a grasp at random
            (grasp,gxform) = random.choice(grasps)
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            if ik.solve(goal):
                #TODO: plan a path

                # check that this solution is self collision free
                # we call the self collision function of WorldCollider
                # and see how many items the generator returns. If it is 0
                # then we don't collide (if it is not zero, we don't care what
                # they are, we throw the solution away and retry with a different
                # initial arm configuration).
                collisionTries += 1
                collisions = self.collider.robotSelfCollisions(robot = self.robot)
                numCols = 0
                for cols in collisions:
                    numCols += 1

                if numCols == 0:
                    self.controller.setMilestone(self.robot.getConfig())
                    self.active_grasp = grasp
                    print "Found self collision free IK solution after " + str(collisionTries) + " tries"
                    return True
        return False
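The generator-counting idiom above (iterate robotSelfCollisions and count the pairs) can be collapsed with any(), which also stops at the first colliding pair instead of exhausting the generator. A sketch using the same WorldCollider call as the example:

def has_self_collision(collider, robot):
    #True as soon as WorldCollider yields any self-colliding link pair
    return any(True for _ in collider.robotSelfCollisions(robot=robot))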
Example #28
    def display(self):
        #you may run auxiliary openGL calls, if you wish to visually debug

        #draw the world
        self.sim.updateWorld()
        self.simworld.drawGL()

        #if you're doing question 1, this will draw the shelf and floor
        if self.simworld.numTerrains()==0:
            for i in range(self.planworld.numTerrains()):
                self.planworld.terrain(i).drawGL()

        #draw commanded configurations
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
        glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[0,1,0,0.5])
        # only 1 robot in this case, but still use for-loop for generality
        for i in xrange(self.simworld.numRobots()):
            r = self.simworld.robot(i)
            #q = self.sim.controller(i).getCommandedConfig()
            q = self.low_level_controller.getCommandedConfig()
            r.setConfig(q)
            r.drawGL(False)
        glDisable(GL_BLEND)

        #show bin boxes
        if self.draw_bins:
            glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[1,1,0,1])
            for b in apc.bin_bounds.values():
                draw_oriented_box(self.picking_controller.knowledge.shelf_xform,b[0],b[1])
            for b in apc.bin_names:
                c = self.picking_controller.knowledge.bin_front_center(b)
                if c:
                    glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[1,1,0.5,1])
                    r = 0.01
                    gldraw.box([c[0]-r,c[1]-r,c[2]-r],[c[0]+r,c[1]+r,c[2]+r])
                c = self.picking_controller.knowledge.bin_vantage_point(b)
                if c:
                    glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[0.5,1,0.5,1])
                    r = 0.01
                    gldraw.box([c[0]-r,c[1]-r,c[2]-r],[c[0]+r,c[1]+r,c[2]+r])

        #show object state
        for i in ground_truth_items:
            if i.xform == None:
                continue
            if i.bin_name == 'order_bin':
                continue
            #if perceived, draw in solid color
            if self.picking_controller.knowledge.bin_contents[i.bin_name]!=None and i in self.picking_controller.knowledge.bin_contents[i.bin_name]:
                glMaterialfv(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE,[1,0.5,0,1])
                draw_oriented_box(i.xform,i.info.bmin,i.info.bmax)
            else:
                #otherwise, draw in wireframe
                glDisable(GL_LIGHTING)
                glColor3f(1,0.5,0)
                draw_oriented_wire_box(i.xform,i.info.bmin,i.info.bmax)
                glEnable(GL_LIGHTING)
            if self.draw_grasps:
                #draw grasps, if available
                g = self.picking_controller.knowledge.grasp_xforms(i)
                if g:
                    for grasp,xform in g:
                        gldraw.xform_widget(xform,0.05,0.005,fancy=False)

        # Draws the object held on gripper
        obj,limb,grasp = self.picking_controller.held_object,self.picking_controller.active_limb,self.picking_controller.active_grasp
        if obj != None:
            if limb == 'left':
                gripper_xform = self.simworld.robot(0).link(left_gripper_link_name).getTransform()
            else:
                gripper_xform = self.simworld.robot(0).link(right_gripper_link_name).getTransform()
            objxform = se3.mul(gripper_xform,se3.mul(left_gripper_center_xform,se3.inv(grasp.grasp_xform)))
            glDisable(GL_LIGHTING)
            glColor3f(1,1,1)
            draw_oriented_wire_box(objxform,obj.info.bmin,obj.info.bmax)
            glEnable(GL_LIGHTING)

        #show gripper and camera frames
        if self.draw_gripper_and_camera:
            left_camera_link = self.simworld.robot(0).link(left_camera_link_name)
            right_camera_link = self.simworld.robot(0).link(right_camera_link_name)
            left_gripper_link = self.simworld.robot(0).link(left_gripper_link_name)
            right_gripper_link = self.simworld.robot(0).link(right_gripper_link_name)
            gldraw.xform_widget(left_camera_link.getTransform(),0.1,0.01,fancy=False)
            gldraw.xform_widget(right_camera_link.getTransform(),0.1,0.01,fancy=False)
            gldraw.xform_widget(se3.mul(left_gripper_link.getTransform(),left_gripper_center_xform),0.05,0.005,fancy=False)
            gldraw.xform_widget(se3.mul(right_gripper_link.getTransform(),right_gripper_center_xform),0.05,0.005,fancy=False)

        # Show world frame and shelf frame
        gldraw.xform_widget(ground_truth_shelf_xform, 0.1, 0.015, lighting=False, fancy=True)
        gldraw.xform_widget(se3.identity(), 0.2, 0.037, lighting=False, fancy=True)

        #draw order box
        glDisable(GL_LIGHTING)
        glColor3f(1,0,0)
        draw_oriented_wire_box(order_bin_xform,order_bin_bounds[0],order_bin_bounds[1])
        glEnable(GL_LIGHTING)
        return
Example #29
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.  Must change self.active_grasp to the
        selected grasp.

        If successful, sends the motion to the low-level controller and
        returns True.

        Otherwise, does not modify the low-level controller and returns False.
        """
        self.waitForMove()
        self.controller.commandGripper(self.active_limb,[1])
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()

        #get the end of the commandGripper motion
        qcmd = self.controller.getCommandedConfig()
        self.robot.setConfig(qcmd)
        set_model_gripper_command(self.robot,self.active_limb,[1])
        qcmd = self.robot.getConfig()

        #solve an ik solution to one of the grasps
        grasp_goals = []
        pregrasp_goals = []
        pregrasp_shift = [0,0,0.05]
        for (grasp,gxform) in grasps:
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
                Tpg = se3.mul(gxform,se3.inv((left_gripper_center_xform[0],vectorops.add(left_gripper_center_xform[1],pregrasp_shift))))
                pregoal = ik.objective(self.left_gripper_link,R=Tpg[0],t=Tpg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
                Tpg = se3.mul(gxform,se3.inv((right_gripper_center_xform[0],vectorops.add(right_gripper_center_xform[1],pregrasp_shift))))
                pregoal = ik.objective(self.right_gripper_link,R=Tpg[0],t=Tpg[1])
            grasp_goals.append(goal)
            pregrasp_goals.append(pregoal)
        sortedSolutions = self.get_ik_solutions(pregrasp_goals,[self.active_limb]*len(grasp_goals),qcmd)
        if len(sortedSolutions)==0: return False

        #prototyping hack: move straight to target
        if SKIP_PATH_PLANNING:
            for pregrasp in sortedSolutions:
                graspIndex = pregrasp[1]
                gsolns = self.get_ik_solutions([grasp_goals[graspIndex]],[self.active_limb],pregrasp[0],maxResults=1)
                if len(gsolns)==0:
                    print "Couldn't find grasp config for pregrasp? Trying another"
                else:
                    self.waitForMove()
                    self.sendPath([pregrasp[0],gsolns[0][0]])
                    self.active_grasp = grasps[graspIndex][0]
                    return True
            print "Planning failed"
            return False
        for solution in sortedSolutions:
            path = self.planner.plan(qcmd,solution[0])
            graspIndex = solution[1]
            if path != None:
                #now solve for grasp
                gsolns = self.get_ik_solutions([grasp_goals[graspIndex]],[self.active_limb],path[-1],maxResults=1)
                if len(gsolns)==0:
                    print "Couldn't find grasp config??"
                else:
                    self.waitForMove()
                    self.sendPath(path+[gsolns[0][0]])
                    self.active_grasp = grasps[graspIndex][0]
                    return True
        print "Planning failed"
        return False
Example #30
    def move_to_grasp_object(self,object):
        """Sets the robot's configuration so the gripper grasps object at
        one of its potential grasp locations.  Might change self.active_limb
        to the appropriate limb.  Must change self.active_grasp to the
        selected grasp.

        If successful, sends the motion to the low-level controller and
        returns True.
        
        Otherwise, does not modify the low-level controller and returns False.
        """
        grasps = self.knowledge.grasp_xforms(object)
        qmin,qmax = self.robot.getJointLimits()
        qcmd = self.controller.getCommandedConfig()


        #phase 1: init IK from the commanded config, search among grasps
        for (grasp,gxform,pregrasp) in grasps:
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])
            self.robot.setConfig(qcmd)
            if ik.solve(goal):
                if(self.check_arms_for_collisions(True)):
                    #TODO plan a path
                    finalConfig = self.robot.getConfig()

                    if self.active_limb == 'left':
                        path = self.planner.plan(qcmd,finalConfig)
                    else:
                        path = self.planner.plan(qcmd,finalConfig,order=['right','left'])
                            
                    if path == None:
                        continue
                    else:
                        for pp in path:
                            self.controller.setMilestone(pp)
                            self.waitForMove()
                        self.active_grasp = grasp
                        return True

        #Phase 2: that didn't work, now try random sampling
        for i in range(5000):           
            #pick a config at random
            self.randomize_limb_position(self.active_limb)
            #pick a grasp at random
            (grasp,gxform,pregrasp) = random.choice(grasps)
            if self.active_limb == 'left':
                Tg = se3.mul(gxform,se3.inv(left_gripper_center_xform))
                goal = ik.objective(self.left_gripper_link,R=Tg[0],t=Tg[1])
            else:
                Tg = se3.mul(gxform,se3.inv(right_gripper_center_xform))
                goal = ik.objective(self.right_gripper_link,R=Tg[0],t=Tg[1])

            if ik.solve(goal):
                if(self.check_arms_for_collisions(False)):
                    finalConfig = self.robot.getConfig()
                    if self.active_limb == 'left':
                        path = self.planner.plan(qcmd,finalConfig)
                    else:
                        path = self.planner.plan(qcmd,finalConfig,order=['right','left'])
                            
                    if path == None:
                        continue
                    else:
                        for pp in path:
                            self.controller.setMilestone(pp)
                            self.waitForMove()
                        self.active_grasp = grasp
                        return True

            
        return False
Example #31
    import sys
    import random
    robot_fn = "../Klampt/data/robots/baxter_col.rob"
    world = WorldModel()
    if not world.readFile(robot_fn):
        exit(1)
    robot = world.robot(0)
    camera_obs_link = "head_camera"
    marker_obs_link = "left_gripper"
    lc = robot.link(camera_obs_link)
    lm = robot.link(marker_obs_link)
    pc = robot.link(lc.getParent())
    pm = robot.link(lm.getParent())
    Tc0 = se3.mul(se3.inv(pc.getTransform()),lc.getTransform())
    Tm0 = se3.mul(se3.inv(pm.getTransform()),lm.getTransform())
    print "True camera transform",Tc0
    print "True marker transform",Tm0
    print
    camera_link = pc.getName()
    marker_link = pm.getName()

    #generate synthetic data, corrupted with joint encoder and sensor measurement errors
    qmin,qmax = robot.getJointLimits()
    numObs = 10
    jointEncoderError = 1e-5
    sensorErrorRads = 1e-2
    sensorErrorMeters = 2e-3
    trueCalibrationConfigs = []
    calibrationConfigs = []
Example #32
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.
    
    Given body transforms and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    camera link as well as the marker transforms relative to their links.

    M: is the set of m markers. By default there is at most one marker per link.
       Markers can either be point markers (e.g., a mocap ball), or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O: is the set of n observations, consisting of a reading (Tc_i,Tm_i,o_i,l_i)
       where Tc_i is the camera link's transform, Tm_i is the marker link's
       transform, o_i is the reading which consists of either a point or transform
       estimate in the camera frame, and l_i is the ID of the marker (by default,
       just its link)

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's world transforms
      Tc_1,...,Tc_n at the configurations that generated the
      marker_observations list.
    - marker_link_transforms: a list of the marker links' world transforms
      Tm_1,...,Tm_n at those same configurations.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).

      If o_i is a 3-vector, the marker is considered to be a point marker.
      If a se3 element (R,t) is given, the marker is considered to be
      a transform marker.  You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list.  Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).  By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.  Stops when the change of
      estimates falls below this threshold
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v,None) for v in marker_id_list)
    for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
        if len(obs)==3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 'p'
        elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: proportional to distance
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0/vectorops.norm(obs))
            else:
                observation_weights.append(1.0/vectorops.norm(obs[1]))
        observation_weights = [1.0]*len(observation_weights)
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err,'__iter__'):
                observation_weights.append([1.0/v for v in err])
            else:
                observation_weights.append(1.0/err)

    #initial guesses
    if camera_initial_guess == None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.itervalues()):
            #estimate camera rotation from point estimates because rotations are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i,obs in enumerate(marker_observations):
                if len(obs)==2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
            print "Estimated camera rotation from points:",camera_initial_guess
    if marker_initial_guess == None:
        marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
        for i,obs in enumerate(marker_observations):
            rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
        visualization.add("coordinates",rgroup)
        visualization.dialog()

    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i
        #camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and then optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and then optimize the camera transform.
        #finally we'll check the error to detect convergence
        #1. Estimate marker transforms from current camera transform
        new_marker_transforms = dict((l,(TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor) if marker_types[l]=='t' else VectorStats(zero=[0.0]*3,prior=regularizationFactor))) for l in marker_id_list)
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel,obs)
            else:
                estimate = se3.apply(Trel,obs)
            new_marker_transforms[marker].add(estimate,observation_weights[i])
        print "ITERATION",iters
        #print "  ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
        #2. Estimate camera transform from current marker transforms
        new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
            relative_points = []
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
        else:
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
                estimate = se3.mul(Trel,se3.inv(obs))
                new_camera_transform.add(estimate,observation_weights[i])
        #print "  ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
        #3. compute difference between last and current estimates
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker]=='t':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
            print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters)
            break
        print "  ESTIMATE CHANGE:",math.sqrt(diff)
        error = 0.0
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Tc = se3.mul(Tclink,camera_transform)
            if marker_types[marker] == 't':
                Tm = se3.mul(Tmlink,marker_transforms[marker])
                error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
            else:
                Tm = se3.apply(Tmlink,marker_transforms[marker])
                error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
        print "  OBSERVATION ERROR:",math.sqrt(error)
        #raw_input()
        if DO_VISUALIZATION:
            rgroup.setFrameCoordinates("camera estimate",camera_transform)
            rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
            for i,obs in enumerate(marker_observations):
                rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
            visualization.add("coordinates",rgroup)
            visualization.dialog()
    if math.sqrt(diff) >= tolerance:
        print "Max iters reached"
    error = 0.0
    for i in xrange(n):
        marker = marker_ids[i]
        Tclink = camera_link_transforms[i]
        Tmlink = marker_link_transforms[i]
        obs = marker_observations[i]
        Tc = se3.mul(Tclink,camera_transform)
        if marker_types[marker] == 't':
            Tm = se3.mul(Tmlink,marker_transforms[marker])
            error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
        else:
            Tm = se3.apply(Tmlink,marker_transforms[marker])
            error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
    return (math.sqrt(error),camera_transform,marker_transforms)
Example #33
0
'''Reference points'''
frwrd = [0,0,-1,0,1,0,1,0,0] # rotation relative to the global/Baxter frame that points the hand's z axis forward
ext = [0.91,-0.41,1.46]

'''Transforms'''
T_target = [frwrd,ext]
T_EEtoPPU = [[-1,0,0,0,0,1,0,1,0], [0,0,0]]

# '''Correction loop'''
# while True:
# 	T_EE = robot.right_ee.sensedTransform()
# 	R_PPU = so3.mul(T_EEtoPPU[0],T_EE[0])
# 	roundEachAndPrint(R_PPU,3)

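#sanity-check sketch, assuming klampt's se3 and vectorops modules are imported
#(se3.mul(A,B) applies B first, then A): composing T_EEtoPPU back onto the
#commanded EE transform should recover T_target exactly
T_check = se3.mul(T_EEtoPPU, se3.mul(se3.inv(T_EEtoPPU), T_target))
assert vectorops.norm(se3.error(T_check, T_target)) < 1e-9
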
while True:
	T_bestEE = se3.mul(se3.inv(T_EEtoPPU),T_target)
	T_EEcmd = T_bestEE
	robot.right_ee.moveTo(T_EEcmd[0],T_EEcmd[1])

# while True:
# 	transform = robot.right_ee.sensedTransform()
# 	position = transform[1]
# 	rotation = transform[0]
# 	roundEachAndPrint(rotation,3)

Example #34
0
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.
    
    Given body transforms and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    camera link as well as the marker transforms relative to their links.

    M is the set of m markers. By default there is at most one marker per link.
       Markers can be either point markers (e.g., a mocap ball) or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O is the set of n observations, consisting of readings (Tc_i,Tm_i,o_i,l_i)
       where Tc_i is the camera link's transform, Tm_i is the marker link's
       transform, o_i is the reading, which consists of either a point or a
       transform estimate in the camera frame, and l_i is the ID of the marker
       (by default, just its link index).

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's world transforms
      Tc_1,...,Tc_n, one per observation.
    - marker_link_transforms: a list of the marker links' world transforms
      Tm_1,...,Tm_n, one per observation.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).

      If o_i is a 3-vector, the marker is considered to be a point marker.
      If an se3 element (R,t) is given, the marker is considered to be
      a transform marker.  You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list.  Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).  By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera and marker transforms from their initial guesses
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.  Stops when the change in
      estimates falls below this threshold.
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v,None) for v in marker_id_list)
    for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
        if len(obs)==3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 'p'
        elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: inversely proportional to distance (observation error
        #is assumed to grow with the camera-marker distance)
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0/vectorops.norm(obs))
            else:
                observation_weights.append(1.0/vectorops.norm(obs[1]))
        #observation_weights = [1.0]*len(observation_weights)   #uncomment to force uniform weights
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err,'__iter__'):
                observation_weights.append([1.0/v for v in err])
            else:
                observation_weights.append(1.0/err)

    #initial guesses
    if camera_initial_guess is None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.itervalues()):
            #estimate camera rotation from point estimates because rotations are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i,obs in enumerate(marker_observations):
                if len(obs)==2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
            print "Estimated camera rotation from points:",camera_initial_guess
    if marker_initial_guess is None:
        marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
        for i,obs in enumerate(marker_observations):
            rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
        vis.add("coordinates",rgroup)
        vis.dialog()

    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i
        #camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and then optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and then optimize the camera transform.
        #finally we'll check the error to detect convergence
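        #each half-step has a closed-form solution: holding the camera fixed,
        #marker_transform(l) is the weighted average over observations with l_i=l of
        #  Tm_i^-1 * Tc_i * camera_transform * o_i
        #and holding the markers fixed, camera_transform is the weighted average of
        #  Tc_i^-1 * Tm_i * marker_transform(l_i) * o_i^-1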
        #1. Estimate marker transforms from current camera transform
        new_marker_transforms = dict((l,(TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor) if marker_types[l]=='t' else VectorStats(zero=[0.0]*3,prior=regularizationFactor))) for l in marker_id_list)
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel,obs)
            else:
                estimate = se3.apply(Trel,obs)
            new_marker_transforms[marker].add(estimate,observation_weights[i])
        print "ITERATION",iters
        #print "  ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
        #2. Estimate camera transform from current marker transforms
        new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
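            #point_fit_xform_3d finds the rigid transform that best maps the raw
            #camera-frame observations onto the marker points expressed in the
            #camera link's frame, in the least-squares sense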
            relative_points = []
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
        else:
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
                estimate = se3.mul(Trel,se3.inv(obs))
                new_camera_transform.add(estimate,observation_weights[i])
        #print "  ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
        #3. compute difference between last and current estimates
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker]=='t':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
            print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters)
            break
        print "  ESTIMATE CHANGE:",math.sqrt(diff)
        error = 0.0
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Tc = se3.mul(Tclink,camera_transform)
            if marker_types[marker] == 't':
                Tm = se3.mul(Tmlink,marker_transforms[marker])
                error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
            else:
                Tm = se3.apply(Tmlink,marker_transforms[marker])
                error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
        print "  OBSERVATION ERROR:",math.sqrt(error)
        #raw_input()
        if DO_VISUALIZATION:
            rgroup.setFrameCoordinates("camera estimate",camera_transform)
            rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
            for i,obs in enumerate(marker_observations):
                rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
            vis.add("coordinates",rgroup)
            vis.dialog()
    if math.sqrt(diff) >= tolerance:
        print "Max iters reached"
    error = 0.0
    for i in xrange(n):
        marker = marker_ids[i]
        Tclink = camera_link_transforms[i]
        Tmlink = marker_link_transforms[i]
        obs = marker_observations[i]
        Tc = se3.mul(Tclink,camera_transform)
        if marker_types[marker] == 't':
            Tm = se3.mul(Tmlink,marker_transforms[marker])
            error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
        else:
            Tm = se3.apply(Tmlink,marker_transforms[marker])
            error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
    return (math.sqrt(error),camera_transform,marker_transforms)
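#a minimal usage sketch, assuming klampt's so3/se3 modules as used above; the
#helper name and all numeric values below are illustrative, not from any source.
#It synthesizes noise-free point-marker observations from a known camera
#transform and checks that the solver recovers it.
def _calibration_smoke_test():
    import random
    Tcam_true = (so3.rotation((0,0,1),0.1),[0.05,0.0,0.02])  #assumed ground-truth camera transform
    pmark_true = [0.01,0.02,1.0]                             #assumed point marker on the marker link
    cam_links,marker_links,obs,ids = [],[],[],[]
    for i in range(20):
        #random rigid transforms standing in for the links' forward-kinematics poses
        Tclink = (so3.rotation((0,1,0),random.uniform(-1,1)),[random.uniform(-1,1) for k in range(3)])
        Tmlink = (so3.rotation((1,0,0),random.uniform(-1,1)),[random.uniform(-1,1) for k in range(3)])
        #forward model: Tclink*Tcam*o_i = Tmlink*pmark  =>  o_i = (Tclink*Tcam)^-1 * (Tmlink*pmark)
        o = se3.apply(se3.inv(se3.mul(Tclink,Tcam_true)),se3.apply(Tmlink,pmark_true))
        cam_links.append(Tclink)
        marker_links.append(Tmlink)
        obs.append(o)
        ids.append(0)
    err,Tcam_est,markers_est = calibrate_xform_camera(cam_links,marker_links,obs,ids)
    print "residual",err
    print "camera estimate",Tcam_est
    print "marker estimate",markers_est[0]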
Example #35
0
    """
    
    import sys
    import random
    robot_fn = "../Klampt/data/robots/baxter_col.rob"
    world = WorldModel()
    if not world.readFile(robot_fn):
        exit(1)
    robot = world.robot(0)
    camera_obs_link = "head_camera"
    marker_obs_link = "left_gripper"
    lc = robot.link(camera_obs_link)
    lm = robot.link(marker_obs_link)
    pc = robot.link(lc.getParent())
    pm = robot.link(lm.getParent())
    Tc0 = se3.mul(se3.inv(pc.getTransform()),lc.getTransform())
    Tm0 = se3.mul(se3.inv(pm.getTransform()),lm.getTransform())
    print "True camera transform",Tc0
    print "True marker transform",Tm0
    print
    camera_link = pc.getName()
    marker_link = pm.getName()

    #generate synthetic data, corrupted with joint encoder and sensor measurement errors
    qmin,qmax = robot.getJointLimits()
    numObs = 10
    jointEncoderError = 1e-5
    sensorErrorRads = 1e-2
    sensorErrorMeters = 2e-3
    trueCalibrationConfigs = []
    calibrationConfigs = []