Example #1
    def getCommandVelocity(self, q, dq, dt):
        """Gets the command velocity from the current state of the
        robot.
        """
        eP = self.getSensedError(q)
        #vcmd = hP*eP + hD*eV + hI*eI
        vP = gen_mul(self.hP, eP)
        vcmd = vP
        #D term
        vcur = self.getSensedVelocity(q, dq, dt)
        eD = None
        if vcur is not None:
            eD = vectorops.sub(vcur, self.dxdes)
            vD = gen_mul(self.hD, eD)
            vcmd = vectorops.add(vcmd, vD)
        #I term
        if self.eI is not None:
            vI = gen_mul(self.hI, self.eI)
            vcmd = vectorops.add(vcmd, vI)
        #print("task", self.name, "error P=", eP, "D=", eD, "E=", self.eI)
        #do acceleration limiting
        if vcur is not None:
            dvcmd = vectorops.div(vectorops.sub(vcmd, vcur), dt)
            dvnorm = vectorops.norm(dvcmd)
            if dvnorm > self.dvcmdmax:
                vcmd = vectorops.madd(vcur, dvcmd, self.dvcmdmax / dvnorm * dt)
                print(self.name, "acceleration limited by factor", self.dvcmdmax / dvnorm * dt, "to", vcmd)
        #do velocity limiting
        vnorm = vectorops.norm(vcmd)
        if vnorm > self.vcmdmax:
            vcmd = vectorops.mul(vcmd, self.vcmdmax / vnorm)
            print(self.name, "velocity limited by factor", self.vcmdmax / vnorm, "to", vcmd)
        return vcmd
def angle(a, b):
    anorm = vectorops.norm(a)
    bnorm = vectorops.norm(b)
    if anorm < 1e-6 or bnorm < 1e-6: return 0
    dp = vectorops.dot(vectorops.div(a, anorm), vectorops.div(b, bnorm))
    dp = max(min(dp, 1), -1)
    return math.acos(dp)
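The saturation logic at the end of getCommandVelocity is worth isolating: the command is first pulled back toward the current velocity so the implied acceleration stays under dvcmdmax, then rescaled so its speed stays under vcmdmax. A minimal standalone sketch of that pattern; the limits vmax and dvmax here are made-up values, not taken from the class above:

from klampt.math import vectorops

def clamp_command(vcmd, vcur, dt, vmax=1.0, dvmax=5.0):
    """Saturate vcmd by acceleration first, then by speed."""
    # acceleration limiting: shrink the step away from vcur
    dv = vectorops.div(vectorops.sub(vcmd, vcur), dt)
    dvnorm = vectorops.norm(dv)
    if dvnorm > dvmax:
        vcmd = vectorops.madd(vcur, dv, dvmax / dvnorm * dt)
    # velocity limiting: rescale to the maximum speed
    vnorm = vectorops.norm(vcmd)
    if vnorm > vmax:
        vcmd = vectorops.mul(vcmd, vmax / vnorm)
    return vcmd

print(clamp_command([3.0, 4.0], [0.0, 0.0], 0.1))  # accel limit scales [3, 4] down to [0.3, 0.4]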
Example #3
    def right_arm_ik_near_seed(self, right_target, ignore_elbow_up_constraint=True):
        """Solves IK to move the right arm to the specified
            right_target ([x, y, z] in world space), preferring solutions
            near the bin's IK seed configuration.
        """
        self.load_real_robot_state()
        self.set_model_right_arm(eval("Q_IK_SEED_" + self.current_bin))

        oldRSquared = -1
        q_ik = None

        qmin, qmax = self.robotModel.getJointLimits()
        for i in range(1000):
            # a second objective, offset from the vacuum point, pins down the approach axis
            point2_local = vectorops.add(VACUUM_POINT_XFORM[1], [0.5, 0, 0])
            point2_world = vectorops.add(right_target, [0, 0, -0.5])
            goal1 = ik.objective(self.robotModel.link("right_wrist"), local=VACUUM_POINT_XFORM[1], world=right_target)
            goal2 = ik.objective(self.robotModel.link("right_wrist"), local=point2_local, world=point2_world)
            if ik.solve([goal1, goal2], tol=0.0001) and (self.elbow_up() or ignore_elbow_up_constraint):
                q_vals = [self.robotModel.getConfig()[v] for v in self.right_arm_indices]
                rSquared = vectorops.norm(vectorops.sub(q_vals, eval("Q_IK_SEED_" + self.current_bin)))
                if oldRSquared < 0 or oldRSquared > rSquared:
                    oldRSquared = rSquared
                    q_ik = q_vals
        if oldRSquared >= 0:
            self.set_model_right_arm(q_ik)
            return True
        print(FAIL_COLOR + "right_arm_ik failed for " + str(right_target) + END_COLOR)
        if not (self.elbow_up() or ignore_elbow_up_constraint):
            print(FAIL_COLOR + str([self.robotModel.getConfig()[v] for v in self.right_arm_indices]) + END_COLOR)
            print(FAIL_COLOR + "IK found but elbow wasn't up" + END_COLOR)
        return False
Example #5
def solve_2R_inverse_kinematics(x,y,L1=1,L2=1):
    """For a 2R arm centered at the origin, solves for the joint angles
    (q1,q2) that places the end effector at (x,y).

    The result is a list of up to 2 solutions, e.g. [(q1,q2),(q1',q2')].
    """
    D = vectorops.norm((x,y))
    thetades = math.atan2(y,x)
    if D == 0:
        raise ValueError("(x,y) at origin, infinite # of solutions")
    c2 = (D**2-L1**2-L2**2)/(2.0*L1*L2)
    q2s = []
    if c2 < -1:
        print("solve_2R_inverse_kinematics: (x,y) inside inner circle")
        return []
    elif c2 > 1:
        print("solve_2R_inverse_kinematics: (x,y) out of reach")
        return []
    else:
        if c2 == 1:
            q2s = [math.acos(c2)]
        else:
            q2s = [math.acos(c2),-math.acos(c2)]
    res = []
    for q2 in q2s:
        thetaactual = math.atan2(math.sin(q2),L1+L2*math.cos(q2))
        q1 = thetades - thetaactual
        res.append((q1,q2))
    return res
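A quick sanity check for the solver is to push each returned (q1, q2) back through the planar 2R forward kinematics and confirm the target comes back out; a minimal check using the default unit link lengths:

import math

def fk_2R(q1, q2, L1=1, L2=1):
    # forward kinematics of the planar 2R arm
    x = L1 * math.cos(q1) + L2 * math.cos(q1 + q2)
    y = L1 * math.sin(q1) + L2 * math.sin(q1 + q2)
    return (x, y)

for q1, q2 in solve_2R_inverse_kinematics(1.2, 0.5):
    x, y = fk_2R(q1, q2)
    print("q=(%.3f, %.3f) -> (%.3f, %.3f)" % (q1, q2, x, y))  # reproduces (1.200, 0.500)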
Example #6
    def _solveIK(self, coord):
        """
        Returns the degrees for the pan and tilt motor and the inches for the linear actuator needed
        to reach the point [x,y,z].

        Args:
            coord - cartesian coordinates [x,y,z] in PPU base frame in inches
        Returns:
            PPU configuration [q1,q2,q3] to reach the x,y,z coordinates, in degrees and inches where
                q1 is degree angle of pan (base) motor
                q2 is degree angle of tilt motor
                q3 is extension of linear actuator in inches
        """
        xw, yw, zw = coord
        y = yw
        x = xw + PPU.xb
        z = zw - PPU.hb
        q3 = vectorops.norm((x, y, z))
        q2 = math.degrees(math.asin(-z / q3))
        if q2 == 90 or q2 == -90:
            q1 = 0  #infinite solutions
        else:
            q1 = math.degrees(math.atan2(y, x))
        if (q1 < PPU.q1LL) or (q1 > PPU.q1UL):
            raise ValueError("q1 is %f, which is not within IK joint limits." %
                             q1)
        elif (q2 < PPU.q2LL) or (q2 > PPU.q2UL):
            raise ValueError("q2 is %f, which is not within IK joint limits." %
                             q2)
        elif (q3 < PPU.q3LL) or (q3 > PPU.q3UL):
            raise ValueError("q3 is %f, which is not within IK joint limits." %
                             q3)
        else:
            return [q1, q2, q3]
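Stripped of the base offsets (PPU.xb, PPU.hb) and the joint-limit checks, _solveIK is a spherical-coordinate decomposition: q3 is the distance to the shifted target, q2 the elevation, and q1 the azimuth. A standalone sketch of just that decomposition:

import math

def pan_tilt_extend(x, y, z):
    # spherical decomposition used by _solveIK above (base offsets omitted)
    q3 = math.sqrt(x * x + y * y + z * z)  # extension: distance to target
    q2 = math.degrees(math.asin(-z / q3))  # tilt: elevation angle
    q1 = math.degrees(math.atan2(y, x))    # pan: azimuth angle
    return (q1, q2, q3)

print(pan_tilt_extend(1.0, 1.0, -1.0))  # -> (45.0, ~35.26, ~1.732)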
Example #8
    def printStatus(self, q):
        """Prints a status printout summarizing all tasks' errors."""
        priorities = set()
        names = dict()
        errors = dict()
        totalerrors = dict()
        for t in self.taskList:
            if t.weight == 0: continue
            priorities.add(t.level)
            s = t.name
            if len(s) > 8:
                s = s[0:8]
            err = t.getSensedError(q)
            names.setdefault(t.level, []).append(s)
            errors.setdefault(t.level, []).append("%.3f" % vectorops.norm(err))
            werrsq = vectorops.normSquared(vectorops.mul(err, t.weight))
            totalerrors[t.level] = totalerrors.get(t.level, 0.0) + werrsq
        cols = 5
        colwidth = 10
        for p in priorities:
            print("Priority", p, "weighted error^2", totalerrors[p])
            pnames = names[p]
            perrs = errors[p]
            start = 0
            while start < len(pnames):
                last = min(start + cols, len(pnames))
                print("  Name:  ", end='')
                for i in range(start, last):
                    print(pnames[i], ' ' * (colwidth - len(pnames[i])), end='')
                print()
                print("  Error: ", end='')
                for i in range(start, last):
                    print(perrs[i], ' ' * (colwidth - len(perrs[i])), end='')
                print()
                start = last
Example #10
    def insideGeometry(self, point, g, upperThreshold, lowerThreshold):
        """Returns True if the point (in the camera frame) lies within the
        geometry g, judged by casting a ray from the camera origin through
        the point and comparing depths."""
        direction = vectorops.unit(point)
        (hit, pt) = g.rayCast((0, 0, 0), direction)
        if hit:
            self.count2 = self.count2 + 1
            diff = vectorops.norm(point) - vectorops.norm(pt)
            if diff < upperThreshold and diff > lowerThreshold:  # point lies inside the robot body
                return True
        return False
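The same depth-comparison test can be exercised without Klampt geometry by substituting an analytic ray-sphere intersection for g.rayCast; in this sketch the sphere center, radius, and thresholds are arbitrary choices:

import math

def ray_sphere(d, c, r):
    # first positive hit of the ray t*d (|d|=1, t>=0) with the sphere |x-c|=r, or None
    b = sum(di * ci for di, ci in zip(d, c))
    disc = b * b - (sum(ci * ci for ci in c) - r * r)
    if disc < 0:
        return None
    t = b - math.sqrt(disc)
    return t if t > 0 else None

def inside_by_depth(point, c=(0.0, 0.0, 2.0), r=0.5, upper=0.1, lower=-0.1):
    # depth-comparison test from insideGeometry, against an analytic sphere
    depth = math.sqrt(sum(p * p for p in point))
    d = tuple(p / depth for p in point)
    t = ray_sphere(d, c, r)
    if t is None:
        return False
    return lower < depth - t < upper

print(inside_by_depth((0.0, 0.0, 1.55)))  # just past the front surface -> True
print(inside_by_depth((0.0, 0.0, 1.0)))   # well in front of the sphere -> False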
Example #11
    def score(self, robot):
        """Returns an error score that is equal to the optimum at a feasible
        solution. Evaluated at the robot's current configuration."""
        for obj in self.objectives:
            obj.robot = robot
        solver = ik.solver(self.objectives)
        c = (0 if self.costFunction is None else self.costFunction(robot.getConfig()))
        return c + vectorops.norm(solver.getResidual())
Example #12
def interpolate_rotation(R1,R2,u):
    """Interpolate linearly between the two rotations R1 and R2.
    TODO: the current implementation doesn't work properly.  Why? """
    m1 = so3.moment(R1)
    m2 = so3.moment(R2)
    mu = interpolate_linear(m1,m2,u)
    angle = vectorops.norm(mu)
    axis = vectorops.div(mu,angle)
    return so3.rotation(axis,angle)
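The TODO has a concrete answer: moment (axis-angle) vectors do not interpolate linearly, so the straight line between m1 and m2 is not the geodesic between the two rotations, and when R1 equals R2 the code divides by a zero angle. One sketch of a fix interpolates the relative rotation instead; Klampt's so3.interpolate does the same thing in one call, if your version provides it:

from klampt.math import so3, vectorops

def interpolate_rotation_fixed(R1, R2, u):
    # interpolate along the geodesic from R1 to R2
    Rrel = so3.mul(so3.inv(R1), R2)  # relative rotation R1^-1 * R2
    m = so3.moment(Rrel)             # its axis-angle vector; zero when R1 == R2
    return so3.mul(R1, so3.from_moment(vectorops.mul(m, u)))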
Example #13
    def robotSelfFilter(self, points, Rinv, tinv):
        """ Get rid of points that belong to the robot's arms"""
        robot = self.world.robot(0)
        arm_link_names = self.left_arm_link_names + self.right_arm_link_names
        bbMargin = 0.1
        #upperThreshold = 0.1 # Need to tune
        #lowerThreshold = -0.1 # Need to tune
        armBBs = []
        transformedG = []
        maxArmDepth = 0
        for i in arm_link_names:
            currentT = robot.link(i).getTransform()
            (newR, newt) = se3.mul((Rinv, tinv), currentT)
            g = robot.link(i).geometry()
            g.setCurrentTransform(newR, newt)
            transformedG.append(g)
            myBB = g.getBB()
            if myBB[1][2] > maxArmDepth:
                maxArmDepth = myBB[1][2]
            expandedBB = ((myBB[0][0] - bbMargin, myBB[0][1] - bbMargin,
                           myBB[0][2] - bbMargin),
                          (myBB[1][0] + bbMargin, myBB[1][1] + bbMargin,
                           myBB[1][2] + bbMargin))
            armBBs.append(expandedBB)
        allIndices = self.indicesOfInterest(armBBs)
        count = 0
        self.count2 = 0
        for j, indices in enumerate(allIndices):
            if indices is None:
                continue
            for i in indices:
                p = points[i]
                if p is None:
                    continue
                x, y, z = p[0], p[1], p[2]
                # anything beyond the deepest arm bound plus margin can't be the robot
                if vectorops.norm((x, y, z)) > maxArmDepth + 0.3:
                    continue
                points[i] = None
        return points
Example #15
    def advance(self, q, dq, dt):
        """Updates internal state: accumulates the I term and updates qLast."""
        if self.weight > 0:
            eP = self.getSensedError(q)
            # update the integral term, clamping its norm to eImax (anti-windup)
            if self.eI is None:
                self.eI = vectorops.mul(eP, dt)
            else:
                self.eI = vectorops.madd(self.eI, eP, dt)
            einorm = vectorops.norm(self.eI)
            if einorm > self.eImax:
                self.eI = vectorops.mul(self.eI, self.eImax / einorm)

        # update qLast
        self.qLast = q
        return
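The norm clamp on self.eI is a standard anti-windup guard: under a persistent error the integral would otherwise grow without bound, so its magnitude is capped at eImax. A standalone numeric sketch of the clamp, with an arbitrary error signal and limits:

from klampt.math import vectorops

eI, eImax, dt = [0.0, 0.0], 0.5, 0.01
for _ in range(1000):  # integrate a persistent error of (1, 0)
    eI = vectorops.madd(eI, [1.0, 0.0], dt)
    n = vectorops.norm(eI)
    if n > eImax:
        eI = vectorops.mul(eI, eImax / n)  # cap the accumulated magnitude
print(eI)  # stays at [0.5, 0.0] instead of winding up to [10.0, 0.0]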
Example #17
    def voxelgridfilter(self, points, gridX, gridY, gridZ, num):
        """ a simple voxelgrid filter to get rid of noise in point cloud data"""
        dictionary = {}
        for i, p in enumerate(points):
            x = p[0]
            y = p[1]
            z = p[2]

            if math.isnan(x) or vectorops.norm((x, y, z)) < 0.31:
                points[i] = None
                continue
            xindex = int(math.floor(x / gridX))
            yindex = int(math.floor(y / gridY))
            zindex = int(math.floor(z / gridZ))
            key = (xindex, yindex, zindex)
            if key not in dictionary:
                dictionary[key] = []
            dictionary[key].append(i)
        for key, value in dictionary.items():
            if len(value) < num:
                for j in value:
                    points[j] = None
        return points
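The filter keeps a point only if its voxel holds at least num neighbors, which is what discards isolated returns. The same binning idea on a plain list of synthetic points; the cluster location, voxel size, and threshold below are arbitrary:

import math, random

points = [(1.0 + random.gauss(0, 0.02), random.gauss(0, 0.02), 1.0)
          for _ in range(200)]  # dense cluster near (1, 0, 1)
points.append((5.0, 5.0, 5.0))  # one isolated noise point

grid = {}
for i, (x, y, z) in enumerate(points):
    key = (int(math.floor(x / 0.1)), int(math.floor(y / 0.1)), int(math.floor(z / 0.1)))
    grid.setdefault(key, []).append(i)

kept = [points[i] for ids in grid.values() if len(ids) >= 10 for i in ids]
print(len(points), "->", len(kept))  # the lone point's voxel has a single member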
    def callback(self, data):
        if self.newPC is None:
            data_out = pc2.read_points(data, skip_nans=True)
            #data_out = pc2.read_points(data)
            pc = PointCloud()
            pc.propertyNames.append('rgba')
            t0 = time.time()
            points = list(data_out)

            #filtered = self.radiusfilter(points,0.1,0)
            filtered = self.voxelgridfilter(points, 0.1, 0.1, 0.1, 10)
            print "Noise cleaned up in time", time.time() - t0
            #filtered = points
            for p in filtered:
                coordinates = (p[0], p[1], p[2])
                if vectorops.norm(coordinates) > 0.4:
                    index = pc.addPoint(coordinates)
                    i = struct.unpack("i", struct.pack("f", p[3]))[0]
                    pc.setProperty(index, 0, i)
            #print "Point cloud copied to Klampt format in time",time.time()-t0

            q = (0.566222, 0.423455, 0.424871, 0.5653)
            R1 = so3.from_quaternion(q)
            t1 = (0.228665, 0.0591513, 0.0977748)
            T1 = (R1, t1)
            R2 = so3.rotation((0, 0, 1), math.pi / 2)
            t2 = (0, 0, 1)
            T2 = (R2, t2)
            T = se3.mul(T2, T1)
            (R, t) = T
            pc.transform(R, t)
            #print "Point cloud transformed in time",time.time()-t0
            self.newPC = pc
        else:
            print("Not ready yet to receive new point cloud...")
Example #20
        elif cmd == 'find':
            print(master._find_object())

        elif cmd == 'grasp':
            print(master._grasp_object())

        elif cmd == 'retrieve':
            print(master._retrieve_object())

        elif cmd == 'move_cartesian' and len(args) == 4:
            task = None
            limb = args[0]
            amount = [float(args[1]), float(args[2]), float(args[3])]
            maxspeed = 0.01
            # drive at maxspeed along the requested displacement; duration = distance / speed
            command = {'limb': limb,
                       'velocity': vectorops.div(amount, vectorops.norm(amount) / maxspeed),
                       'duration': vectorops.norm(amount) / maxspeed}
            print("Trying cartesian_drive low level command")
            task = master.manager.control.execute([
                ('cartesian_drive', command, 0, 0),
            ])

            while task and not task.done:
                sleep(0.1)

        elif cmd == 'gripper' and len(args) == 1:
            task = None

            if args[0] == 'parallel':
                task = master.manager.control.execute([
                    ( 'left_gripper', [0.5, 0.5, 0.5, 1], 0, 0),
                    ( 'right_gripper', [0.5, 0.5, 0.5, 1], 0, 0),
Example #21
    def robotSelfFilter(self, points, Rinv, tinv):
        """Get rid of points that belong to the robot's arms."""
        robot = self.world.robot(0)
        arm_link_names = self.left_arm_link_names + self.right_arm_link_names
        bbMargin = 0.1
        upperThreshold = 0.1   # need to tune
        lowerThreshold = -0.1  # need to tune
        armBBs = []
        transformedG = []
        maxArmDepth = 0
        for i in arm_link_names:
            # express each arm link's geometry in the camera frame
            currentT = robot.link(i).getTransform()
            (newR, newt) = se3.mul((Rinv, tinv), currentT)
            g = robot.link(i).geometry()
            g.setCurrentTransform(newR, newt)
            transformedG.append(g)
            myBB = g.getBB()
            if myBB[1][2] > maxArmDepth:
                maxArmDepth = myBB[1][2]
            expandedBB = ((myBB[0][0] - bbMargin, myBB[0][1] - bbMargin,
                           myBB[0][2] - bbMargin),
                          (myBB[1][0] + bbMargin, myBB[1][1] + bbMargin,
                           myBB[1][2] + bbMargin))
            armBBs.append(expandedBB)
        allIndices = self.indicesOfInterest(armBBs)
        count = 0
        self.count2 = 0
        for j, indices in enumerate(allIndices):
            if indices is None:
                continue
            for i in indices:
                p = points[i]
                if p is None:
                    continue
                x, y, z = p[0], p[1], p[2]
                if vectorops.norm((x, y, z)) > maxArmDepth + 0.3:
                    continue
                points[i] = None
                # note: the continue below bypasses the ray-cast test that
                # follows, so every candidate point is discarded outright
                continue
                point = (x, y, z)
                if (z < 1.2) and self.insideGeometry(point, transformedG[j],
                                                     upperThreshold,
                                                     lowerThreshold):
                    count = count + 1
                    points[i] = None
        print("detected: ", count)
        print("hits: ", self.count2)
        return points
Example #22
        rgroup.addFrame("marker (ground truth)",parent="marker link",relativeCoordinates=Tm0)
        for i,(obs,obs0) in enumerate(zip(observations,trueObservations)):
            rgroup.addFrame("obs"+str(i)+" (ground truth)",parent="camera (ground truth)",relativeCoordinates=obs0)
            rgroup.addFrame("obs"+str(i)+" (from camera)",parent="camera (ground truth)",relativeCoordinates=obs)
        visualization.add("world",world)
        for i,q in enumerate(calibrationConfigs):
            visualization.add("config"+str(i),q)
            app = lc.appearance().clone()
            app.setColor(0.5,0.5,0.5,0.1)
            visualization.setAppearance("config"+str(i),app)
        visualization.add("simulated coordinates",rgroup)
        visualization.dialog()

    res = calibrate_robot_camera(robot,camera_link,
                                 calibrationConfigs,
                                 observations,
                                 [marker_link]*len(calibrationConfigs))
    print()
    print("Per-observation reconstruction error:", res[0] / numObs)
    print("Estimated camera transform:", res[1])
    print("  total error:", vectorops.norm(se3.error(res[1], Tc0)))
    print("  rotation errors:", se3.error(res[1], Tc0)[:3])
    print("  translation errors:", se3.error(res[1], Tc0)[3:])
    print("Estimated marker transform:", res[2][marker_link])
    print("  error:", vectorops.norm(se3.error(res[2][marker_link], Tm0)))
    print("  rotation errors:", se3.error(res[2][marker_link], Tm0)[:3])
    print("  translation errors:", se3.error(res[2][marker_link], Tm0)[3:])

    visualization.kill()
    
Example #23
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.
    
    Given body transforms and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    camera link as well as the marker transforms relative to their links.

    M: is the set of m markers. By default there is at most one marker per link.
       Markers can either be point markers (e.g., a mocap ball), or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O: is the set of n observations, consisting of a reading (Tc_i,Tm_i,o_i,l_i)
       where Tc_i is the camera link's transform, Tm_i is the marker link's
       transform, o_i is the reading which consists of either a point or transform
       estimate in the camera frame, and l_i is the ID of the marker (by default,
       just its link)

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's transforms
      Tc_1,...,Tc_n corresponding to each observation.
    - marker_link_transforms: a list of the marker links' transforms
      Tm_1,...,Tm_n corresponding to each observation.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).

      If o_i is a 3-vector, the marker is considered to be a point marker.
      If a se3 element (R,t) is given, the marker is considered to be
      a transform marker.  You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list.  Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).  By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.  Stops when the change of
      estimates falls below this threshold
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v,None) for v in marker_id_list)
    for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
        if len(obs)==3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 'p'
        elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: proportional to distance
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0/vectorops.norm(obs))
            else:
                observation_weights.append(1.0/vectorops.norm(obs[1]))
        #NOTE: this next line overrides the distance-based weights with uniform ones
        observation_weights = [1.0]*len(observation_weights)
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err,'__iter__'):
                observation_weights.append([1.0/v for v in err])
            else:
                observation_weights.append(1.0/err)

    #initial guesses
    if camera_initial_guess is None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.values()):
            #estimate camera rotation from point estimates because rotations are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i,obs in enumerate(marker_observations):
                if len(obs)==2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
            print("Estimated camera rotation from points:",camera_initial_guess)
    if marker_initial_guess is None:
        marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
        for i,obs in enumerate(marker_observations):
            rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
        visualization.add("coordinates",rgroup)
        visualization.dialog()

    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i
        #camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and then optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and then optimize the camera transform.
        #finally we'll check the error to detect convergence
        #1. Estimate marker transforms from current camera transform
        new_marker_transforms = dict((l,(TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor) if marker_types[l]=='t' else VectorStats(zero=[0.0]*3,prior=regularizationFactor))) for l in marker_id_list)
        for i in range(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel,obs)
            else:
                estimate = se3.apply(Trel,obs)
            new_marker_transforms[marker].add(estimate,observation_weights[i])
        print "ITERATION",iters
        #print "  ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
        #2. Estimate camera transform from current marker transforms
        new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
            relative_points = []
            for i in range(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
        else:
            for i in range(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
                estimate = se3.mul(Trel,se3.inv(obs))
                new_camera_transform.add(estimate,observation_weights[i])
        #print "  ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
        #3. compute difference between last and current estimates
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker]=='t':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
            print("Converged with diff %g on iteration %d" % (math.sqrt(diff), iters))
            break
        print("  ESTIMATE CHANGE:", math.sqrt(diff))
        error = 0.0
        for i in range(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Tc = se3.mul(Tclink,camera_transform)
            if marker_types[marker] == 't':
                Tm = se3.mul(Tmlink,marker_transforms[marker])
                error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
            else:
                Tm = se3.apply(Tmlink,marker_transforms[marker])
                error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
        print("  OBSERVATION ERROR:", math.sqrt(error))
        #input()
        if DO_VISUALIZATION:
            rgroup.setFrameCoordinates("camera estimate",camera_transform)
            rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
            for i,obs in enumerate(marker_observations):
                rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
            visualization.add("coordinates",rgroup)
            visualization.dialog()
    if math.sqrt(diff) >= tolerance:
        print("Max iters reached")
    error = 0.0
    for i in range(n):
        marker = marker_ids[i]
        Tclink = camera_link_transforms[i]
        Tmlink = marker_link_transforms[i]
        obs = marker_observations[i]
        Tc = se3.mul(Tclink,camera_transform)
        if marker_types[marker] == 't':
            Tm = se3.mul(Tmlink,marker_transforms[marker])
            error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
        else:
            Tm = se3.apply(Tmlink,marker_transforms[marker])
            error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
    return (math.sqrt(error),camera_transform,marker_transforms)
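A synthetic smoke test makes the calibration loop's contract concrete: generate noiseless point-marker observations from a known camera offset, then check that the recovered transform matches. This assumes calibrate_xform_camera and its helper classes (TransformStats, VectorStats, point_fit_xform_3d, DO_VISUALIZATION) are importable from the original module, and the poses below are arbitrary; real calibration should use richer, non-degenerate configurations:

from klampt.math import so3, se3

Tc_true = (so3.identity(), [0.10, 0.0, 0.0])  # ground-truth camera offset on its link
pm_true = [0.0, 0.0, 0.05]                    # ground-truth point marker on its link

camera_links, marker_links, observations, ids = [], [], [], []
for k in range(20):
    Tclink = (so3.rotation((0, 0, 1), 0.1 * k), [0.0, 0.0, 0.5])  # synthetic camera link pose
    Tmlink = se3.identity()
    # perfect observation: marker point expressed in the true camera frame
    obs = se3.apply(se3.inv(se3.mul(Tclink, Tc_true)), se3.apply(Tmlink, pm_true))
    camera_links.append(Tclink)
    marker_links.append(Tmlink)
    observations.append(obs)
    ids.append(0)

err, Tc_est, markers_est = calibrate_xform_camera(camera_links, marker_links, observations, ids)
print("residual:", err)            # ~0 for noiseless, well-posed data
print("camera estimate:", Tc_est)  # should approach Tc_true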
Example #24
    def solve(self,robot=None,params=IKSolverParams()):
        """Globally solves the given problem.  Returns the solution
        configuration or None if failed."""
        #set this to False if you want to run the local optimizer for each
        #random restart.
        postOptimize = True
        t0 = time.time()
        if len(self.objectives) == 0:
            if self.costFunction is not None or self.feasibilityTest is not None:
                raise NotImplementedError("Raw optimization without IK goals not done yet")
            return None
        if robot is None:
            if not hasattr(self.objectives[0],'robot'):
                print "The objectives passed to IKSolver should come from ik.objective() or have their 'robot' member manually set"
            robot = self.objectives[0].robot
        else:
            for obj in self.objectives:
                obj.robot = robot
        solver = ik.solver(self.objectives)
        if self.activeDofs is not None:
            solver.setActiveDofs(self.activeDofs)
            ikActiveDofs = self.activeDofs
        if self.jointLimits is not None: solver.setJointLimits(*self.jointLimits)
        qmin,qmax = solver.getJointLimits()
        if self.activeDofs is None:
            #need to distinguish between dofs that affect feasibility vs 
            ikActiveDofs = solver.getActiveDofs()
            if self.costFunction is not None or self.feasibilityTest is not None:
                activeDofs = [i for i in range(len(qmin)) if qmin[i] != qmax[i]]
                nonIKDofs = [i for i in activeDofs if i not in ikActiveDofs]
                ikToActive = [activeDofs.index(i) for i in ikActiveDofs]
            else:
                activeDofs = ikActiveDofs
                nonIKDofs = []
                ikToActive = range(len(activeDofs))
        else:
            activeDofs = ikActiveDofs
            nonIKDofs = []
            ikToActive = range(len(ikActiveDofs))
        #sample random start point
        if params.startRandom:
            solver.sampleInitial()
            if len(nonIKDofs)>0:
                q = robot.getConfig()
                for i in nonIKDofs:
                    q[i] = random.uniform(qmin[i],qmax[i])
                robot.setConfig(q)
        if params.localMethod is not None or params.globalMethod is not None:
            #set up optProblem, an instance of optimize.Problem
            optProblem = optimize.Problem()
            Jactive = [[0.0]*len(activeDofs) for _ in range(len(solver.getResidual()))]
            def ikConstraint(x):
                q = robot.getConfig()
                for d,v in zip(activeDofs,x):
                    q[d] = v
                robot.setConfig(q)
                return solver.getResidual()
            def ikConstraintJac(x):
                q = robot.getConfig()
                for d,v in zip(activeDofs,x):
                    q[d] = v
                robot.setConfig(q)
                Jikdofs = solver.getJacobian()
                for i in ikActiveDofs:
                    for j in range(len(Jactive)):
                        Jactive[j][ikToActive[i]] = Jikdofs[j][i]
                return Jactive
            def costFunc(x):
                q = robot.getConfig()
                for d,v in zip(activeDofs,x):
                    q[d] = v
                return self.costFunction(q)
            def feasFunc(x):
                q = robot.getConfig()
                for d,v in zip(activeDofs,x):
                    q[d] = v
                return self.feasibilityTest(q)
            optProblem.addEquality(ikConstraint,ikConstraintJac)
            if len(qmax) > 0:
                optProblem.setBounds([qmin[d] for d in activeDofs],[qmax[d] for d in activeDofs])
            if self.costFunction is None:
                optProblem.setObjective(lambda x:0)
            else:
                optProblem.setObjective(costFunc)
            if self.feasibilityTest is not None:
                optProblem.setFeasibilityTest(feasFunc)
            #optProblem is now ready to use

        if params.globalMethod is not None:
            #do global optimization and return
            method = params.globalMethod
            numIters = params.numIters
            tol = params.tol
            if params.globalMethod == 'random-restart':
                #use the GlobalOptimize version of random restarts
                assert params.localMethod is not None,"Need a localMethod for random-restart to work"
                method = params.globalMethod + '.' + params.localMethod
                numIters = [params.numRestarts,params.numIters]
                optSolver = optimize.GlobalOptimizer(method)
            elif params.localMethod is not None:
                #do a sequential optimization
                method = [params.globalMethod,params.localMethod]
                #co-opt params.numRestarts for the number of outer iterations?
                numIters = [params.numRestarts,params.numIters]
            optSolver = optimize.GlobalOptimizer(method=method)
            q = robot.getConfig()
            x0 = [q[d] for d in activeDofs]
            optSolver.setSeed(x0)
            (succ,res) = optSolver.solve(optProblem,numIters=numIters,tol=tol)
            if succ:
                q = robot.getConfig()
                for d,v in zip(activeDofs,res):
                    q[d] = v
                #check feasibility if desired
                if self.feasibilityTest is not None and not self.feasibilityTest(q):
                    print("Result from global optimize isn't feasible")
                    return None
                if max(abs(v) for v in solver.getResidual()) > params.tol:
                    print("Result from global optimize doesn't satisfy tolerance.  Residual",vectorops.norm(solver.getResidual()))
                    return None
                #passed
                print("Global optimize succeeded! Cost", 0 if self.costFunction is None else self.costFunction(q))
                return q
            else:
                print("Global optimize returned failure")
                return None

        #DONT DO THIS... much faster to do newton solves first, then local optimize.
        if not postOptimize and params.localMethod is not None:
            #random restart + local optimize
            optSolver = optimize.LocalOptimizer(method=params.localMethod)
            q = robot.getConfig()
            x0 = [q[d] for d in activeDofs]
            optSolver.setSeed(x0)
            best = None
            bestQuality = float('inf')
            for restart in range(params.numRestarts):
                if time.time() - t0 > params.timeout:
                    return best
                res = optSolver.solve(optProblem,params.numIters,params.tol)
                if res[0]:
                    q = robot.getConfig()
                    for d,v in zip(activeDofs,res[1]):
                        q[d] = v
                    #check feasibility if desired
                    if self.feasibilityTest is not None and not self.feasibilityTest(q):
                        continue
                    if self.costFunction is None:
                        #feasible
                        return q
                    else:
                        #optimize
                        quality = self.costFunction(q)
                        if quality < bestQuality:
                            best = q
                            bestQuality = quality
                #random restart
                solver.sampleInitial()
                q = robot.getConfig()
                if len(nonIKDofs)>0:
                    for i in nonIKDofs:
                        q[i] = random.uniform(qmin[i],qmax[i])
                x0 = [q[d] for d in activeDofs]
                optSolver.setSeed(x0)
        else:
            #random-restart newton-raphson
            best = None
            bestQuality = float('inf')
            for restart in range(params.numRestarts):
                if time.time() - t0 > params.timeout:
                    return best
                t0 = time.time()
                if solver.solve(params.numIters,params.tol)[0]:
                    q = robot.getConfig()
                    #check feasibility if desired
                    t0 = time.time()
                    if self.feasibilityTest is not None and not self.feasibilityTest(q):
                        if len(nonIKDofs) > 0:
                            u = float(restart+0.5)/params.numRestarts
                            q = robot.getConfig()
                            #perturbation sampling
                            for i in nonIKDofs:
                                delta = u*(qmax[i]-qmin[i])*0.5
                                q[i] = random.uniform(max(q[i]-delta,qmin[i]),min(q[i]+delta,qmax[i]))
                            robot.setConfig(q)
                            if not self.feasibilityTest(q):
                                solver.sampleInitial()
                                continue
                        else:
                            solver.sampleInitial()
                            continue
                    if self.costFunction is None:
                        #feasible
                        return q
                    else:
                        #optimize
                        quality = self.costFunction(q)
                        if quality < bestQuality:
                            best = q
                            bestQuality = quality
                #sample a new ik seed
                solver.sampleInitial()
        #post-optimize using local optimizer
        if postOptimize and best is not None and params.localMethod is not None:
            optSolver = optimize.LocalOptimizer(method=params.localMethod)
            x0 = [best[d] for d in activeDofs]
            optSolver.setSeed(x0)
            res = optSolver.solve(optProblem,params.numIters,params.tol)
            if res[0]:
                q = robot.getConfig()
                for d,v in zip(activeDofs,res[1]):
                    q[d] = v
                #check feasibility if desired
                if self.feasibilityTest is not None and not self.feasibilityTest(q):
                    pass
                elif self.costFunction is None:
                    #feasible
                    best = q
                else:
                    #optimize
                    quality = self.costFunction(q)
                    if quality < bestQuality:
                        #print "Optimization improvement",bestQuality,"->",quality
                        best = q
                        bestQuality = quality
                    elif quality > bestQuality + 1e-2:
                        print("Got worse solution by local optimizing?",bestQuality,"->",quality)
        return best
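Stripped of the IK specifics, the else-branch above is plain random-restart search: draw a seed, run a local solver, keep the first feasible success. A toy standalone version of the pattern; the local solver here is Newton's method on x^2 = 2, purely for illustration:

import random

def random_restart_solve(local_solve, sample_seed, num_restarts=30):
    # try a local solve from many random seeds; return the first success
    for _ in range(num_restarts):
        ok, x = local_solve(sample_seed())
        if ok:
            return x
    return None

def newton_sqrt2(x):
    for _ in range(50):
        x = x - (x * x - 2.0) / (2.0 * x)  # Newton step for f(x) = x^2 - 2
    return (abs(x * x - 2.0) < 1e-9, x)

print(random_restart_solve(newton_sqrt2, lambda: random.uniform(0.5, 10.0)))  # ~1.41421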
Example #25
        rgroup.addFrame("marker (ground truth)",parent="marker link",relativeCoordinates=Tm0)
        for i,(obs,obs0) in enumerate(zip(observations,trueObservations)):
            rgroup.addFrame("obs"+str(i)+" (ground truth)",parent="camera (ground truth)",relativeCoordinates=obs0)
            rgroup.addFrame("obs"+str(i)+" (from camera)",parent="camera (ground truth)",relativeCoordinates=obs)
        vis.add("world",world)
        for i,q in enumerate(calibrationConfigs):
            vis.add("config"+str(i),q)
            app = lc.appearance().clone()
            app.setColor(0.5,0.5,0.5,0.1)
            vis.setAppearance("config"+str(i),app)
        vis.add("simulated coordinates",rgroup)
        vis.dialog()

    res = calibrate_robot_camera(robot,camera_link,
                                 calibrationConfigs,
                                 observations,
                                 [marker_link]*len(calibrationConfigs))
    print()
    print("Per-observation reconstruction error:", res[0] / numObs)
    print("Estimated camera transform:", res[1])
    print("  total error:", vectorops.norm(se3.error(res[1], Tc0)))
    print("  rotation errors:", se3.error(res[1], Tc0)[:3])
    print("  translation errors:", se3.error(res[1], Tc0)[3:])
    print("Estimated marker transform:", res[2][marker_link])
    print("  error:", vectorops.norm(se3.error(res[2][marker_link], Tm0)))
    print("  rotation errors:", se3.error(res[2][marker_link], Tm0)[:3])
    print("  translation errors:", se3.error(res[2][marker_link], Tm0)[3:])

    vis.kill()
    
Example #26
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.
    
    Given body transforms and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    camera link as well as the marker transforms relative to their links.

    M: is the set of m markers. By default there is at most one marker per link.
       Markers can either be point markers (e.g., a mocap ball), or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O: is the set of n observations, consisting of a reading (Tc_i,Tm_i,o_i,l_i)
       where Tc_i is the camera link's transform, Tm_i is the marker link's
       transform, o_i is the reading which consists of either a point or transform
       estimate in the camera frame, and l_i is the ID of the marker (by default,
       just its link)

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's transforms
      Tc_1,...,Tc_n corresponding to each observation.
    - marker_link_transforms: a list of the marker links' transforms
      Tm_1,...,Tm_n corresponding to each observation.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).

      If o_i is a 3-vector, the marker is considered to be a point marker.
      If a se3 element (R,t) is given, the marker is considered to be
      a transform marker.  You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list.  Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).  By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.  Stops when the change of
      estimates falls below this threshold
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v,None) for v in marker_id_list)
    for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
        if len(obs)==3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 'p'
        elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: proportional to distance
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0/vectorops.norm(obs))
            else:
                observation_weights.append(1.0/vectorops.norm(obs[1]))
        #NOTE: this next line overrides the distance-based weights with uniform ones
        observation_weights = [1.0]*len(observation_weights)
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err,'__iter__'):
                observation_weights.append([1.0/v for v in err])
            else:
                observation_weights.append(1.0/err)

    #initial guesses
    if camera_initial_guess == None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.itervalues()):
            #estimate camera rotation from point estimates because rotations are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i,obs in enumerate(marker_observations):
                if len(obs)==2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
            print "Estimated camera rotation from points:",camera_initial_guess
    if marker_initial_guess is None:
        marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
        for i,obs in enumerate(marker_observations):
            rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
        vis.add("coordinates",rgroup)
        vis.dialog()

    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i
        #camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and then optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and then optimize the camera transform.
        #finally we'll check the error to detect convergence
        #1. Estimate marker transforms from current camera transform
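        #  With the camera transform held fixed, each observation gives a
        #  closed-form sample of its marker's transform:
        #    marker_transform(l_i) = inv(marker_link(q_i)) * camera_link(q_i)
        #                            * camera_transform * observation_i
        #  (applied to the observed point instead, for point markers); the
        #  samples are averaged using the observation weights.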
        new_marker_transforms = dict()
        for l in marker_id_list:
            if marker_types[l] == 't':
                new_marker_transforms[l] = TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor)
            else:
                new_marker_transforms[l] = VectorStats(zero=marker_initial_guess[l],prior=regularizationFactor)
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel,obs)
            else:
                estimate = se3.apply(Trel,obs)
            new_marker_transforms[marker].add(estimate,observation_weights[i])
        print "ITERATION",iters
        #print "  ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
        #2. Estimate camera transform from current marker transforms
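        #  Symmetrically, with the marker transforms held fixed, each
        #  transform observation gives a sample of the camera transform:
        #    camera_transform = inv(camera_link(q_i)) * marker_link(q_i)
        #                       * marker_transform(l_i) * inv(observation_i)
        #  For point-only markers, a rigid transform is instead fit between
        #  the observed points (camera frame) and the predicted points in
        #  the camera link frame.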
        new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
            relative_points = []
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
        else:
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
                estimate = se3.mul(Trel,se3.inv(obs))
                new_camera_transform.add(estimate,observation_weights[i])
        #print "  ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
        #3. compute difference between last and current estimates
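        #  The change metric accumulates squared se3 errors for transform
        #  estimates and squared Euclidean distances for point estimates;
        #  its square root is compared against the convergence tolerance.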
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker]=='t':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
            print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters)
            break
        print "  ESTIMATE CHANGE:",math.sqrt(diff)
        error = 0.0
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Tc = se3.mul(Tclink,camera_transform)
            if marker_types[marker] == 't':
                Tm = se3.mul(Tmlink,marker_transforms[marker])
                error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
            else:
                Tm = se3.apply(Tmlink,marker_transforms[marker])
                error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
        print "  OBSERVATION ERROR:",math.sqrt(error)
        #raw_input()
        if DO_VISUALIZATION:
            rgroup.setFrameCoordinates("camera estimate",camera_transform)
            rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
            for i,obs in enumerate(marker_observations):
                rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
            vis.add("coordinates",rgroup)
            vis.dialog()
    if math.sqrt(diff) >= tolerance:
        print "Max iters reached"
    error = 0.0
    for i in xrange(n):
        marker = marker_ids[i]
        Tclink = camera_link_transforms[i]
        Tmlink = marker_link_transforms[i]
        obs = marker_observations[i]
        Tc = se3.mul(Tclink,camera_transform)
        if marker_types[marker] == 't':
            Tm = se3.mul(Tmlink,marker_transforms[marker])
            error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
        else:
            Tm = se3.apply(Tmlink,marker_transforms[marker])
            error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
    return (math.sqrt(error),camera_transform,marker_transforms)
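# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the original listing):
# the calibration routine above is referred to here by the placeholder name
# calibrate_camera_marker_transforms, and the positional argument order is
# assumed from the docstring; substitute the actual function name and
# signature from its definition.  Two point observations of a single marker
# (id 0) are simulated from two camera-link poses; all rigid transforms are
# klampt se3 elements, i.e. (9-list rotation, 3-list translation) pairs.
if __name__ == '__main__':
    from klampt.math import so3, se3

    #camera link poses at the two robot configurations
    camera_link_transforms = [
        se3.identity(),
        (so3.rotation((0, 0, 1), 0.1), [0.05, 0.0, 0.0]),
    ]
    #the marker sits on a fixed link (the world frame)
    marker_link_transforms = [se3.identity()] * 2
    marker_ids = [0, 0]
    #point observations in the camera frame (3-lists => point marker)
    marker_observations = [[0.0, 0.0, 1.0], [0.02, 0.0, 0.98]]

    err, Tcamera, marker_poses = calibrate_camera_marker_transforms(
        camera_link_transforms, marker_link_transforms,
        marker_ids, marker_observations,
        regularizationFactor=0.01, maxIters=100, tolerance=1e-7)
    print "residual error:", err
    print "camera transform estimate:", Tcamera
    print "marker estimates:", marker_poses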