Example #1
def doTest2(listBones):            
    fp = open(thePosesFile, "w")
    for bone,coords in listBones:
        fp.write("\n%s %.4f\n" % (bone.name, bone.roll)) 
        writeMat(fp, "Rest", bone.matrixRest)
        writeMat(fp, "Global", bone.matrixGlobal)
        pose = bone.getPoseFromGlobal()
        writeMat(fp, "Pose", pose)
        xyz = tm.euler_from_matrix(pose, axes='sxyz')
        fp.write("XYZ %.4g %.4g %.4g\n" % tuple(xyz))
        zyx = tm.euler_from_matrix(pose, axes='szyx')
        fp.write("ZYX %.4g %.4g %.4g\n" % tuple(zyx))
        #writeMat(fp, "Pose0", bone.matrixPose)
    fp.close()
Example #2
    def predictSinglePose(self, armName, curPose, prevPose, dt=1.0):
        if dt <= 0:
            print 'Error: Illegal timestamp'
            return None

        # Convert pose to numpy matrix
        curTrans = tft.translation_matrix([curPose.position.x, curPose.position.y, curPose.position.z])
        curRot = tft.quaternion_matrix([curPose.orientation.x, curPose.orientation.y ,curPose.orientation.z, curPose.orientation.w])
        curPoseMatrix = np.dot(curTrans, curRot)

        prevTrans = tft.translation_matrix([prevPose.position.x, prevPose.position.y, prevPose.position.z])
        prevRot = tft.quaternion_matrix([prevPose.orientation.x, prevPose.orientation.y ,prevPose.orientation.z, prevPose.orientation.w])
        prevPoseMatrix = np.dot(prevTrans, prevRot)
        
        deltaPoseMatrix = np.linalg.inv(prevPoseMatrix).dot(curPoseMatrix)
        deltaAngles = euler_from_matrix(deltaPoseMatrix[:3,:3])
        deltaPos = deltaPoseMatrix[:3,3]

        #deltaAngles = np.array([a / dt for a in deltaAngles])
        deltaPos = deltaPos / dt
        #deltaPoseMatrix = euler_matrix(deltaAngles[0], deltaAngles[1], deltaAngles[2])
        deltaPoseMatrix[:3,3] = deltaPos

        gpList, sysList = self.predict(armName, [curPoseMatrix], [deltaPoseMatrix])
        return tfx.pose(gpList[0], frame=curPose.frame, stamp=curPose.stamp), tfx.pose(sysList[0], frame=curPose.frame, stamp=curPose.stamp)
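
The heart of predictSinglePose is the delta-pose step: express the current pose in the frame of the previous one, then split the result into translation and rotation offsets. A minimal standalone sketch of that step (assuming only numpy and a Gohlke-style transformations module; the poses here are made up):

import numpy as np
from transformations import euler_matrix, euler_from_matrix, translation_matrix

prevPose = translation_matrix([0.0, 0.0, 0.0])
curPose = translation_matrix([0.1, 0.0, 0.0]).dot(euler_matrix(0.0, 0.0, 0.05))

delta = np.linalg.inv(prevPose).dot(curPose)  # motion expressed in the previous frame
deltaPos = delta[:3, 3] / 1.0                 # linear velocity for dt = 1.0
deltaAngles = euler_from_matrix(delta)        # rotational offsets ('sxyz' by default)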
Example #3
 def componentwiseError(self, estimatedPoses, truePoses):
     n_poses = len(estimatedPoses)
     n_dim = estimatedPoses[0].shape[0] * estimatedPoses[0].shape[1]
     n_trans = 3
     n_rot = 3
     translationError = np.zeros((n_poses, n_trans))
     rotationError = np.zeros((n_poses, n_rot))
     
     for i in range(n_poses):
         estimatedPose = estimatedPoses[i]
         truePose = truePoses[i]
         deltaPose = np.linalg.inv(estimatedPose).dot(truePose)
         
         deltaRotation = np.eye(4,4)
         deltaRotation[:3,:3] = deltaPose[:3,:3]
         
         translationError[i,:] = deltaPose[:3,3].T
         rotationError[i,:] = euler_from_matrix(deltaRotation)
         
         #if np.abs(translationError[i,1]) > 0.01:
         #    print 'Outlier', i
         
     combinedError = np.c_[translationError, rotationError]
     meanError = np.mean(np.abs(combinedError), axis=0)
     medianError = np.median(np.abs(combinedError), axis=0)
     rmsError = np.sqrt(np.mean(np.square(combinedError), axis=0))
     maxError = np.max(np.abs(combinedError), axis=0)
     
     predictionError = PredictionError(medianError, meanError, rmsError, maxError)
     return predictionError
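
Once each 6-vector error is stacked, the statistics in componentwiseError reduce to numpy one-liners; a sketch with two made-up pose pairs (assumes a Gohlke-style transformations module):

import numpy as np
from transformations import euler_matrix, euler_from_matrix

estimated = [np.eye(4), euler_matrix(0.0, 0.0, 0.10)]
true = [euler_matrix(0.01, 0.0, 0.0), euler_matrix(0.0, 0.0, 0.12)]
errors = []
for est, tru in zip(estimated, true):
    delta = np.linalg.inv(est).dot(tru)
    errors.append(np.r_[delta[:3, 3], euler_from_matrix(delta)])  # 3 trans + 3 rot
combined = np.array(errors)
print(np.mean(np.abs(combined), axis=0), np.max(np.abs(combined), axis=0))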
Example #4
    def init_local_transformation(self):
        """
        compute the local transformation for the first time (compute everything)
        """
        # rotation part
        self.localTransformation = numpy.eye(4)
        # print type(self), id(self), self.rotation
        try:
            angle = self.rotation[3]
        except IndexError:
            logger.exception("Failed on {0}, rotation={1}".format(type(self),self.rotation))
            raise
        direction = self.rotation[:3]
        self.localTransformation[0:3,0:3] = tf.rotation_matrix(angle, direction)[:3,:3]
        self.rpy = tf.euler_from_matrix(self.localTransformation)

        # last column
        scale = [1,1,1]
        if self.parent:
            scale = self.cumul_scale()
        scale_translation = [self.translation[i]*scale[i] for i in range(3)]

        self.localTransformation[0:3,3] = numpy.array(scale_translation)+\
            numpy.dot(numpy.eye(3)-self.localTransformation[:3,:3],
                      numpy.array(self.center))

        # last line
        self.localTransformation[3,0:4]=[0,0,0,1]
Example #5
    def addObjToCloud(self, objs_wrt_mbly):
        """ Add the mobileye returns to the 3d scene """
        mbly_vtk_boxes = []

        car_rot = self.imu_transforms_mk1[self.t, :, :3]
        objs_wrt_world = self.xformMblyToGlobal(objs_wrt_mbly)

        # Draw each box
        car_rot = euler_from_matrix(car_rot)[2] * 180 / math.pi
        for o in objs_wrt_world:
            # Create the vtk object
            box = VtkBoundingBox(o)
            actor = box.get_vtk_box(car_rot)
            color = self.mbly_obj_colors[int(o[5])]
            actor.GetProperty().SetColor(*color)
            mbly_vtk_boxes.append(box)

        # Remove old boxes
        for vtk_box in self.mbly_vtk_boxes:
            self.cloud_ren.RemoveActor(vtk_box.actor)
        # Update to new actors
        self.mbly_vtk_boxes = mbly_vtk_boxes
        # Draw new boxes
        for vtk_box in self.mbly_vtk_boxes:
            self.cloud_ren.AddActor(vtk_box.actor)
Example #6
def calculatePoseError(tf0, tf1):
    numDimMat = 16
    numDimEul = 6
    numDimQuat = 7

    numData = len(tf0) # should be the same for tf1
    matrixPoseError = np.empty((numData, numDimMat))
    translationPoseError = np.empty((numData, 3))
    rotationPoseError = np.empty((numData, 3))
    poseError = np.empty((numData, numDimEul))

    for i_data in range(numData):
        deltaTau = np.linalg.inv(tf0[i_data]).dot(tf1[i_data])
        diffTranslation = deltaTau[:3,3]
        diffRotation = np.eye(4,4)
        diffRotation[:3,:3] = deltaTau[:3,:3]

        diffQuat = quaternion_from_matrix(diffRotation)
        diffEuler = euler_from_matrix(diffRotation)

        # flip quaternions on the wrong side of the hypersphere
        if diffQuat[3] < 0:
            diffQuat = -diffQuat

        translationPoseError[i_data,:] = diffTranslation
        rotationPoseError[i_data,:] = diffEuler
        poseError[i_data,:] = np.r_[diffTranslation, diffEuler]
    return poseError
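
The hypersphere flip above works because q and -q encode the same rotation; a quick self-contained check that holds in either quaternion ordering convention:

import numpy as np
from transformations import euler_matrix, quaternion_from_matrix, quaternion_matrix

q = quaternion_from_matrix(euler_matrix(0.3, -0.2, 0.1))
assert np.allclose(quaternion_matrix(q), quaternion_matrix(-q))  # identical matrices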
Example #7
    def get_SVD(self):
        """
        Get the affine transformation matrix that best matches the molecule's cxns
        to each rod cxn by SVD

        We do a Euclidean (rigid) transform
        """

        print("\n\nCalculating intial affine transformations:")
        print("\n\nM = affine transformation for best fit of mol cxn -> rod cxn:")
        for i in range(len(self.cxns)):
            for it in self.mol.permutations:
                for dc in self.dc_ints: 
                    pass
            a = self.mol.cxns[0:3,:]
            b = self.cxn_xyz[i]

            M = trans.affine_matrix_from_points(a, b, shear = False, scale = False, usesvd = True)

            alpha, beta, gamma = trans.euler_from_matrix(M)
            translations = M[0:3,3]
            conn_start_ind = 1 + len(self.rods) + i*6
            self.opt_vec[conn_start_ind+0] = alpha  
            self.opt_vec[conn_start_ind+1] = beta 
            self.opt_vec[conn_start_ind+2] = gamma 
            self.opt_vec[conn_start_ind+3] = translations[0] 
            self.opt_vec[conn_start_ind+4] = translations[1]
            self.opt_vec[conn_start_ind+5] = translations[2]
            print(M)
Example #8
	def determine_obj_frame(self):
	if self.detected_artags is not None:

			eng_poses = []
			ar_tfs = []
			for da in self.detected_artags.markers:
				ar_pose = th.aruco_to_pose_stamp(da)

				eng_pose = self.get_engine_pose_from_ps(ar_pose)

				eng_poses.append(eng_pose)
				ar_tfs.append(th.pose_stamp_to_tf(eng_pose))

			ar_tf_sum = np.zeros((4,4))
			for tf in ar_tfs:
				ar_tf_sum = ar_tf_sum + tf/len(ar_tfs)
				
			engine_frame_combined = ar_tf_sum
			
			tran = transformations.translation_from_matrix(engine_frame_combined)
			eul = transformations.euler_from_matrix(engine_frame_combined)

			self.updated_engine_pose = str(tran[0]) + ' ' + str(tran[1]) + ' ' + str(tran[2]) + ' ' + str(eul[0]) + ' ' + str(eul[1]) + ' ' + str(eul[2])
			
			ps = th.tf_to_pose_stamp(engine_frame_combined, 'engine_frame_combined')
			
			th.broadcast_transform(ps, global_frame)
			return engine_frame_combined
		else: 
			return None
Example #9
    def updateRadar(self):
        # Taken from testDrawRadarOnMap.py
        fren = self.iren.GetRenderWindow().GetRenderers().GetFirstRenderer()
        t = self.start + self.step * self.count
        radar_data = loadRDR(self.rdr_pts[t])[0]

        if radar_data.shape[0] > 0:
            #Convert from radar to lidar ref-frame
            radar_data[:, :3] = calibrateRadarPts(radar_data[:, :3], self.radar_params)

            #Convert from lidar to IMU ref-frame
            radar_data[:, :3] = np.dot(self.lidar_params['T_from_l_to_i'][:3, :3],
                radar_data[:, :3].transpose()).transpose()

            h_radar_data = np.hstack((radar_data[:, :3], np.ones((radar_data.shape[0], 1))))

            radar_data[:, :3] = np.dot(self.imu_transforms[t],
                h_radar_data.transpose()).transpose()[:, :3]

            for i in xrange(len(self.radar_actors)):
                fren.RemoveActor(self.radar_actors[i])

            self.radar_actors = []
            self.radar_clouds = []

            for i in xrange(radar_data.shape[0]):
                self.radar_clouds.append(VtkBoundingBox(radar_data[i, :]))
                (ax, ay, az) = euler_from_matrix(self.imu_transforms[t])
                box = self.radar_clouds[i].get_vtk_box(rot=az*180/np.pi)
                self.radar_actors.append(box)
                fren.AddActor(self.radar_actors[i])
Example #10
 def getRotation(self):
     """
     Get rotation matrix of rotation of this bone in local space.
     """
     qw,qx,qy,qz = tm.quaternion_from_matrix(self.matPose)
     ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
     return (1000*qw,1000*qx,1000*qy,1000*qz, ax/D,ay/D,az/D)
Example #11
 def setRotationIndex(self, index, angle, useQuat):
     """
     Set the rotation for one of the three rotation channels, either as
     quaternion or euler matrix. index should be 1,2 or 3 and represents
     x, y and z axis accordingly
     """
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         _normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matPose = tm.quaternion_matrix(quat)
         return quat[0]*1000
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matPose[:3,:3] = mat[:3,:3]
         return 1000.0
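
The non-quaternion branch above follows a decompose / overwrite one channel / recompose pattern; in isolation (tm assumed to be a Gohlke-style transformations module, angles made up):

import transformations as tm

matPose = tm.euler_matrix(0.1, 0.2, 0.3, axes='sxyz')
ax, ay, az = tm.euler_from_matrix(matPose, axes='sxyz')
matPose[:3, :3] = tm.euler_matrix(ax, 0.5, az, axes='sxyz')[:3, :3]  # replace only the Y channel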
Example #12
def writeAnimation(fp, bone, action, config):
    aname = "Action%s" % bone.name
    points = action[bone.name]

    for channel,default in [
            ("T", 0),
            ("R", 0),
            ("S", 1)
        ]:
        writeAnimationCurveNode(fp, bone, channel, default)

    relmat = bone.getRelativeMatrix(config.meshOrientation, config.localBoneAxis, config.offset)
    translations = []
    eulers = []
    R = 180/math.pi
    for quat in points:
        mat = tm.quaternion_matrix(quat)
        mat = np.dot(relmat, mat)
        translations.append(mat[:3,3])
        eul = tm.euler_from_matrix(mat, axes='sxyz')
        eulers.append((eul[0]*R, eul[1]*R, eul[2]*R))
    scales = len(points)*[(1,1,1)]

    for channel,data in [
            ("T", translations),
            ("R", eulers),
            ("S", scales)
        ]:
        for idx,coord in enumerate(["X","Y","Z"]):
            writeAnimationCurve(fp, idx, coord, bone, channel, data)
Example #13
 def update(self, amt, bone):
     target = amt.bones[self.subtar]
     if self.ownsp == "WORLD":
         halt
     else:
         if self.map_from != "ROTATION" or self.map_to != "ROTATION":
             halt
         brad = tm.euler_from_matrix(target.matrixPose, axes="sxyz")
         arad = []
         for n in range(3):
             if self.from_max[n] == self.from_min[n]:
                 cdeg = self.from_max[n]
             else:
                 bdeg = brad[n] / D
                 if bdeg < self.from_min[n]:
                     bdeg = self.from_min[n]
                 if bdeg > self.from_max[n]:
                     bdeg = self.from_max[n]
                 slope = (self.to_max[n] - self.to_min[n]) / float(self.from_max[n] - self.from_min[n])
                 adeg = slope * (bdeg - self.from_min[n]) + self.to_min[n]
             arad.append(adeg * D)
         mat = tm.euler_matrix(arad[0], arad[1], arad[2], axes="sxyz")
         bone.matrixPose[:3, :3] = mat[:3, :3]
         bone.updateBone()
     return
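     # Unreachable debug logging below; deliberately disabled by the early return above.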
     log.debug("Transform %s %s", bone.name, target.name)
     log.debug("Arad %s", arad)
     log.debug("Brad %s", brad)
     log.debug("P %s", bone.matrixPose)
     log.debug("R %s", bone.matrixRest)
     log.debug("G %s", bone.matrixGlobal)
     pass
Example #14
def writeBoneProp(fp, bone, config):
    id,key = getId("Model::%s" % bone.name)
    fp.write(
'    Model: %d, "%s", "LimbNode" {' % (id,key) +
"""
        Version: 232
        Properties70:  {
            P: "RotationActive", "bool", "", "",1
            P: "InheritType", "enum", "", "",1
            P: "ScalingMax", "Vector3D", "Vector", "",0,0,0
            P: "DefaultAttributeIndex", "int", "Integer", "",0
""")

    mat = bone.getRelativeMatrix(config)
    trans = mat[:3,3]
    e = tm.euler_from_matrix(mat, axes='sxyz')
    fp.write(
'            P: "Lcl Translation", "Lcl Translation", "", "A",%.4f,%.4f,%.4f\n' % tuple(trans) +
'            P: "Lcl Rotation", "Lcl Rotation", "", "A",%.4f,%.4f,%.4f\n' % (e[0]*R, e[1]*R, e[2]*R) +
'            P: "Lcl Scaling", "Lcl Scaling", "", "A",1,1,1\n' +
'            P: "MHName", "KString", "", "", "%s"' % bone.name +
"""
        }
        Shading: Y
        Culling: "CullingOff"
    }
""")
Example #15
    def fromSkeleton(self, skel, animationTrack = None):
        """
        Construct a BVH object from a skeleton structure and optionally an 
        animation track. If no animation track is specified, a dummy animation
        of one frame will be added.
        NOTE: Make sure that the skeleton has only one root.
        """

        # Traverse skeleton joints in depth-first order
        for jointName in skel.getJointNames():
            bone = skel.getBone(jointName)
            if bone.parent:
                joint = self.addJoint(bone.parent.name, jointName)
                joint.channels = ["Zrotation", "Xrotation", "Yrotation"]
            else:
                joint = self.addRootJoint(bone.name)
                joint.channels = ["Xposition", "Yposition", "Zposition", "Zrotation", "Xrotation", "Yrotation"]
    
            offset = bone.getRestOffset()
            self.__calcPosition(joint, offset)
            if not bone.hasChildren():
                endJoint = self.addJoint(jointName, 'End effector')
                offset = bone.getRestTailPos() - bone.getRestHeadPos()
                self.__calcPosition(endJoint, offset)

        self.__cacheGetJoints()
        nonEndJoints = [ joint for joint in self.getJoints() if not joint.isEndConnector() ]

        if animationTrack:
            self.frameCount = animationTrack.nFrames
            self.frameTime = 1.0/animationTrack.frameRate

            jointToBoneIdx = {}
            for joint in nonEndJoints:
                jointToBoneIdx[joint.name] = skel.getBone(joint.name).index

            for fIdx in xrange(animationTrack.nFrames):
                offset = fIdx * animationTrack.nBones
                for jIdx,joint in enumerate(nonEndJoints):
                    bIdx = jointToBoneIdx[joint.name]
                    poseMat = animationTrack.data[offset + bIdx]
                    if len(joint.channels) == 6:
                        # Add transformation
                        tx, ty, tz = poseMat[:3,3]
                        joint.frames.extend([tx, ty, tz])
                    ay,ax,az = tm.euler_from_matrix(poseMat, "syxz")
                    joint.frames.extend([az/D, ax/D, ay/D])
        else:
            # Add bogus animation with one frame
            self.frameCount = 1
            self.frameTime = 1.0
            for jIdx,joint in enumerate(nonEndJoints):
                if len(joint.channels) == 6:
                    # Add transformation
                    joint.frames.extend([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
                else:
                    joint.frames.extend([0.0, 0.0, 0.0])

        for joint in self.getJoints():
            joint.calculateFrames()
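
The joint.frames packing above writes BVH Zrotation/Xrotation/Yrotation channels from a "syxz" factoring; a standalone sketch of that conversion (D = pi/180 as in the surrounding module, tm assumed Gohlke-style):

import math
import transformations as tm

D = math.pi / 180
poseMat = tm.euler_matrix(0.1, 0.2, 0.3, "syxz")
ay, ax, az = tm.euler_from_matrix(poseMat, "syxz")
frames = [az / D, ax / D, ay / D]  # degrees, in Z, X, Y channel order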
Example #16
    def execute(self, iren, event):
        fren = iren.GetRenderWindow().GetRenderers().GetFirstRenderer()

        radar_data = loadRDR(self.rdr_pts[self.count])[0]
        if radar_data.shape[0] > 0:
            #Convert from radar to lidar ref-frame
            radar_data[:, :3] = calibrateRadarPts(radar_data[:, :3], self.radar_params)

            #Convert from lidar to IMU ref-frame
            radar_data[:, :3] = np.dot(self.lidar_params['T_from_l_to_i'][:3, :3], \
                radar_data[:, :3].transpose()).transpose()

            h_radar_data = np.hstack((radar_data[:, :3], np.ones((radar_data.shape[0], 1))))

            radar_data[:, :3] = np.dot(self.imu_transforms[self.count], \
                h_radar_data.transpose()).transpose()[:, :3]
            
            #Insert the Q50 position at the beginning
            me_l = 4.1
            me_w = 2.1
            me_x = self.imu_transforms[self.count, 0, 3]-me_l/2.
            me_y = self.imu_transforms[self.count, 1, 3]
            me_z = self.imu_transforms[self.count, 2, 3]
            me = [me_x, me_y, me_z, me_l, me_w, 0, 0, 0]
            radar_data = np.vstack((np.array(me), radar_data))

            for i in xrange(len(self.radar_actors)):
                fren.RemoveActor(self.radar_actors[i])

            self.radar_actors = []
            self.radar_clouds = []

            for i in xrange(radar_data.shape[0]):
                self.radar_clouds.append(VtkBoundingBox(radar_data[i, :]))
                (ax, ay, az) = euler_from_matrix(self.imu_transforms[self.count])
                box = self.radar_clouds[i].get_vtk_box(rot = az*180/math.pi)
                if i == 0:
                    box.GetProperty().SetColor((1, 0, 0))
                self.radar_actors.append(box)
                fren.AddActor(self.radar_actors[i])

            if self.count == 0:
                self.lidar_cloud = VtkPointCloud(self.lidar_data[:, :3], self.lidar_data[:,3])
                self.lidar_actor = self.lidar_cloud.get_vtk_cloud(zMin=0, zMax=255)
                fren.AddActor(self.lidar_actor)
                self.gpsPointCloud = VtkPointCloud(self.imu_transforms[:, 0:3, 3], 
                                                np.zeros(self.imu_transforms.shape[0]))
                fren.AddActor(self.gpsPointCloud.get_vtk_cloud())

            # if self.count == 0:
                # fren.ResetCamera()
                # fren.GetActiveCamera().Zoom(1.6)
            cam_center = [x for x in self.radar_actors[0].GetCenter()]
            cam_center[2] += 150
            fren.GetActiveCamera().SetPosition(cam_center)
            fren.GetActiveCamera().SetFocalPoint(self.radar_actors[0].GetCenter())

        self.count += 5
        iren.GetRenderWindow().Render()
Example #17
	def __modify_euler( self , a , d ) :
		glPushMatrix()
		glLoadMatrixf( tr.euler_matrix( *a ) )
		glRotatef( d[0] , 0 , 1 , 0 )
		glRotatef( d[1] , 1 , 0 , 0 )
		a = np.array( tr.euler_from_matrix( glGetDoublev(GL_MODELVIEW_MATRIX) ) )
		glPopMatrix()
		return a
Example #18
 def update_config2(self, T, q):
     self.count += 1
     T = numpy.array(T)
     self.localTransformation = T
     self.translation = T[:3,3]
     angle, direction, point = tf.rotation_from_matrix(T)
     self.rpy = tf.euler_from_matrix(T)
     self.rotation = [ direction[0],direction[1],direction[2], angle ]
     self.origin.update()
Example #19
	def newPose(self, poseMsg):
		self.publish()
		q = [poseMsg.orientation.x, poseMsg.orientation.y, poseMsg.orientation.z, poseMsg.orientation.w]
		rq = transformations.quaternion_matrix(q)
		al, be, ga = transformations.euler_from_matrix(rq, 'rxyz')
		self.ga = ga
		x = poseMsg.position.x
		y = poseMsg.position.y
		z = poseMsg.position.z
		self.dist = math.sqrt(x*x + y*y + z*z)
		print al, be, ga
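
The quaternion-to-matrix-to-euler detour above can be shortened with euler_from_quaternion, which the transformations module also provides; a sketch that stays ordering-agnostic by producing the quaternion with the same module:

from transformations import euler_matrix, quaternion_from_matrix, euler_from_quaternion

q = quaternion_from_matrix(euler_matrix(0.1, 0.2, 0.3, 'rxyz'))
al, be, ga = euler_from_quaternion(q, 'rxyz')  # ~ (0.1, 0.2, 0.3)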
Example #20
 def test_euler_from_matrix_random_angles_all_axes(self):
     angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
     successes = []
     failures = []
     for axes in transformations._AXES2TUPLE.keys():
         R0 = transformations.euler_matrix(axes=axes, *angles)
         R1 = transformations.euler_matrix(axes=axes, *transformations.euler_from_matrix(R0, axes))
         if numpy.allclose(R0, R1):
             successes.append(axes)
         else:
             failures.append(axes)
     self.assertEqual(0, len(failures), "Failed for:\n%sand succeeded for:\n%s" % (failures, successes))
Example #21
def compute_state_space_trajectory_and_derivatives(p,psi,dt):

    num_timesteps = len(p)

    psi = trigutils.compute_continuous_angle_array(psi)

    p_dot     = c_[ gradient(p[:,0],dt),     gradient(p[:,1],dt),     gradient(p[:,2],dt)]
    p_dot_dot = c_[ gradient(p_dot[:,0],dt), gradient(p_dot[:,1],dt), gradient(p_dot[:,2],dt)]

    f_thrust_world            = m*p_dot_dot - f_external_world.T.A
    f_thrust_world_normalized = sklearn.preprocessing.normalize(f_thrust_world)

    z_axis_intermediate = c_[ cos(psi), zeros_like(psi), -sin(psi) ]
    y_axis              = f_thrust_world_normalized
    x_axis              = sklearn.preprocessing.normalize(cross(z_axis_intermediate, y_axis))
    z_axis              = sklearn.preprocessing.normalize(cross(y_axis, x_axis))

    theta          = zeros(num_timesteps)
    psi_recomputed = zeros(num_timesteps)
    phi            = zeros(num_timesteps)

    for ti in range(num_timesteps):

        z_axis_ti = c_[matrix(z_axis[ti]),0].T
        y_axis_ti = c_[matrix(y_axis[ti]),0].T
        x_axis_ti = c_[matrix(x_axis[ti]),0].T

        R_world_from_body_ti              = c_[z_axis_ti,y_axis_ti,x_axis_ti,[0,0,0,1]]
        psi_recomputed_ti,theta_ti,phi_ti = transformations.euler_from_matrix(R_world_from_body_ti,"ryxz")

        assert allclose(R_world_from_body_ti, transformations.euler_matrix(psi_recomputed_ti,theta_ti,phi_ti,"ryxz"))

        theta[ti]          = theta_ti
        psi_recomputed[ti] = psi_recomputed_ti
        phi[ti]            = phi_ti

    theta          = trigutils.compute_continuous_angle_array(theta)
    psi_recomputed = trigutils.compute_continuous_angle_array(psi_recomputed)
    phi            = trigutils.compute_continuous_angle_array(phi)

    assert allclose(psi_recomputed, psi)

    psi = psi_recomputed

    theta_dot     = gradient(theta,dt)
    psi_dot       = gradient(psi,dt)
    phi_dot       = gradient(phi,dt)

    theta_dot_dot = gradient(theta_dot,dt)
    psi_dot_dot   = gradient(psi_dot,dt)
    phi_dot_dot   = gradient(phi_dot,dt)

    return p, p_dot, p_dot_dot, theta, theta_dot, theta_dot_dot, psi, psi_dot, psi_dot_dot, phi, phi_dot, phi_dot_dot
Example #22
 def __init__(self, dx, dy, dz, rx, ry, rz, surfID):
     self.dx=dx
     self.dy=dy
     self.dz=-dz
     self.surfID=surfID #surface id corresponding to names, zmxSurfNos, and phosimSurfNos lists.
     M=tf.euler_matrix(m.radians(rx),m.radians(ry),m.radians(rz),'rxyz')
     phi,theta,psi=tf.euler_from_matrix(M,'rzxz')
     theta=-theta
     while phi < 0:
         phi+=2*m.pi
     while theta < 0:
         theta+=2*m.pi
     self.phi=phi
     self.theta=theta
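
The constructor above re-expresses an intrinsic x-y-z rotation as z-x-z Euler angles; the conversion itself is just a round trip through the matrix:

import math as m
import transformations as tf

M = tf.euler_matrix(m.radians(10), m.radians(20), m.radians(30), 'rxyz')
phi, theta, psi = tf.euler_from_matrix(M, 'rzxz')  # same rotation, z-x-z convention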
Example #23
    def stretchTo(self, goal, doStretch):
        """
        Orient bone to point to goal position. Set doStretch to true to
        position the tail joint at goal, false to maintain length of this bone.
        """
        length, self.matPoseGlobal = _getMatrix(self.getHead(), goal, 0)
        if doStretch:
            factor = length/self.length
            self.matPoseGlobal[:3,1] *= factor
        pose = self.getPoseFromGlobal()

        az,ay,ax = tm.euler_from_matrix(pose, axes='szyx')
        rot = tm.rotation_matrix(-ay + self.roll_angle, Bone.Axes[1])
        self.matPoseGlobal[:3,:3] = np.dot(self.matPoseGlobal[:3,:3], rot[:3,:3])
Example #24
def pose_to_vector(pose):
    t = pose[:3,3]
    eul = euler_from_matrix(pose[:3,:3])
    
    # correct euler angles to face the camera
    eul = list(eul)
    for i in range(3):
        if eul[i] > np.pi / 2.0:
            eul[i] = eul[i] - np.pi
        elif eul[i] < np.pi / 2.0:
            eul[i] = eul[i] + np.pi
    
    vec = np.r_[t, eul]
    return vec
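
A hypothetical inverse of pose_to_vector, assuming the same 6-vector layout (xyz translation followed by default-'sxyz' Euler angles):

import numpy as np
from transformations import euler_matrix

def vector_to_pose(vec):
    pose = euler_matrix(vec[3], vec[4], vec[5])  # 4x4 with the rotation block filled
    pose[:3, 3] = vec[:3]                        # restore the translation column
    return pose

pose = vector_to_pose(np.r_[1.0, 2.0, 3.0, 0.1, 0.2, 0.3])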
Example #25
 def update(self, amt, bone):
     target = amt.bones[self.subtar]
     if self.ownsp == "WORLD":
         mat = target.matrixGlobal
         bone.matrixGlobal[:3, :3] += self.influence * (mat[:3, :3] - bone.matrixGlobal[:3, :3])
     else:
         ay, ax, az = tm.euler_from_matrix(bone.matrixPose, axes="syxz")
         by, bx, bz = tm.euler_from_matrix(target.matrixPose, axes="syxz")
         if self.usex:
             ax += self.influence * (bx - ax)
         if self.usey:
             ay += self.influence * (by - ay)
         if self.usez:
             az += self.influence * (bz - az)
         testbones = ["DfmUpArm1_L", "DfmUpArm2_L", "DfmLoArm1_L", "DfmLoArm2_L", "DfmLoArm3_L"]
         if bone.name in testbones:
             log.debug("%s %s", bone.name, target.name)
             log.debug("%s", str(bone.matrixPose))
             log.debug("%s", str(target.matrixPose))
         bone.matrixPose = tm.euler_matrix(ay, ax, az, axes="syxz")
         if bone.name in testbones:
             log.debug("%s", str(bone.matrixPose))
         bone.updateBone()
Example #26
    def euler_angles_and_eigenframes(self):
        '''Calculate the Euler angles only if the rotation matrix
           (eigenframe) has positive determinant.'''
        signs = np.array([[1, 1, 1], [-1, 1, 1], [1, -1, 1],
                          [1, 1, -1], [-1, -1, 1], [-1, 1, -1],
                          [1, -1, -1], [-1, -1, -1]])
        eulangs = []
        eigframes = []
        for i, sign in enumerate(signs):
            eigframe = np.dot(self.eigvecs, np.diag(sign))
            if np.linalg.det(eigframe) > 1e-4:
                eigframes.append(np.array(eigframe))
                eulangs.append(np.array(
                    transformations.euler_from_matrix(eigframe, axes='szyz')))

        self.eigframes = np.array(eigframes)
        # The sign has to be inverted to be consistent with ORCA and EasySpin.
        self.eulangs = -np.array(eulangs)
Example #27
 def remove_generic_objects(self):
     generic_children = [child for child in self.children
                         if (type(child) == GenericObject and child.children[:])]
     while generic_children[:]:
         for child in generic_children:
             for grandchild in child.children[:]:
                 grandchild.localTransformation = numpy.dot(child.localTransformation,
                                                           grandchild.localTransformation)
                 grandchild.translation =  grandchild.localTransformation[:3,3]
                 angle, direction, point = tf.rotation_from_matrix(grandchild.localTransformation)
                 grandchild.rotation = list(direction) + [angle]
                 grandchild.rpy = tf.euler_from_matrix(grandchild.localTransformation)
                 grandchild.scale = [grandchild.scale[i]*child.scale[i] for i in range(3)]
                 self.add_child(grandchild)
             self.children.remove(child)
             del child
         generic_children = [child for child in self.children
                             if (type(child) == GenericObject and child.children[:])]
     for child in self.children:
         child.remove_generic_objects()
Example #28
def writeBoneProp(fp, bone, config):
    import fbx_utils

    id, key = getId("Model::%s" % bone.name)

    mat = bone.getRelativeMatrix(config.meshOrientation, config.localBoneAxis, config.offset)
    trans = mat[:3, 3]
    e = tm.euler_from_matrix(mat, axes="sxyz")

    properties = [
        ("RotationActive", "p_bool", 1),
        ("InheritType", "p_enum", 1),
        ("ScalingMax", "p_vector_3d", [0, 0, 0]),
        ("DefaultAttributeIndex", "p_integer", 0),
        ("Lcl Translation", "p_lcl_translation", list(trans), True),
        ("Lcl Rotation", "p_lcl_rotation", [e[0] * R, e[1] * R, e[2] * R], True),
        ("Lcl Scaling", "p_lcl_scaling", [1, 1, 1], True),
        ("MHName", "p_string", bone.name, False, True),
    ]

    if config.binary:
        from . import fbx_binary

        elem = fbx_binary.get_child_element(fp, "Objects")
        fbx_binary.fbx_data_skeleton_bone_model(elem, key, id, properties)
        return

    fp.write(
        '    Model: %d, "%s", "LimbNode" {' % (id, key)
        + """
        Version: 232
        Properties70:  {
"""
        + fbx_utils.get_ascii_properties(properties, indent=3)
        + """
        }
        Shading: Y
        Culling: "CullingOff"
    }
"""
    )
Example #29
def test_obj_pose():
    from transformations import rotation_matrix, concatenate_matrices, euler_from_matrix
    from utils import _axis_rot_matrices, _ypr_from_rot_matrix
    alpha, beta, gamma = 0.123, -1.234, 2.345
    origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
    # I = identity_matrix()
    Rx_tf = rotation_matrix(gamma, xaxis)
    Ry_tf = rotation_matrix(beta, yaxis)
    Rz_tf = rotation_matrix(alpha, zaxis)

    obj = Obj("obj")
    Rz, Ry, Rx = _axis_rot_matrices([alpha, beta, gamma, 0., 0., 0.])
    assert np.allclose(Rx_tf[:3,:3], Rx)
    assert np.allclose(Ry_tf[:3,:3], Ry)
    assert np.allclose(Rz_tf[:3,:3], Rz)
    R = concatenate_matrices(Rz_tf, Ry_tf, Rx_tf)
    rot_mat = np.dot(Rz, np.dot(Ry, Rx))
    euler = euler_from_matrix(R, 'sxyz')
    assert np.allclose(R[:3,:3], rot_mat)
    assert np.allclose([gamma, beta, alpha], euler)
    assert np.allclose([alpha, beta, gamma], _ypr_from_rot_matrix(R[:3,:3]))
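
concatenate_matrices in the test above is simply an ordered matrix product; a quick equivalence check:

import numpy as np
from transformations import rotation_matrix, concatenate_matrices

Rz = rotation_matrix(0.5, (0, 0, 1))
Ry = rotation_matrix(-0.2, (0, 1, 0))
assert np.allclose(concatenate_matrices(Rz, Ry), Rz.dot(Ry))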
Example #30
 def setRotationIndex(self, index, angle, useQuat):
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matrixPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matrixPose = tm.quaternion_matrix(quat)
         return quat[0]*1000    
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matrixPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matrixPose[:3,:3] = mat[:3,:3]
         return 1000.0
Example #31
def generate_integrated_xml(env_xml_file, obj_xml_file, scale=1.0,
                            mass=None, euler=None,
                            target_size=None,
                            add_bbox_indicator=False,
                            randomize_color=False,
                            include_obj_info=False,
                            verbose=False,
                            prefix="",
                            obj_name=None,
                            remove_site=True):

    """
    This function takes an env xml file, and put an obj in the obj_xml_file
    to the env to create a integrated file describing the final env.

    inputs:
        env_xml_file: a xml file under quantize-gym/gym/envs/robotics/assets
        obj_xml_file: obsolute path for the obj xml
        scale: scale of the object
        add_bbox_indicator: [True or False] if True add a box object with free joint
            which does not take part in the collision
    output:
        final_xml_file: an integrated xml file (relative path) under quantize-gym/gym/envs/robotics/assets
    """
    # get the extent of the bounding box
    coords, combined_mesh = pcp_utils.np_vis.compute_bounding_box_from_obj_xml(
            obj_xml_file, np.asarray([0, 0, 0]), scale=scale, euler=euler, return_combined_mesh=True)

    # center of the mesh
    center = combined_mesh.vertices.mean(axis=0)
    #print(f'center is {center}')
    centroid = combined_mesh.centroid

    # center_str, centroid_str
    center_str = ' '.join([str(c) for c in center])
    centroid_str = ' '.join([str(c) for c in centroid])

    obj_tree = ET.parse(obj_xml_file)
    obj_root = obj_tree.getroot()

    # find the mesh in the obj file
    mesh_elems = list()
    is_mesh_obj = False
    for m in obj_root.iter('mesh'):
        is_mesh_obj = True
        if 'scale' not in m.attrib:
            m.attrib['scale'] = "1 1 1"

        scale_from_xml = [float(s) for s in  m.attrib['scale'].split(" ")]
        scale_from_xml = [str(s * scale) for s in scale_from_xml]
        m.attrib['scale'] = " ".join(scale_from_xml)
        mesh_elems.append(m)

    # find the geom in the obj file
    obj_collision_body = None
    obj_body = obj_root.find('worldbody').find('body')
    #print(obj_body, obj_body.attrib['name'])
    #if obj_body.attrib['name'] == 'puck':
    for obj_body_part in  obj_body.findall('body'):
        if obj_body_part.attrib['name'] == 'collision':
            obj_collision_body = obj_body_part
            break

    if obj_collision_body is None:
        print(f"cannot find collision body in the object file")
        raise ValueError


    # load the env file
    gym_xml_path = os.path.join(pcp_utils.utils.get_gym_dir(), 'gym/envs/robotics/assets')
    full_env_xml_file = os.path.join(gym_xml_path, env_xml_file)
    if not os.path.exists(full_env_xml_file):
        print(f"cannot find the env file: {full_env_xml_file}")
    env_tree = ET.parse(full_env_xml_file)
    env_root = env_tree.getroot()

    #insert object asset (meshes) to the file after the include
    insert_id = 0
    new_asset = ET.Element('asset')
    for mesh in mesh_elems:
        mfp = mesh.attrib['file']
        mfp = pcp_utils.utils.maybe_refine_mesh_path(mfp)
        mesh.attrib['file'] = mfp
        new_asset.append(mesh)

    for elem_id, elem in enumerate(env_root.iter()):

        if elem.tag == "include":
            insert_id = elem_id
            break

    env_root.insert(insert_id - 1, new_asset)  # obj_root.find('asset')

    # find object in the worldbody and replace it with geom from the obj xml
    worldbody_root = env_root.find('worldbody')
    # todo this part 
    object_to_find = 'object0'
    object_body_in_env = None
    for body in worldbody_root.iter('body'):
        if body.attrib['name'] ==  object_to_find:
            object_body_in_env = body

    if  object_body_in_env is None:
        print(f"cannot find {object_to_find} placeholder in the env file")
        raise ValueError

    # replace the object geom in the env
    for g in object_body_in_env.findall('geom'):
        object_body_in_env.remove(g)


    num_geoms = len(obj_collision_body.findall('geom'))
    for g in obj_collision_body.findall('geom'):

        if euler is not None:
            # rotate each geom
            if 'euler' not in g.attrib:
                current_euler = [0, 0, 0]
            else:
                # zyx
                current_euler = [float(a) for a in g.attrib['euler'].split(" ")][::-1]
            alpha, beta, gamma = current_euler
            dalpha, dbeta, dgamma = euler
            rot_mat = transformations.euler_matrix(alpha, beta, gamma)
            d_rot_mat = transformations.euler_matrix(dalpha, dbeta, dgamma)
            new_euler = transformations.euler_from_matrix(np.matmul(d_rot_mat, rot_mat), 'rxyz')
            euler_str = " ".join([str(a) for a in new_euler])

            g.attrib['euler'] = euler_str

        g.attrib["solref"] = "0.000002 1"
        g.attrib["solimp"] = "0.99 0.99 0.01"

        if "mass" not in g.attrib:
            g.attrib["mass"] = str(0.01/num_geoms)

        if mass is not None:
            g.attrib["mass"] = str(mass/num_geoms)
        # condim="4" mass="0.01"]

        if randomize_color:
            rgb = np.random.rand(3).tolist()
            rgb_str = " ".join([str(f) for f in rgb])
            rgb_str += " 1"
            g.attrib["rgba"] = rgb_str #"1 0 0 1"

        if not is_mesh_obj and scale is not None: # native geom object
            if 'size' not in g.attrib:
                g.attrib['size'] = "1 1 1"
            if 'pos' not in g.attrib:
                g.attrib['pos'] = "0 0 0"

            size_from_xml = [float(s) for s in  g.attrib['size'].split(" ")]
            size_from_xml = [str(s * scale) for s in size_from_xml]
            g.attrib['size'] = " ".join(size_from_xml)

            pos_from_xml = [float(s) for s in  g.attrib['pos'].split(" ")]
            pos_from_xml = [str(s * scale) for s in pos_from_xml]
            g.attrib['pos'] = " ".join(pos_from_xml)

        #g.attrib.pop("solimp")
        #g.attrib.pop("solref")

        object_body_in_env.append(g)

    num_site = len(obj_collision_body.findall('site'))
    if num_site > 0:
        for s in object_body_in_env.findall('site'):
            object_body_in_env.remove(s)
        for s in obj_collision_body.findall('site'):
            # here add the center string as the site
            object_body_in_env.append(s)

    if remove_site:
        # making them super small
        for body in worldbody_root.iter('body'):
            for s in body.findall('site'):
                s.attrib['rgba'] = "0 0 1 0"
        for site in worldbody_root.findall('site'):
            site.attrib['rgba'] = "0 0 1 0"


    # site_elem = object_body_in_env.findall('site')
    # site_elem[0].attrib['pos'] = center_str

    # add the indicator bbox object if necessary
    if add_bbox_indicator:
        bounds, center, extents = pcp_utils.np_vis.get_bbox_attribs(coords)
        # MuJoCo requires half-sizes for the box (took a few hours to figure out)
        extents = extents / 2.0
        size = ' '.join([str(e) for e in extents])

        # create a body subelement in worldbody_element
        body_element = ET.SubElement(worldbody_root, 'body',
                name='bbox_indicator', pos='0 0 0')
        # add the geom to the body element
        ET.SubElement(body_element, 'geom', name="bbox_indicator_geom",
                type='box', contype='0', conaffinity='0',
                size=size, rgba='0 0 1 0.3')
        # finally add a free joint to the indicator
        ET.SubElement(body_element, 'joint', type='free',
                name='bbox_indicator_joint')

    indent(env_root)

    if prefix == "":
        final_xml_file =  f'fetch/generated_env.xml'
    else:

        final_xml_file =  f'fetch/{prefix}_generated_env.xml'
    env_tree.write(os.path.join(gym_xml_path,  final_xml_file))
    return final_xml_file
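
The per-geom euler update above composes the geom's existing rotation with the requested extra rotation through matrices before converting back to 'rxyz' angles; the kernel of that step, standalone (angles made up):

import numpy as np
import transformations

alpha, beta, gamma = 0.0, 0.0, 1.57      # geom's current euler angles (reversed, as read above)
dalpha, dbeta, dgamma = 0.1, 0.0, 0.0    # requested extra rotation
rot_mat = transformations.euler_matrix(alpha, beta, gamma)
d_rot_mat = transformations.euler_matrix(dalpha, dbeta, dgamma)
new_euler = transformations.euler_from_matrix(np.matmul(d_rot_mat, rot_mat), 'rxyz')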
Example #32
def compute_state_space_trajectory_and_derivatives(p_body,p_look_at,y_axis_cam_hint,dt):

    num_timesteps = len(p_body)

    #
    # compute the yaw, roll, and pitch of the quad using differential flatness
    #
    p_body_dot     = c_[ gradient(p_body[:,0],dt),     gradient(p_body[:,1],dt),     gradient(p_body[:,2],dt)]
    p_body_dot_dot = c_[ gradient(p_body_dot[:,0],dt), gradient(p_body_dot[:,1],dt), gradient(p_body_dot[:,2],dt)]

    f_thrust_world            = m*p_body_dot_dot - f_external_world.T.A
    f_thrust_world_normalized = sklearn.preprocessing.normalize(f_thrust_world)

    y_axis_body = f_thrust_world_normalized    
    z_axis_body = sklearn.preprocessing.normalize(cross(y_axis_body, p_look_at - p_body))
    x_axis_body = sklearn.preprocessing.normalize(cross(z_axis_body, y_axis_body))

    R_world_from_body = zeros((num_timesteps,4,4))
    theta_body        = zeros(num_timesteps)
    psi_body          = zeros(num_timesteps)
    phi_body          = zeros(num_timesteps)

    for ti in range(num_timesteps):

        z_axis_body_ti = c_[matrix(z_axis_body[ti]),0].T
        y_axis_body_ti = c_[matrix(y_axis_body[ti]),0].T
        x_axis_body_ti = c_[matrix(x_axis_body[ti]),0].T

        R_world_from_body_ti                  = c_[z_axis_body_ti,y_axis_body_ti,x_axis_body_ti,[0,0,0,1]]
        psi_body_ti,theta_body_ti,phi_body_ti = transformations.euler_from_matrix(R_world_from_body_ti,"ryxz")

        assert allclose(R_world_from_body_ti, transformations.euler_matrix(psi_body_ti,theta_body_ti,phi_body_ti,"ryxz"))

        R_world_from_body[ti] = R_world_from_body_ti
        theta_body[ti]        = theta_body_ti
        psi_body[ti]          = psi_body_ti
        phi_body[ti]          = phi_body_ti

    psi_body   = trigutils.compute_continuous_angle_array(psi_body)
    theta_body = trigutils.compute_continuous_angle_array(theta_body)
    phi_body   = trigutils.compute_continuous_angle_array(phi_body)

    #
    # now that we have the full orientation of the quad, compute the full orientation of the camera relative to the quad
    #
    x_axis_cam = sklearn.preprocessing.normalize( p_look_at - p_body )
    z_axis_cam = sklearn.preprocessing.normalize( cross(y_axis_cam_hint, x_axis_cam) )
    y_axis_cam = sklearn.preprocessing.normalize( cross(x_axis_cam,      z_axis_cam) )

    theta_cam  = zeros(num_timesteps)
    psi_cam    = zeros(num_timesteps)
    phi_cam    = zeros(num_timesteps)

    for ti in range(num_timesteps):

        z_axis_cam_ti = c_[matrix(z_axis_cam[ti]),0].T
        y_axis_cam_ti = c_[matrix(y_axis_cam[ti]),0].T
        x_axis_cam_ti = c_[matrix(x_axis_cam[ti]),0].T

        R_world_from_cam_ti  = matrix(c_[z_axis_cam_ti, y_axis_cam_ti, x_axis_cam_ti, [0,0,0,1]])
        R_world_from_body_ti = matrix(R_world_from_body[ti])
        R_body_from_cam_ti   = R_world_from_body_ti.T*R_world_from_cam_ti

        psi_cam_ti, theta_cam_ti, phi_cam_ti = transformations.euler_from_matrix(R_body_from_cam_ti,"ryxz")

        #
        # sanity check that world-from-body rotation matrix we compute actually
        # maps the vector [0,0,1] to the quadrotor x axis
        #
        assert allclose(c_[matrix(x_axis_body[ti]),1].T, R_world_from_body_ti*matrix([0,0,1,1]).T)

        #
        # sanity check that the world-from-camera rotation matrix we compute actually
        # maps the vector [0,0,1] to the camera x axis
        #
        assert allclose(c_[matrix(x_axis_cam[ti]),1].T, R_world_from_cam_ti*matrix([0,0,1,1]).T)

        #
        # sanity check that the world-from-body and body-from-camera rotation matrices
        # we compute actually maps the vector [0,0,1] to the camera x axis
        #        
        assert allclose(c_[matrix(x_axis_cam[ti]),1].T, R_world_from_body_ti*R_body_from_cam_ti*matrix([0,0,1,1]).T)

        theta_cam[ti] = theta_cam_ti
        psi_cam[ti]   = psi_cam_ti
        phi_cam[ti]   = phi_cam_ti

    theta_cam = trigutils.compute_continuous_angle_array(theta_cam)
    psi_cam   = trigutils.compute_continuous_angle_array(psi_cam)
    phi_cam   = trigutils.compute_continuous_angle_array(phi_cam)

    #
    # assert that we never need any camera yaw in the body frame of the quad
    #
    assert allclose(psi_cam, 0)

    #
    # compute derivatives
    #
    theta_body_dot     = gradient(theta_body,dt)
    psi_body_dot       = gradient(psi_body,dt)
    phi_body_dot       = gradient(phi_body,dt)

    theta_cam_dot      = gradient(theta_cam,dt)
    psi_cam_dot        = gradient(psi_cam,dt)
    phi_cam_dot        = gradient(phi_cam,dt)

    theta_body_dot_dot = gradient(theta_body_dot,dt)
    psi_body_dot_dot   = gradient(psi_body_dot,dt)
    phi_body_dot_dot   = gradient(phi_body_dot,dt)

    theta_cam_dot_dot  = gradient(theta_cam_dot,dt)
    psi_cam_dot_dot    = gradient(psi_cam_dot,dt)
    phi_cam_dot_dot    = gradient(phi_cam_dot,dt)

    return p_body, p_body_dot, p_body_dot_dot, theta_body, theta_body_dot, theta_body_dot_dot, psi_body, psi_body_dot, psi_body_dot_dot, phi_body, phi_body_dot, phi_body_dot_dot, theta_cam, theta_cam_dot, theta_cam_dot_dot, psi_cam, psi_cam_dot, psi_cam_dot_dot, phi_cam, phi_cam_dot, phi_cam_dot_dot
Example #33
                    H_aeroRef_aeroBody = H_body_camera.dot(
                        H_aeroRef_aeroBody.dot(H_camera_body))

                # Realign heading to face north using initial compass data
                if compass_enabled == 1:
                    H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot(
                        tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))

                # Show debug messages here
                if debug_enable == 1:
                    os.system(
                        'clear'
                    )  # This helps in displaying the messages to be more readable
                    progress("DEBUG: Raw RPY[deg]: {}".format(
                        np.array(
                            tf.euler_from_matrix(H_T265Ref_T265body, 'sxyz')) *
                        180 / m.pi))
                    progress("DEBUG: NED RPY[deg]: {}".format(
                        np.array(
                            tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz')) *
                        180 / m.pi))
                    progress("DEBUG: Raw pos xyz : {}".format(
                        np.array([
                            data.translation.x, data.translation.y,
                            data.translation.z
                        ])))
                    progress("DEBUG: NED pos xyz : {}".format(
                        np.array(
                            tf.translation_from_matrix(H_aeroRef_aeroBody))))

except Exception as e:
Example #34
    def __init__(self, base_dir, sequences):
        self.truncated_seq_sizes = []
        self.end_of_sequence_indices = []

        total_num_examples = 0

        for seq in sequences:
            seq_data = pykitti.odometry(base_dir, seq, frames=range(0, 11))
            num_frames = len(seq_data.poses)

            # frames beyond a whole multiple of config.timesteps are discarded
            num_examples = (num_frames - 1) // config.timesteps
            self.truncated_seq_sizes.append(num_examples * config.timesteps +
                                            1)
            total_num_examples += num_examples

        # examples beyond a whole multiple of config.batch_size are discarded
        examples_per_batch = total_num_examples // config.batch_size
        # +1 adjusts for the extra image in the last time step
        frames_per_batch = examples_per_batch * (config.timesteps + 1)

        # since some examples will be discarded, readjust the truncated_seq_sizes
        deleted_frames = (total_num_examples - examples_per_batch *
                          config.batch_size) * config.timesteps
        for i in range(len(self.truncated_seq_sizes) - 1, -1, -1):
            if self.truncated_seq_sizes[i] > deleted_frames:
                self.truncated_seq_sizes[i] -= deleted_frames
                break
            else:
                deleted_frames -= self.truncated_seq_sizes[i]
                self.truncated_seq_sizes[i] = 0

        # for storing all training
        self.input_frames = np.zeros([
            frames_per_batch, config.batch_size, config.input_channels,
            config.input_height, config.input_width
        ],
                                     dtype=np.uint8)
        poses = np.zeros([frames_per_batch, config.batch_size, 4, 4],
                         dtype=np.float32)

        num_image_loaded = 0
        for i_seq, seq in enumerate(sequences):
            seq_data = pykitti.odometry(base_dir, seq)
            length = self.truncated_seq_sizes[i_seq]

            for i_img in range(length):

                if i_img % 50 == 0:
                    print("Loading sequence %s %.1f%% " %
                          (seq, (i_img / length) * 100))

                i = num_image_loaded % frames_per_batch
                j = num_image_loaded // frames_per_batch

                # swap axis to channels first
                img = seq_data.get_cam0(i_img)
                img = img.resize((config.input_width, config.input_height))
                img = np.array(img)
                img = np.reshape(
                    img, [img.shape[0], img.shape[1], config.input_channels])
                img = np.moveaxis(np.array(img), 2, 0)
                pose = seq_data.poses[i_img]

                self.input_frames[i, j] = img
                poses[i, j] = pose
                num_image_loaded += 1

                print(i_img)

                # if at end of a sequence
                if i_img != 0 and i_img != length - 1 and i_img % config.timesteps == 0:
                    i = num_image_loaded % frames_per_batch
                    j = num_image_loaded // frames_per_batch
                    self.input_frames[i, j] = img
                    poses[i, j] = pose

                    # save this index, so we know where the next sequence begins
                    self.end_of_sequence_indices.append((
                        i,
                        j,
                    ))

                    num_image_loaded += 1

                    print(i_img)

                gc.collect()  # force garbage collection

        # make sure everything is fully loaded
        assert (num_image_loaded == frames_per_batch * config.batch_size)

        # now convert all the ground truth from 4x4 to xyz + quat
        self.se3_ground_truth = np.zeros(
            [frames_per_batch, config.batch_size, 7], dtype=np.float32)
        for i in range(0, self.se3_ground_truth.shape[0]):
            for j in range(0, self.se3_ground_truth.shape[1]):
                translation = transformations.translation_from_matrix(poses[i,
                                                                            j])
                quat = transformations.quaternion_from_matrix(poses[i, j])
                self.se3_ground_truth[i,
                                      j] = np.concatenate([translation, quat])

        # extract the relative motion between frames
        self.fc_ground_truth = np.zeros(
            [frames_per_batch, config.batch_size, 6], dtype=np.float32)
        # going through rows, then columns
        for i in range(0, self.fc_ground_truth.shape[0]):
            for j in range(0, self.fc_ground_truth.shape[1]):

                # always identity at the beginning of the sequence
                if i % (config.timesteps + 1) == 0:
                    m = transformations.identity_matrix()
                else:
                    # matrix product, not elementwise multiplication
                    m = np.linalg.inv(poses[i - 1, j]).dot(poses[i, j])
                translation = transformations.translation_from_matrix(m)
                ypr = transformations.euler_from_matrix(m)
                self.fc_ground_truth[i, j] = np.concatenate([translation, ypr])
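
The 4x4-to-7-vector conversion used for se3_ground_truth above, in isolation (pose values made up):

import numpy as np
import transformations

pose = transformations.euler_matrix(0.1, 0.2, 0.3)
pose[:3, 3] = [1.0, 2.0, 3.0]
translation = transformations.translation_from_matrix(pose)
quat = transformations.quaternion_from_matrix(pose)
se3 = np.concatenate([translation, quat])  # xyz + quaternion, 7 dims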
Example #35
        converged = np.all(np.logical_or(dist <= tol, dist >= max_dist))
        i += 1
    return dist


tsdf, rgb, depth, pose = get_scene_data()
# rgb = np.array(rgb)[::skip, ::skip]
depth = np.array(depth)
ray_depth = np.reshape(depth[-1::-skip, ::skip].T, (-1,)) / 1000
upper_depth = 4
ray_depth[ray_depth > upper_depth] = upper_depth

print('mid depth: %d' % depth[depth.shape[0] // 2, depth.shape[1] // 2])
# print(euler_from_matrix(pose))
# print(t)
angles = list(euler_from_matrix(pose))
print('angles: %s' % str(angles))
print('t: %s' % str(pose[:3, 3]))
# angles[1] *= -1
# pose[:3, :3] = euler_matrix(*angles)[:3, :3]
rays0 = get_rays()
s = 8
i, j, k = np.where(tsdf.data[::s, ::s, ::s] < 0)
i *= s
j *= s
k *= s
ijk = np.stack((i, j, k), axis=-1)
xyz = tsdf.ijk_to_xyz(ijk)

R = pose[:3, :3]
t = pose[:3, 3]
Example #36
 def getRotation(self):
     qw,qx,qy,qz = tm.quaternion_from_matrix(self.matrixPose)
     ax,ay,az = tm.euler_from_matrix(self.matrixPose, axes='sxyz')
     return (1000*qw,1000*qx,1000*qy,1000*qz, ax/D,ay/D,az/D)
Example #37
def writeAnimationBone(fp, bone, anim, config):
    aname = "Anim_%s_%s" % (anim.name, goodBoneName(bone.name))

    fp.write(
        '    <animation id="%s_pose_matrix">\n' % aname +
        '      <source id="%s_pose_matrix-input">\n' % aname +
        '        <float_array id="%s_pose_matrix-input-array" count="%d">' % (aname, anim.nFrames))

    # TIME POINTS
    timepoints = np.asarray(list(range(anim.nFrames)), dtype=np.float32) * (1.0/anim.frameRate)
    fp.write(' '.join(["%g" % t for t in timepoints]))

    fp.write(
        '</float_array>\n' +
        '        <technique_common>\n' +
        '          <accessor source="#%s_pose_matrix-input-array" count="%d" stride="1">\n' % (aname, anim.nFrames) +
        '            <param name="TIME" type="float"/>\n' +
        '          </accessor>\n' +
        '        </technique_common>\n' +
        '      </source>\n' +
        '      <source id="%s_pose_matrix-output">\n' % aname +
        '        <float_array id="%s_pose_matrix-output-array" count="%d">\n' % (aname, 16*anim.nFrames))

    string = '          '
    relmat = bone.getRelativeMatrix(config.meshOrientation, config.localBoneAxis, config.offset)
    restmat = bone.getRestMatrix(config.meshOrientation, config.localBoneAxis, config.offset)
    #print bone.index, anim.nBones
    mats = anim.data[bone.index::anim.nBones]  # Get all pose matrices for this bone
    I = np.identity(4, dtype=np.float32)
    parents = []
    bone_ = bone
    while bone_.parent:
        relmat = bone_.parent.getRelativeMatrix(config.meshOrientation, config.localBoneAxis, config.offset)
        parents.append((bone_.parent.index, relmat))
        bone_ = bone_.parent
    for f_idx, mat0 in enumerate(mats):
        # TODO poses currently only work for default local bone axis
        #I[:3,:4] = mat0[:3,:4]
        #relmat[:3,:3] = np.identity(3, dtype=np.float32)
        #if 'clavicle' in bone.name or 'shoulder' in bone.name:
        #if bone.level < 6:
        #    I = np.identity(4, dtype=np.float32)
        #mat = np.dot(relmat, I)
        '''
        mat = I.copy()
        for b_idx in reversed(parents):
            I[:3,:4] = anim.data[f_idx * anim.nBones + b_idx]
            #mat = np.dot(np.dot(relmat, I), mat)
            mat = np.dot(mat,la.inv(I))
        mat = np.dot(relmat, mat)
        '''

        mat = np.identity(4, dtype=np.float32)
        for b_idx, rel in reversed(parents):
            #rel = parent.getRelativeMatrix(config.meshOrientation, config.localBoneAxis, config.offset)
            #b_idx = parent.index
            I[:3,:4] = anim.data[f_idx * anim.nBones + b_idx]
            #mat = np.dot(la.inv(rel), np.dot(mat, np.dot(rel, I)))
            #mat = np.dot(mat, I)
            mat = np.dot(I, mat)

        I[:3,:4] = mat0[:3,:4]
        mat = np.dot(mat, np.dot(relmat, I))

        '''
        if self.parent:
            self.matPoseGlobal = np.dot(self.parent.matPoseGlobal, np.dot(self.matRestRelative, self.matPose))
        else:
            self.matPoseGlobal = np.dot(self.matRestRelative, self.matPose)
        '''

        '''
        # R * K * iR * R_dae = M
        mat = np.dot(la.inv(relmat), restmat)
        mat = np.dot(I, mat)
        mat = np.dot(relmat, I)
        '''

        string += ''.join(['%g %g %g %g  ' % tuple(mat[i,:]) for i in range(4)])
        string += '\n          '
    fp.write(string)
    def _to_degrees(rad):
        return 180 * rad / np.pi
    I = np.identity(4, dtype=np.float32)
    I[:3,:3] = mats[0,:3,:3]
    print(bone.name, list(map(_to_degrees, tm.euler_from_matrix(I))))

    fp.write(
        '</float_array>\n' +
        '        <technique_common>\n' +
        '          <accessor source="#%s_pose_matrix-output-array" count="%d" stride="16">\n' % (aname, anim.nFrames) +
        '            <param name="TRANSFORM" type="float4x4"/>\n' +
        '          </accessor>\n' +
        '        </technique_common>\n' +
        '      </source>\n' +
        '      <source id="%s_pose_matrix-interpolation">\n' % aname +
        '        <Name_array id="%s_pose_matrix-interpolation-array" count="%d">' % (aname, anim.nFrames))

    fp.write(anim.nFrames * 'LINEAR ')

    fp.write(
        '</Name_array>\n' +
        '        <technique_common>\n' +
        '          <accessor source="#%s_pose_matrix-interpolation-array" count="%d" stride="1">\n' % (aname, anim.nFrames) +
        '            <param name="INTERPOLATION" type="name"/>\n' +
        '          </accessor>\n' +
        '        </technique_common>\n' +
        '      </source>\n' +
        '      <sampler id="%s_pose_matrix-sampler">\n' % aname +
        '        <input semantic="INPUT" source="#%s_pose_matrix-input"/>\n' % aname +
        '        <input semantic="OUTPUT" source="#%s_pose_matrix-output"/>\n' % aname +
        '        <input semantic="INTERPOLATION" source="#%s_pose_matrix-interpolation"/>\n' % aname +
        '      </sampler>\n' +
        '      <channel source="#%s_pose_matrix-sampler" target="%s/transform"/>\n' % (aname, goodBoneName(bone.name)) +
        '    </animation>\n')
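
The per-frame loop above is just forward kinematics: a bone's global pose is the product of its ancestors' pose matrices, its rest-relative matrix, and its local pose. A minimal standalone sketch of that composition, assuming plain 4x4 numpy arrays ordered root-first (the names here are illustrative, not from the exporter):

import numpy as np

def compose_global_pose(ancestor_poses, rel_rest, local_pose):
    # ancestor_poses: list of 4x4 pose factors for the parent chain, root first
    # rel_rest: this bone's rest matrix relative to its parent
    # local_pose: this bone's local pose for the current frame
    mat = np.identity(4, dtype=np.float32)
    for ancestor in ancestor_poses:
        mat = np.dot(mat, ancestor)
    return np.dot(mat, np.dot(rel_rest, local_pose))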
Example #38
0
        (result, rvec, tvec) \
            = cv2.solvePnP(np.float32(new_ned_list), np.float32(new_uv_list),
                           proj.cam.get_K(scale), None,
                           rvec, tvec, useExtrinsicGuess=True)
        Rned2cam, jac = cv2.Rodrigues(rvec)
        pos = -np.matrix(Rned2cam[:3, :3]).T * np.matrix(tvec)
        newned = pos.T[0].tolist()[0]

        # Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
        # so solvePnP returns this combination.  We can extract Rned by
        # premultiplying by cam2body aka inv(body2cam).
        cam2body = new_image.get_cam2body()
        Rned2body = cam2body.dot(Rned2cam)
        Rbody2ned = np.matrix(Rned2body).T
        (yaw, pitch,
         roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')

        print "original pose:", new_image.get_camera_pose()
        #print "original pose:", proj.image_list[30].get_camera_pose()
        new_image.set_camera_pose_sba(ned=newned,
                                      ypr=[yaw * r2d, pitch * r2d, roll * r2d])
        new_image.save_meta()
        print "solvepnp() pose:", new_image.get_camera_pose_sba()

        print 'Image placed:', new_image.name

    # final triangulation of all match coordinates
    # print 'Performing final complete group triangulation'
    # my_triangulate(matches_group, placed_images, min_vectors=2)

    # write out the updated matches_group file as matches_sba
    th = np.deg2rad(30.0)
    target.transform(
        np.array([[np.cos(th), -np.sin(th), 0.0, 0.0],
                  [np.sin(th), np.cos(th), 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],
                  [0.0, 0.0, 0.0, 1.0]]))
    source = o3.voxel_down_sample(source, voxel_size=0.6)
    target = o3.voxel_down_sample(target, voxel_size=0.6)

    # source = o3.voxel_down_sample(source, voxel_size=0.005)
    # target = o3.voxel_down_sample(target, voxel_size=0.005)

    # compute cpd registration
    #tf_param, _ = gmmtree.registration_gmmtree(source, target)

    #tf_param = registration_svr(source, target, callbacks=[])
    tf_param = registration_gmmreg(source, target, callbacks=[])

    rot = trans.identity_matrix()
    rot[:3, :3] = tf_param.rot
    print("result: ", np.rad2deg(trans.euler_from_matrix(rot)), tf_param.scale,
          tf_param.t)

    result = copy.deepcopy(source)
    result.points = tf_param.transform(result.points)

    # draw result
    source.paint_uniform_color([1, 0, 0])
    target.paint_uniform_color([0, 1, 0])
    result.paint_uniform_color([0, 0, 1])
    o3.draw_geometries([source, target, result])
    #o3.draw_geometries([pcd, pcd2])
Example #40
0
    error = odom - gt

    dx = [x[3] for x in error]
    dy = [x[7] for x in error]
    dz = [x[11] for x in error]
    error_odom = np.zeros((len(odom), 6))  # per-frame [x, y, z, yaw, pitch, roll] errors
    error_s2s = np.zeros((len(odom), 6))
    for i in range(1, len(odom)):
        RT_odom = np.mat(
            ((odom[i][0], odom[i][1], odom[i][2], odom[i][3]),
             (odom[i][4], odom[i][5], odom[i][6], odom[i][7]),
             (odom[i][8], odom[i][9], odom[i][10], odom[i][11]), (0, 0, 0, 1)),
            dtype=np.float64)
        x_odom, y_odom, z_odom = transformations.translation_from_matrix(
            RT_odom)
        al_odom, be_odom, ga_odom = transformations.euler_from_matrix(
            RT_odom, 'szyx')

        RT_gt = np.mat(
            ((gt[i][0], gt[i][1], gt[i][2], gt[i][3]),
             (gt[i][4], gt[i][5], gt[i][6], gt[i][7]),
             (gt[i][8], gt[i][9], gt[i][10], gt[i][11]), (0, 0, 0, 1)),
            dtype=np.float64)

        x_gt, y_gt, z_gt = transformations.translation_from_matrix(RT_gt)
        al_gt, be_gt, ga_gt = transformations.euler_from_matrix(RT_gt, 'szyx')
        error_odom[i, 0:6] = [
            x_odom - x_gt, y_odom - y_gt, z_odom - z_gt, al_odom - al_gt,
            be_odom - be_gt, ga_odom - ga_gt
        ]
        #print error_odom[i,0:6]
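
RT_odom and RT_gt are rebuilt element by element from flattened 12-value rows, i.e. the top three rows of a homogeneous transform in row-major (KITTI-style) order. A small helper sketch under that layout assumption (not part of the original snippet):

import numpy as np

def row_to_matrix(row):
    # A 12-value pose row holds the top three rows of a 4x4 transform,
    # row-major; the bottom row is always [0, 0, 0, 1].
    RT = np.eye(4, dtype=np.float64)
    RT[:3, :4] = np.asarray(row, dtype=np.float64).reshape(3, 4)
    return RT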
Example #41
0
    def __init__(self, cfg, base_dir, sequences, frames=None):
        self.cfg = cfg
        self.sequences = sequences
        self.curr_batch_sequence = 0
        self.current_batch = 0

        if self.cfg.bidir_aug and self.cfg.batch_size % 2 != 0:
            raise ValueError("Batch size must be even")

        if self.cfg.data_type not in ("cam", "lidar"):
            raise ValueError('data_type must be "cam" or "lidar"')

        frames_data_type = np.uint8
        if self.cfg.data_type == "lidar":
            frames_data_type = np.float16

        if not frames:
            frames = [None] * len(sequences)

        self.input_frames = {}
        self.poses = {}

        # Mirror across the x-z plane (flip the y axis)
        self.H = np.identity(3, dtype=np.float32)
        self.H[1][1] = -1.0

        self.se3_ground_truth = {}
        self.se3_mirror_ground_truth = {}
        self.fc_ground_truth = {}
        self.fc_reverse_ground_truth = {}
        self.fc_mirror_ground_truth = {}
        self.fc_reverse_mirror_ground_truth = {}

        self.imu_measurements = {}
        self.imu_measurements_mirror = {}
        self.imu_measurements_reverse = {}
        self.imu_measurements_reverse_mirror = {}

        # This keeps track of the number of batches in each sequence
        self.batch_counts = {}
        # this holds the batch ids for each sequence; it is randomized, and the
        # order determines the order in which the batches are used
        self.batch_order = {}
        # this keeps track of the starting offsets in the input frames for batch id 0 in each sequence
        self.batch_offsets = {}
        # contains batch_cnt entries; each sequence is represented by its index repeated
        # once for every batch in that sequence
        self.sequence_ordering = None
        # contains batch_size counters. Keeps track of how many batches from each sequence
        # have already been used for the current epoch
        self.sequence_batch = {}

        self.batch_cnt = 0
        self.total_frames = 0

        for i_seq, seq in enumerate(sequences):
            seq_loader = DataLoader(self.cfg, base_dir, seq, frames=frames[i_seq])
            num_frames = seq_loader.get_num_frames()

            self.input_frames[seq] = np.zeros(
                    [num_frames, self.cfg.input_channels, self.cfg.input_height, self.cfg.input_width],
                    dtype=frames_data_type)
            self.poses[seq] = seq_loader.get_poses_in_corresponding_frame()
            self.se3_ground_truth[seq] = np.zeros([num_frames, 7], dtype=np.float32)
            self.se3_mirror_ground_truth[seq] = np.zeros([num_frames, 7], dtype=np.float32)
            self.fc_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.fc_reverse_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.fc_mirror_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.fc_reverse_mirror_ground_truth[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)

            self.imu_measurements[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.imu_measurements_mirror[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.imu_measurements_reverse[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)
            self.imu_measurements_reverse_mirror[seq] = np.zeros([num_frames - 1, 6], dtype=np.float32)

            for i_img in range(num_frames):
                if i_img % 100 == 0:
                    tools.printf("Loading sequence %s %.1f%% " % (seq, (i_img / num_frames) * 100))

                img = seq_loader.get_img(i_img)
                self.input_frames[seq][i_img, :] = img

                # now convert all the ground truth from 4x4 to xyz + quat, this is after the SE3 layer
                translation = transformations.translation_from_matrix(self.poses[seq][i_img])
                quat = transformations.quaternion_from_matrix(self.poses[seq][i_img])

                mirror_pose = np.identity(4, dtype=np.float32)
                mirror_pose[0:3, 0:3] = np.dot(self.H, np.dot(self.poses[seq][i_img][0:3, 0:3], self.H))
                mirror_pose[0:3, 3] = np.dot(self.H, translation)

                mirror_quat = transformations.quaternion_from_matrix(mirror_pose[0:3, 0:3])
                self.se3_ground_truth[seq][i_img] = np.concatenate([translation, quat])
                self.se3_mirror_ground_truth[seq][i_img] = np.concatenate([mirror_pose[0:3, 3], mirror_quat])

                # relative transformation labels
                if i_img + 1 < num_frames:
                    mirror_pose_next = np.identity(4, dtype=np.float32)
                    mirror_pose_next[0:3, 0:3] = np.dot(self.H, np.dot(self.poses[seq][i_img + 1][0:3, 0:3], self.H))
                    trans_next = transformations.translation_from_matrix(self.poses[seq][i_img + 1])
                    mirror_pose_next[0:3, 3] = np.dot(self.H, trans_next)

                    m_forward = np.dot(np.linalg.inv(self.poses[seq][i_img]), self.poses[seq][i_img + 1])
                    m_forward_mirror = np.dot(np.linalg.inv(mirror_pose), mirror_pose_next)
                    m_reverse = np.dot(np.linalg.inv(self.poses[seq][i_img + 1]), self.poses[seq][i_img])
                    m_reverse_mirror = np.dot(np.linalg.inv(mirror_pose_next), mirror_pose)

                    trans_forward = transformations.translation_from_matrix(m_forward)
                    ypr_forward = transformations.euler_from_matrix(m_forward, axes="rzyx")
                    trans_forward_mirror = transformations.translation_from_matrix(m_forward_mirror)
                    ypr_forward_mirror = transformations.euler_from_matrix(m_forward_mirror, axes="rzyx")
                    trans_reverse = transformations.translation_from_matrix(m_reverse)
                    ypr_reverse = transformations.euler_from_matrix(m_reverse, axes="rzyx")
                    trans_reverse_mirror = transformations.translation_from_matrix(m_reverse_mirror)
                    ypr_reverse_mirror = transformations.euler_from_matrix(m_reverse_mirror, axes="rzyx")

                    self.fc_ground_truth[seq][i_img] = np.concatenate([trans_forward, ypr_forward])
                    self.fc_mirror_ground_truth[seq][i_img] = np.concatenate([trans_forward_mirror, ypr_forward_mirror])
                    self.fc_reverse_ground_truth[seq][i_img] = np.concatenate([trans_reverse, ypr_reverse])
                    self.fc_reverse_mirror_ground_truth[seq][i_img] = np.concatenate(
                            [trans_reverse_mirror, ypr_reverse_mirror])

                    get_imu = seq_loader.get_averaged_imu_in_corresponding_frame
                    self.imu_measurements[seq][i_img] = get_imu(i_img, mirror=False, reverse=False)
                    self.imu_measurements_mirror[seq][i_img] = get_imu(i_img, mirror=True, reverse=False)
                    self.imu_measurements_reverse[seq][i_img] = get_imu(i_img, mirror=False, reverse=True)
                    self.imu_measurements_reverse_mirror[seq][i_img] = get_imu(i_img, mirror=True, reverse=True)

            # How many examples the sequence contains, were it to be processed using a batch size of 1
            sequence_examples = np.ceil((num_frames - self.cfg.timesteps) / self.cfg.sequence_stride).astype(
                    np.int32)
            # how many batches in the sequence, cutting off any extra
            if self.cfg.bidir_aug:
                self.batch_counts[seq] = np.floor(2 * sequence_examples / self.cfg.batch_size).astype(np.int32)
            else:
                self.batch_counts[seq] = np.floor(sequence_examples / self.cfg.batch_size).astype(np.int32)

            self.batch_cnt += self.batch_counts[seq].astype(np.int32)
            self.batch_order[seq] = np.arange(0, self.batch_counts[seq], dtype=np.uint32)
            self.batch_offsets[seq] = np.zeros([self.cfg.batch_size], dtype=np.uint32)
            self.sequence_batch[seq] = 0

            self.total_frames += num_frames

        tools.printf("All data loaded, batch_size=%d, timesteps=%d, num_batches=%d" % (
            self.cfg.batch_size, self.cfg.timesteps, self.batch_cnt))

        gc.collect()
        self.sequence_ordering = np.zeros([self.batch_cnt], dtype=np.uint16)
        self.set_batch_offsets()
        self.next_epoch(False)
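
The mirrored ground truth conjugates each rotation with H = diag(1, -1, 1) and reflects the translation. A quick standalone check of that identity (illustrative, not from the loader): conjugating a rotation about z with the y-flip reverses its sense.

import numpy as np

H = np.identity(3)
H[1, 1] = -1.0  # flip the y axis

# 90 degrees about z
R = np.array([[0., -1., 0.],
              [1.,  0., 0.],
              [0.,  0., 1.]])

R_mirror = H.dot(R).dot(H)
assert np.allclose(R_mirror, R.T)  # mirrored rotation is -90 degrees about z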
Example #42
0
    def __init__(self, config, base_dir, sequences, frames=None):
        self.truncated_seq_sizes = []
        self.end_of_sequence_indices = []
        self.curr_batch_idx = 0
        self.unused_batch_indices = []
        self.cfg = config

        if not frames:
            frames = [None] * len(sequences)

        total_num_examples = 0

        for i_seq, seq in enumerate(sequences):
            seq_data = pykitti.odometry(base_dir, seq, frames=frames[i_seq])
            num_frames = len(seq_data.poses)

            # less than timesteps number of frames will be discarded
            num_examples = (num_frames - 1) // self.cfg.timesteps
            self.truncated_seq_sizes.append(num_examples * self.cfg.timesteps +
                                            1)
            total_num_examples += num_examples

        # less than batch size number of examples will be discarded
        self.total_batch_count = total_num_examples // self.cfg.batch_size
        # +1 adjusts for the extra image in the last time step
        total_timesteps = self.total_batch_count * (self.cfg.timesteps + 1)

        # since some examples will be discarded, readjust the truncated_seq_sizes
        deleted_frames = (total_num_examples - self.total_batch_count *
                          self.cfg.batch_size) * self.cfg.timesteps
        for i in range(len(self.truncated_seq_sizes) - 1, -1, -1):
            if self.truncated_seq_sizes[i] > deleted_frames:
                self.truncated_seq_sizes[i] -= deleted_frames
                break
            else:
                # subtract before zeroing, otherwise the removed frames are never counted
                deleted_frames -= self.truncated_seq_sizes[i]
                self.truncated_seq_sizes[i] = 0

        # for storing all training
        self.input_frames = np.zeros([
            total_timesteps, self.cfg.batch_size, self.cfg.input_channels,
            self.cfg.input_height, self.cfg.input_width
        ],
                                     dtype=np.uint8)
        poses_wrt_g = np.zeros([total_timesteps, self.cfg.batch_size, 4, 4],
                               dtype=np.float32)  # ground truth poses

        num_image_loaded = 0
        for i_seq, seq in enumerate(sequences):
            seq_data = pykitti.odometry(base_dir, seq, frames=frames[i_seq])
            length = self.truncated_seq_sizes[i_seq]

            i = -1
            j = -1
            for i_img in range(length):

                if i_img % 100 == 0:
                    tools.printf("Loading sequence %s %.1f%% " %
                                 (seq, (i_img / length) * 100))

                i = num_image_loaded % total_timesteps
                j = num_image_loaded // total_timesteps

                # swap axis to channels first
                img = seq_data.get_cam0(i_img)
                img = img.resize((self.cfg.input_width, self.cfg.input_height))
                img = np.array(img)
                img = np.reshape(
                    img, [img.shape[0], img.shape[1], self.cfg.input_channels])
                img = np.moveaxis(np.array(img), 2, 0)
                pose = seq_data.poses[i_img]

                self.input_frames[i, j] = img
                poses_wrt_g[i, j] = pose
                num_image_loaded += 1

                # insert the extra image at the end of the batch, note the number of
                # frames per batch of batch size 1 is timesteps + 1
                if i_img != 0 and i_img != length - 1 and i_img % self.cfg.timesteps == 0:
                    i = num_image_loaded % total_timesteps
                    j = num_image_loaded // total_timesteps
                    self.input_frames[i, j] = img
                    poses_wrt_g[i, j] = pose

                    num_image_loaded += 1

            # If the batch has the last frame in a sequence, the following frame
            # in the next batch must have a reset for lstm state
            self.end_of_sequence_indices.append((
                i + 1,
                j,
            ))

        # make sure all of the examples are fully loaded, just to detect bugs
        assert (num_image_loaded == total_timesteps * self.cfg.batch_size)

        # now convert all the ground truth from 4x4 to xyz + quat, this is after the SE3 layer
        self.se3_ground_truth = np.zeros(
            [total_timesteps, self.cfg.batch_size, 7], dtype=np.float32)
        for i in range(0, self.se3_ground_truth.shape[0]):
            for j in range(0, self.se3_ground_truth.shape[1]):
                translation = transformations.translation_from_matrix(
                    poses_wrt_g[i, j])
                quat = transformations.quaternion_from_matrix(poses_wrt_g[i,
                                                                          j])
                self.se3_ground_truth[i,
                                      j] = np.concatenate([translation, quat])

        # extract the relative transformation between frames after the fully connected layer
        self.fc_ground_truth = np.zeros(
            [total_timesteps, self.cfg.batch_size, 6], dtype=np.float32)
        # going through rows, then columns
        for i in range(0, self.fc_ground_truth.shape[0]):
            for j in range(0, self.fc_ground_truth.shape[1]):

                # always identity at the beginning of the sequence
                if i % (self.cfg.timesteps + 1) == 0:
                    m = transformations.identity_matrix()
                else:
                    m = np.dot(np.linalg.inv(poses_wrt_g[i - 1, j]),
                               poses_wrt_g[i, j])  # double check

                translation = transformations.translation_from_matrix(m)
                ypr = transformations.euler_from_matrix(m, axes="rzyx")
                assert (np.all(np.abs(ypr) < np.pi))
                self.fc_ground_truth[i,
                                     j] = np.concatenate([translation,
                                                          ypr])  # double check

        tools.printf(
            "All data loaded, batch_size=%d, timesteps=%d, num_batches=%d" %
            (self.cfg.batch_size, self.cfg.timesteps, self.total_batch_count))

        self.next_epoch()
        gc.collect()  # force garbage collection
Example #43
0
    def fromSkeleton(self, skel, animationTrack=None):
        """
        Construct a BVH object from a skeleton structure and optionally an 
        animation track. If no animation track is specified, a dummy animation
        of one frame will be added.
        NOTE: Make sure that the skeleton has only one root.
        """

        # Traverse skeleton joints in depth-first order
        for jointName in skel.getJointNames():
            bone = skel.getBone(jointName)
            if bone.parent:
                joint = self.addJoint(bone.parent.name, jointName)
                joint.channels = ["Zrotation", "Xrotation", "Yrotation"]
            else:
                joint = self.addRootJoint(bone.name)
                joint.channels = [
                    "Xposition", "Yposition", "Zposition", "Zrotation",
                    "Xrotation", "Yrotation"
                ]

            offset = bone.getRestOffset()
            self.__calcPosition(joint, offset)
            if not bone.hasChildren():
                endJoint = self.addJoint(jointName, 'End effector')
                offset = bone.getRestTailPos() - bone.getRestHeadPos()
                self.__calcPosition(endJoint, offset)

        self.__cacheGetJoints()
        nonEndJoints = [
            joint for joint in self.getJoints() if not joint.isEndConnector()
        ]

        if animationTrack:
            self.frameCount = animationTrack.nFrames
            self.frameTime = 1.0 / animationTrack.frameRate

            jointToBoneIdx = {}
            for joint in nonEndJoints:
                jointToBoneIdx[joint.name] = skel.getBone(joint.name).index

            for fIdx in range(animationTrack.nFrames):
                offset = fIdx * animationTrack.nBones
                for jIdx, joint in enumerate(nonEndJoints):
                    bIdx = jointToBoneIdx[joint.name]
                    poseMat = animationTrack.data[offset + bIdx]
                    if len(joint.channels) == 6:
                        # Add transformation
                        tx, ty, tz = poseMat[:3, 3]
                        joint.frames.extend([tx, ty, tz])
                    ay, ax, az = tm.euler_from_matrix(poseMat, "syxz")
                    joint.frames.extend([az / D, ax / D, ay / D])
        else:
            # Add bogus animation with one frame
            self.frameCount = 1
            self.frameTime = 1.0
            for jIdx, joint in enumerate(nonEndJoints):
                if len(joint.channels) == 6:
                    # Add transformation
                    joint.frames.extend([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
                else:
                    joint.frames.extend([0.0, 0.0, 0.0])

        for joint in self.getJoints():
            joint.calculateFrames()
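
The "syxz" decomposition used above yields angles in (Y, X, Z) order, which are then written in the BVH channel order Z, X, Y. A round-trip sanity check (an added sketch, assuming the same transformations module aliased as tm):

import numpy as np
import transformations as tm

R = tm.euler_matrix(0.3, -0.2, 0.1, "syxz")
ay, ax, az = tm.euler_from_matrix(R, "syxz")
assert np.allclose(tm.euler_matrix(ay, ax, az, "syxz"), R)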
Example #44
0
import numpy as np
import transformations as trans
from probreg import filterreg
from probreg import callbacks
import utils

source, target = utils.prepare_source_and_target_rigid_3d('bunny.pcd')

cbs = [callbacks.Open3dVisualizerCallback(source, target)]
objective_type = 'pt2pt'
tf_param, _, _ = filterreg.registration_filterreg(source, target, target.normals,
                                                  objective_type=objective_type,
                                                  sigma2=None,
                                                  callbacks=cbs)
rot = trans.identity_matrix()
rot[:3, :3] = tf_param.rot
print("result: ", np.rad2deg(trans.euler_from_matrix(rot)),
      tf_param.scale, tf_param.t)
Example #45
0
                if body_offset_enabled == 1:
                    H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
                    H_body_camera[0][3] = body_offset_x
                    H_body_camera[1][3] = body_offset_y
                    H_body_camera[2][3] = body_offset_z
                    H_camera_body = np.linalg.inv(H_body_camera)
                    H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))

                # Realign heading to face north using initial compass data
                if compass_enabled == 1 and heading_north_yaw is not None:
                    H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot( tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))

                # Show debug messages here
                if debug_enable == 1:
                    os.system('clear') # This helps in displaying the messages to be more readable
                    utils.progress("DEBUG: Raw RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_T265Ref_T265body, 'sxyz')) * 180 / m.pi))
                    utils.progress("DEBUG: NED RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi))
                    utils.progress("DEBUG: Raw pos xyz : {}".format( np.array( [data.translation.x, data.translation.y, data.translation.z])))
                    utils.progress("DEBUG: NED pos xyz : {}".format( np.array( tf.translation_from_matrix( H_aeroRef_aeroBody))))

                if save_file:
                    timestamp = str(time.time())
                    s1 = "[" + timestamp + "] Raw RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_T265Ref_T265body, 'sxyz')) * 180 / m.pi)
                    s2 = "[" + timestamp + "] NED RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi)
                    s3 = "[" + timestamp + "] Raw pos xyz : {}".format( np.array( [data.translation.x, data.translation.y, data.translation.z]))
                    s4 = "[" + timestamp + "] NED pos xyz : {}".format( np.array( tf.translation_from_matrix( H_aeroRef_aeroBody)))
                    save_file.write(s1+"\n")
                    save_file.write(s2+"\n")
                    save_file.write(s3+"\n")
                    save_file.write(s4+"\n")
Example #46
0
def tfm_to_state(tfm):
    euler = tf.euler_from_matrix(tfm)
    return [tfm[0, 3], tfm[1, 3], tfm[2, 3], euler[0], euler[1], euler[2]]
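
Since tfm_to_state relies on the default 'sxyz' axes, a plausible inverse (an illustrative sketch, not part of the original snippet) rebuilds the matrix with euler_matrix and re-inserts the translation:

import transformations as tf

def state_to_tfm(state):
    # state: [x, y, z, roll, pitch, yaw], angles in radians ('sxyz' axes)
    x, y, z, ai, aj, ak = state
    tfm = tf.euler_matrix(ai, aj, ak)  # default axes='sxyz'
    tfm[:3, 3] = [x, y, z]
    return tfm
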
def compute_relative_error(p_es,
                           q_es,
                           p_gt,
                           q_gt,
                           T_cm,
                           dist,
                           max_dist_diff,
                           accum_distances=[],
                           scale=1.0):

    if len(accum_distances) == 0:
        accum_distances = tu.get_distance_from_start(p_gt)
    comparisons = tu.compute_comparison_indices_length(accum_distances, dist,
                                                       max_dist_diff)

    n_samples = len(comparisons)
    print('number of samples = {0} '.format(n_samples))
    if n_samples < 2:
        print("Too few samples! Will not compute.")
        return np.array([]), np.array([]), np.array([]), np.array([]), np.array([]),\
            np.array([]), np.array([])

    T_mc = np.linalg.inv(T_cm)
    errors = []
    for idx, c in enumerate(comparisons):
        if not c == -1:
            T_c1 = tu.get_rigid_body_trafo(q_es[idx, :], p_es[idx, :])
            T_c2 = tu.get_rigid_body_trafo(q_es[c, :], p_es[c, :])
            T_c1_c2 = np.dot(np.linalg.inv(T_c1), T_c2)
            T_c1_c2[:3, 3] *= scale

            T_m1 = tu.get_rigid_body_trafo(q_gt[idx, :], p_gt[idx, :])
            T_m2 = tu.get_rigid_body_trafo(q_gt[c, :], p_gt[c, :])
            T_m1_m2 = np.dot(np.linalg.inv(T_m1), T_m2)

            T_m1_m2_in_c1 = np.dot(T_cm, np.dot(T_m1_m2, T_mc))
            T_error_in_c2 = np.dot(np.linalg.inv(T_m1_m2_in_c1), T_c1_c2)
            T_c2_rot = np.eye(4)
            T_c2_rot[0:3, 0:3] = T_c2[0:3, 0:3]
            T_error_in_w = np.dot(
                T_c2_rot, np.dot(T_error_in_c2, np.linalg.inv(T_c2_rot)))
            errors.append(T_error_in_w)

    error_trans_norm = []
    error_trans_perc = []
    error_yaw = []
    error_gravity = []
    e_rot = []
    e_rot_deg_per_m = []
    for e in errors:
        tn = np.linalg.norm(e[0:3, 3])
        error_trans_norm.append(tn)
        error_trans_perc.append(tn / dist * 100)
        ypr_angles = tf.euler_from_matrix(e, 'rzyx')
        e_rot.append(tu.compute_angle(e))
        error_yaw.append(abs(ypr_angles[0]) * 180.0 / np.pi)
        error_gravity.append(
            np.sqrt(ypr_angles[1]**2 + ypr_angles[2]**2) * 180.0 / np.pi)
        e_rot_deg_per_m.append(e_rot[-1] / dist)
    return errors, np.array(error_trans_norm), np.array(error_trans_perc),\
        np.array(error_yaw), np.array(error_gravity), np.array(e_rot),\
        np.array(e_rot_deg_per_m)
def CCD_error(window_angular_error, window_range_error, angular_error, mechanical_error, distance, iterations):
    
#Initializing the arrays for the 6-DOF values    
    X_CCD = []
    Y_CCD = []
    Z_CCD = []
    Roll_CCD = []
    Pitch_CCD = []
    Yaw_CCD = []
    CCD_centroid = np.zeros((3,1)) 
    
   
    
    for i in range(iterations):
        
        
#the Cartesian coordinates of the targets in the CCD frame are known
        CCD_target_1 = np.matrix([[0.034125],[0.039125],[0],[1]])
        CCD_target_2 = np.matrix([[-0.034125],[0.039125],[0],[1]])
        CCD_target_3 = np.matrix([[-0.034125],[-0.039125],[0],[1]])
        CCD_target_4 = np.matrix([[0.034125],[-0.039125],[0],[1]])

        Targets_CCD_frame = np.concatenate((CCD_target_1, CCD_target_2, CCD_target_3, CCD_target_4), axis = 1)
        
        

#transformation matrix for converting the CCD frame to the LT frame
        CCD_to_LT = np.matrix([[1,0,0,1.3],[0,1,0,1.2],[0,0,1,distance],[0,0,0,1]])

        Homogeneous_modelled_points = (CCD_to_LT * Targets_CCD_frame).T

#removing the fourth dimension for ease of calculations
        modelled_points = np.delete(Homogeneous_modelled_points, 3, 1)
    
        LT_err_CCD = un.LT_uncertainities(modelled_points, 1).T
        Window_err_CCD = un.Window_uncertainities(modelled_points, window_range_error, window_angular_error, window_angular_error)
        Additional_err = un.Additional_ang_uncertatinities(modelled_points, angular_error, angular_error)
        Mechanical_err = un.Mechanical_uncertainities(modelled_points, mechanical_error, mechanical_error, mechanical_error)

#Adding mechanical uncertainties of mounting the targets
        modelled_points_1 = modelled_points + Mechanical_err

#Converting the modelled points into Laser tracker's spherical coordinate system
        spherical_points = cartesian_to_spherical(modelled_points_1)

#Adding the uncertainties from the Laser Tracker
        #spherical_points = spherical_points + LT_err_CCD

#Adding uncertainties from the window
        spherical_points = spherical_points + Window_err_CCD 

#Adding additional angular uncertainties
        spherical_points = spherical_points + Additional_err

#Converting back to the cartesian coordinate system
        cartesian_points = spherical_to_cartesian(spherical_points)
        
        Homogeneous_transform = t.superimposition_matrix(modelled_points, cartesian_points, usesvd=True)
        Rotation = np.matrix(Homogeneous_transform[0:3, 0:3])
        Translation = np.matrix(Homogeneous_transform[0:3,3]).T


        #[Rotation, Translation] = best_fit(cartesian_points, modelled_points, scaling)

        n = modelled_points.shape[0]

#modelled_points = modelled_points.T     
        fit_points = (Rotation * modelled_points.T) + np.tile(Translation, (1, n))

#Finding centroid of the fitted point cloud
        fit_point_centroid = np.mean(fit_points, axis = 1)
        
        CCD_centroid = np.append(CCD_centroid, fit_point_centroid, 1)
       
        
#Finding centroid of the modelled point cloud
        modelled_points_centroid = np.mean(modelled_points, axis = 0).T

#calculating homogeneous transformation function 
        #Homogeneous_transform = np.zeros((4,4))
        #Homogeneous_transform[:3,:3] = Rotation
        #Homogeneous_transform[0,3] = Translation[0]
        #Homogeneous_transform[1,3] = Translation[1]
        #Homogeneous_transform[2,3] = Translation[2]
        #Homogeneous_transform[3,3] = 1
        
#Finding the euler angles from the homogeneous transformation matrix
        euler_angles_CCD = np.matrix(t.euler_from_matrix(Homogeneous_transform)).T
        
        for i in range(3):
            euler_angles_CCD[i,0] = m.degrees(euler_angles_CCD[i,0]) * 3600

#Appending the 6-DOF values for each iteration
        X_CCD.append((Translation[0,0])*1e6)
        Y_CCD.append((Translation[1,0])*1e6)
        Z_CCD.append((Translation[2,0])*1e6)
        Roll_CCD.append(euler_angles_CCD[0,0])
        Pitch_CCD.append(euler_angles_CCD[1,0])
        Yaw_CCD.append(euler_angles_CCD[2,0])
        
#calculating the STD of each 6-DOF list over the number of iterations
    X_CCD = np.std(X_CCD)
    Y_CCD = np.std(Y_CCD)
    Z_CCD = np.std(Z_CCD)
    Roll_CCD = np.std(Roll_CCD)
    Pitch_CCD = np.std(Pitch_CCD)
    Yaw_CCD = np.std(Yaw_CCD)
    
    CCD_centroid = np.delete(CCD_centroid, 0, 1)
    CCD_centroid_STD = np.std(CCD_centroid, axis = 1)
    CCD_centroid = np.mean(CCD_centroid, axis = 1)

    return X_CCD, Y_CCD, Z_CCD, Roll_CCD, Pitch_CCD, Yaw_CCD, CCD_centroid, CCD_centroid_STD
Example #49
0
def q_to_rpy(q):
    m = xf.quaternion_matrix([q.w, q.x, q.y, q.z])  # Order is w, x, y, z
    rpy = xf.euler_from_matrix(m)
    return rpy
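
The same transformations module also provides euler_from_quaternion, so an equivalent one-step variant (a sketch, under the same [w, x, y, z] ordering assumption) would be:

def q_to_rpy_direct(q):
    # Equivalent: euler_from_quaternion builds the matrix internally
    return xf.euler_from_quaternion([q.w, q.x, q.y, q.z])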
Example #50
0
def matrix_to_rpypose(mat):
    """Returns the rpy pose from a homogeneous transformation matrix."""
    rpy = tf.euler_from_matrix(mat, axes='sxyz')
    trans = mat[:3, 3]
    return trans, rpy
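
A hypothetical inverse helper, assuming the same tf alias and 'sxyz' convention, rebuilds the matrix from the (trans, rpy) pair:

def rpypose_to_matrix(trans, rpy):
    """Inverse sketch of matrix_to_rpypose: homogeneous matrix from an
    'sxyz' roll-pitch-yaw and a translation."""
    mat = tf.euler_matrix(rpy[0], rpy[1], rpy[2], axes='sxyz')
    mat[:3, 3] = trans
    return mat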
Example #51
0
                if body_offset_enabled == 1:
                    H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
                    H_body_camera[0][3] = body_offset_x
                    H_body_camera[1][3] = body_offset_y
                    H_body_camera[2][3] = body_offset_z
                    H_camera_body = np.linalg.inv(H_body_camera)
                    H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))

                # Realign heading to face north using initial compass data
                if compass_enabled == 1:
                    H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot( tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))

                # Show debug messages here
                if debug_enable == 1:
                    os.system('clear') # This helps in displaying the messages to be more readable
                    print("DEBUG: Raw RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_T265Ref_T265body, 'sxyz')) * 180 / m.pi))
                    print("DEBUG: NED RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi))
                    print("DEBUG: Raw pos xyz : {}".format( np.array( [data.translation.x, data.translation.y, data.translation.z])))
                    print("DEBUG: NED pos xyz : {}".format( np.array( tf.translation_from_matrix( H_aeroRef_aeroBody))))

except KeyboardInterrupt:
    send_msg_to_gcs('Closing the script...')  

except:
    send_msg_to_gcs('ERROR IN SCRIPT')  
    print("Unexpected error:", sys.exc_info()[0])

finally:
    pipe.stop()
    vehicle.close()
    print("INFO: Realsense pipeline and vehicle object closed.")
Example #52
0
def solvePnP(pts_dict):
    # start with a clean slate
    for image in proj.image_list:
        image.img_pts = []
        image.obj_pts = []
    # build a new cam_dict that is a copy of the current one
    cam_dict = {}
    for image in proj.image_list:
        cam_dict[image.name] = {}
        rvec, tvec = image.get_proj()
        ned, ypr, quat = image.get_camera_pose()
        cam_dict[image.name]['rvec'] = rvec
        cam_dict[image.name]['tvec'] = tvec
        cam_dict[image.name]['ned'] = ned

    # iterate through the match dictionary and build a per image list of
    # obj_pts and img_pts
    for key in matches_dict:
        feature_dict = matches_dict[key]
        points = feature_dict['pts']
        ned = pts_dict[key]  # from separate provided point positions
        for p in points:
            image = proj.image_list[p[0]]
            kp = image.kp_list[p[1]]
            image.img_pts.append(kp.pt)
            image.obj_pts.append(ned)

    camw, camh = proj.cam.get_image_params()
    for image in proj.image_list:
        print(image.name)
        if len(image.img_pts) < 4:
            continue
        scale = float(image.width) / float(camw)
        K = proj.cam.get_K(scale)
        rvec, tvec = image.get_proj()
        (result, rvec, tvec) = cv2.solvePnP(np.float32(image.obj_pts),
                                            np.float32(image.img_pts),
                                            K,
                                            None,
                                            rvec,
                                            tvec,
                                            useExtrinsicGuess=True)
        print "rvec=", rvec
        print "tvec=", tvec
        Rned2cam, jac = cv2.Rodrigues(rvec)
        #print "Rraw (from SolvePNP):\n", Rraw

        ned = image.camera_pose['ned']
        print "original ned = ", ned
        #tvec = -np.matrix(R[:3,:3]) * np.matrix(ned).T
        #print "tvec =", tvec
        pos = -np.matrix(Rned2cam[:3, :3]).T * np.matrix(tvec)
        newned = pos.T[0].tolist()[0]
        print "new ned =", newned

        # Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
        # so solvePnP returns this combination.  We can extract Rned by
        # premultiplying by cam2body aka inv(body2cam).
        cam2body = image.get_cam2body()
        Rned2body = cam2body.dot(Rned2cam)
        #print "R (after M * R):\n", R

        ypr = image.camera_pose['ypr']
        print "original ypr = ", ypr
        Rbody2ned = np.matrix(Rned2body).T
        IRo = transformations.euler_matrix(ypr[0] * d2r, ypr[1] * d2r,
                                           ypr[2] * d2r, 'rzyx')
        IRq = transformations.quaternion_matrix(image.camera_pose['quat'])
        #print "Original IR:\n", IRo
        #print "Original IR (from quat)\n", IRq
        #print "IR (from SolvePNP):\n", IR

        (yaw, pitch,
         roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
        print "ypr =", [yaw / d2r, pitch / d2r, roll / d2r]

        #image.set_camera_pose( pos.T[0].tolist(), [yaw/d2r, pitch/d2r, roll/d2r] )
        #print "Proj =", np.concatenate((R, tvec), axis=1)
        cam_dict[image.name] = {}
        cam_dict[image.name]['rvec'] = rvec
        cam_dict[image.name]['tvec'] = tvec
        cam_dict[image.name]['ned'] = newned
    return cam_dict
def CCD_mechanical_error(mechanical_error, iterations):
    
#Initializing the arrays for the 6-DOF values    
    ME_X_CCD = []
    ME_Y_CCD = []
    ME_Z_CCD = []
    ME_Roll_CCD = []
    ME_Pitch_CCD = []
    ME_Yaw_CCD = []
    ME_CCD_centroid = np.zeros((3,1)) 
    
    for i in range(iterations):
        
        
#the Cartesian coordinates of the targets in the CCD frame are known
        CCD_target_1 = np.matrix([[0.034125],[0.039125],[0],[1]])
        CCD_target_2 = np.matrix([[-0.034125],[0.039125],[0],[1]])
        CCD_target_3 = np.matrix([[-0.034125],[-0.039125],[0],[1]])
        CCD_target_4 = np.matrix([[0.034125],[-0.039125],[0],[1]])

        Targets_CCD_frame = np.concatenate((CCD_target_1, CCD_target_2, CCD_target_3, CCD_target_4), axis = 1)

#transformation matrix for converting the CCD frame to the LT frame
        CCD_to_LT = np.matrix([[1,0,0,1.3],[0,1,0,1.2],[0,0,1,5.7],[0,0,0,1]])

        Homogeneous_modelled_points = (CCD_to_LT * Targets_CCD_frame).T

#removing the fourth dimension for ease of calculations
        modelled_points = np.delete(Homogeneous_modelled_points, 3, 1)
    
        LT_err_CCD = un.LT_uncertainities(modelled_points, 1).T
        Mechanical_err = un.Mechanical_uncertainities(modelled_points, mechanical_error, mechanical_error, mechanical_error)

#Adding mechanical uncertainties of mounting the targets
        modelled_points_1 = modelled_points + Mechanical_err

#Converting the modelled points into Laser tracker's spherical coordinate system
        spherical_points = cartesian_to_spherical(modelled_points_1)

#Adding the uncertainties from the Laser Tracker
        spherical_points = spherical_points + LT_err_CCD

#Converting back to the cartesian coordinate system
        cartesian_points = spherical_to_cartesian(spherical_points)


        [Rotation, Translation] = best_fit(cartesian_points, modelled_points, scaling)

        n = modelled_points.shape[0]

#modelled_points = modelled_points.T     
        fit_points = (Rotation * modelled_points.T) + np.tile(Translation, (1, n))

#Finding centroid of the fitted point cloud
        fit_point_centroid = np.mean(fit_points, axis = 1)
        
        ME_CCD_centroid = np.append(ME_CCD_centroid, fit_point_centroid, 1)
        
#Finding centroid of the modelled point cloud
        modelled_points_centroid = np.mean(modelled_points, axis = 0).T

#calculating homogeneous transformation function 
        Homogeneous_transform = np.zeros((4,4))
        Homogeneous_transform[:3,:3] = Rotation
        Homogeneous_transform[0,3] = Translation[0]
        Homogeneous_transform[1,3] = Translation[1]
        Homogeneous_transform[2,3] = Translation[2]
        Homogeneous_transform[3,3] = 1
        
#Finding the euler angles from the homogeneous transformation matrix
        ME_euler_angles_CCD = np.matrix(t.euler_from_matrix(Homogeneous_transform)).T
        for i in range(3):
            ME_euler_angles_CCD[i,0] = m.degrees(ME_euler_angles_CCD[i,0]) * 3600

#Appending the 6-DOF values for each iteration
        ME_X_CCD.append((Translation[0,0])*1e6)
        ME_Y_CCD.append((Translation[1,0])*1e6)
        ME_Z_CCD.append((Translation[2,0])*1e6)
        ME_Roll_CCD.append(ME_euler_angles_CCD[0,0])
        ME_Pitch_CCD.append(ME_euler_angles_CCD[1,0])
        ME_Yaw_CCD.append(ME_euler_angles_CCD[2,0])
        
#calculating the mean of each 6-DOF list over the number of iterations
    ME_X_CCD = np.mean(ME_X_CCD)
    ME_Y_CCD = np.mean(ME_Y_CCD)
    ME_Z_CCD = np.mean(ME_Z_CCD)
    ME_Roll_CCD = np.mean(ME_Roll_CCD)
    ME_Pitch_CCD = np.mean(ME_Pitch_CCD)
    ME_Yaw_CCD = np.mean(ME_Yaw_CCD)
    
    ME_CCD_centroid = np.delete(ME_CCD_centroid, 0, 1)
    ME_CCD_centroid = np.mean(ME_CCD_centroid, axis = 1)

    return ME_X_CCD, ME_Y_CCD, ME_Z_CCD, ME_Roll_CCD, ME_Pitch_CCD, ME_Yaw_CCD, ME_CCD_centroid
Example #54
0
                                  image_name)  # Reading
    image = Image.open(abs_image_path)
    image_rgb = rgb_converter(image)

    # Remap camera name
    camera_name = camera_calibration_map[camera_dir]

    x, y, z, *q = pose
    t = (x, y, z)

    # Calculate camera transformation
    R_camera, T_camera, R_final_inv, T_final_inv = calculateCameraMotion(
        t, q,
        torch.from_numpy(calibration_info[camera_name].T_bl_cam).float())

    a1, a2, theta = euler_from_matrix(R_camera[0])
    qw, qx, qy, qz = quaternion_from_euler(a1, a2, theta)
    pose = x, y, qw, qx, qy, qz

    # Saving
    image_name, ext = image_name.split(".")
    jpeg_image_name = ".".join((image_name, "jpeg"))

    stereo_ch, camera_id = camera_dir.split("_z_")
    jpeg_image_dir = os.path.join(dest_path, dir_map[camera_id])

    jpeg_image_path = os.path.join(jpeg_image_dir, jpeg_image_name)
    os.makedirs(jpeg_image_dir, exist_ok=True)
    image_rgb.save(jpeg_image_path, "JPEG")

    poses_path = os.path.join(jpeg_image_dir, "poses.txt")
Example #55
0
    def fromSkeleton(self, skel, animationTrack=None, dummyJoints=True):
        """
        Construct a BVH object from a skeleton structure and optionally an 
        animation track. If no animation track is specified, a dummy animation
        of one frame will be added.
        If dummyJoints is true (the default) then extra dummy joints will be
        introduced when bones are not directly connected, but have their head
        position offset from their parent bone tail. This often happens when
        multiple bones are attached to one parent bone, for example in the
        shoulder, hip and hand areas.
        When dummyJoints is set to false, for each bone in the skeleton, exactly
        one BVH joint will be created. How this is interpreted depends on the
        tool importing the BVH file. Some create only a bone between the parent
        and its first child joint, and create empty offsets to the other children.
        Other tools create one bone, with the tail position being the average
        of all the child joints. Dummy joints are introduced to prevent 
        ambiguities between tools. Dummy joints carry the same name as the bone
        they parent, with "__" prepended.

        NOTE: Make sure that the skeleton has only one root.
        """

        # Traverse skeleton joints in depth-first order
        for jointName in skel.getJointNames():
            bone = skel.getBone(jointName)
            if dummyJoints and bone.parent and \
               (bone.getRestHeadPos() != bone.parent.getRestTailPos()).any():
                # Introduce a dummy joint to cover the offset between two not-
                # connected bones
                joint = self.addJoint(bone.parent.name, "__" + jointName)
                joint.channels = ["Zrotation", "Xrotation", "Yrotation"]
                parentName = joint.name

                offset = bone.parent.getRestTailPos(
                ) - bone.parent.getRestHeadPos()
                self.__calcPosition(joint, offset)
                offset = bone.getRestHeadPos() - bone.parent.getRestTailPos()
            else:
                parentName = bone.parent.name if bone.parent else None
                offset = bone.getRestOffset()

            if bone.parent:
                joint = self.addJoint(parentName, jointName)
                joint.channels = ["Zrotation", "Xrotation", "Yrotation"]
            else:
                # Root bones have translation channels
                joint = self.addRootJoint(bone.name)
                joint.channels = [
                    "Xposition", "Yposition", "Zposition", "Zrotation",
                    "Xrotation", "Yrotation"
                ]

            self.__calcPosition(joint, offset)
            if not bone.hasChildren():
                endJoint = self.addJoint(jointName, 'End effector')
                offset = bone.getRestTailPos() - bone.getRestHeadPos()
                self.__calcPosition(endJoint, offset)

        self.__cacheGetJoints()
        nonEndJoints = [
            joint for joint in self.getJoints() if not joint.isEndConnector()
        ]

        if animationTrack:
            self.frameCount = animationTrack.nFrames
            self.frameTime = 1.0 / animationTrack.frameRate

            jointToBoneIdx = {}
            for joint in nonEndJoints:
                if skel.containsBone(joint.name):
                    jointToBoneIdx[joint.name] = skel.getBone(joint.name).index
                else:
                    jointToBoneIdx[joint.name] = -1

            for fIdx in range(animationTrack.nFrames):
                offset = fIdx * animationTrack.nBones
                for jIdx, joint in enumerate(nonEndJoints):
                    bIdx = jointToBoneIdx[joint.name]
                    if bIdx < 0:
                        poseMat = np.identity(4, dtype=np.float32)
                    else:
                        poseMat = animationTrack.data[offset + bIdx]

                    if len(joint.channels) == 6:
                        # Add transformation
                        tx, ty, tz = poseMat[:3, 3]
                        joint.frames.extend([tx, ty, tz])
                    ay, ax, az = tm.euler_from_matrix(poseMat, "syxz")
                    joint.frames.extend([az / D, ax / D, ay / D])
        else:
            # Add bogus animation with one frame
            self.frameCount = 1
            self.frameTime = 1.0
            for jIdx, joint in enumerate(nonEndJoints):
                if len(joint.channels) == 6:
                    # Add transformation
                    joint.frames.extend([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
                else:
                    joint.frames.extend([0.0, 0.0, 0.0])

        for joint in self.getJoints():
            joint.calculateFrames()
Example #56
0
# Start streaming with requested config
pipe.start(cfg)

try:
    for _ in range(50000):
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()

        # Fetch pose frame
        pose = frames.get_pose_frame()
        if pose:
            # Print some of the pose data to the terminal
            data = pose.get_pose_data()

            H_T265Ref_T265body = tf.quaternion_matrix([data.rotation.w, data.rotation.x,data.rotation.y,data.rotation.z]) # in transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]!

            # transform to aeronautic coordinates (body AND reference frame!)
            H_aeroRef_aeroBody = H_aeroRef_T265Ref.dot( H_T265Ref_T265body.dot( H_T265body_aeroBody ))

            rpy_rad = np.array( tf.euler_from_matrix(H_aeroRef_aeroBody, 'rxyz') )

#            print("Frame #{}".format(pose.frame_number))
#            print("RPY [deg]: {}".format(rpy_rad*180/m.pi))
            heading = rpy_rad[2]*180/m.pi
            if heading < 0: heading = 360 + heading
            print("Heading", round(heading))

finally:
    pipe.stop()
Example #57
0
def parseLXFML(handle, lxffile,ldrfile):
  #begin urdf

  print "<!--this file was autogenerated from %s -->" % lxffile
  ldr_file = open('%s' % ldrfile,'r')
  print "<robot name=nxt_%s>" % lxffile.strip('.lxf').rsplit("/")[0]
  lxf_doc = xml.dom.minidom.parse(handle)

  bricks = {}
  parts = {}
  bones = {}

  for brick in lxf_doc.getElementsByTagName('Brick'):
    b = {}
    b['refID']= int(brick.getAttribute('refID'))
    b['parts'] = []
    b['designID'] = int(brick.getAttribute('designID'))
    b['handled']=False
    i = parseInts(brick.getAttribute('itemNos'))
    if i:
      b['itemNos'] = i
    bricks[int(brick.getAttribute('refID'))] = b

    for part in brick.getElementsByTagName('Part'):
      p = {}
      p['bones'] = []
      p['designID'] = part.getAttribute('designID')
      p['materials'] = parseInts(part.getAttribute('materials'))
      parts[int(part.getAttribute('refID'))] = p
      b['parts'].append(p)
      p['parent'] = b

      for bone in part.getElementsByTagName('Bone'):
        d = {}
        d['transformation'] = parseFloats(bone.getAttribute('transformation'))
        bones[int(bone.getAttribute('refID'))] = d
        p['bones'].append(d)
        d['parent'] = p

  rigids = {}
  for rigid in lxf_doc.getElementsByTagName('Rigid'):
    r = {}
    r['refID'] = int(rigid.getAttribute('refID'))
    r['transformation'] = parseFloats(rigid.getAttribute('transformation'))
    r['boneRefs'] = parseInts(rigid.getAttribute('boneRefs'))
    r['handled'] = False
    rigids[r['refID']] = r

  joints = []
  for joint in lxf_doc.getElementsByTagName('Joint'):
    j = []
    for rigid_ref in joint.getElementsByTagName('RigidRef'):
      r = {}
      r['rigidRef'] = int(rigid_ref.getAttribute('rigidRef'))
      r['a'] = parseFloats(rigid_ref.getAttribute('a'))
      r['z'] = parseFloats(rigid_ref.getAttribute('z'))
      r['t'] = parseFloats(rigid_ref.getAttribute('t'))
      j.append(r)
    joints.append(j)


  ldr_trans={}
  count=0
  for line in ldr_file:
    if line.startswith('1'):
      ldr= [x for x in line.split(' ')]
      l={}
      l['ldraw'] = ldr[14].strip('.dat\r\n')
      l['transformation'] = [float(ldr[2]), float(ldr[3]), float(ldr[4]), float(ldr[5]), float(ldr[6]), float(ldr[7]), float(ldr[8]), float(ldr[9]), float(ldr[10]), float(ldr[11]), float(ldr[12]), float(ldr[13])]
      ldr_trans[count]=l
      count=count+1

  for refID in sorted(bricks.keys()):
    designID = ldr_trans[refID]['ldraw']
    ldrID = ldr_trans[refID]['ldraw']
    rot_x = rot_y = rot_z = 0
    #these parts with odd bends need a 180 rotation
    if ldrID in ('6629', '32348', '32140', '32526'):
      rot_y = 3.14159
    #scaling factor from the .dat models to meters
    scale = 0.0004
    d = {
      'refID' : refID,
      'mesh' : "package://nxt_description/meshes/parts/%s.dae" % designID,
      'bound_x' : 0,
      'bound_y' : 0,
      'bound_z' : 0,
      'm_scale' :' %s' % str(scale),
      'bound_roll' : 0,
      'bound_pitch' : 0,
      'bound_yaw' : 0,
      'dim_x' : 0,
      'dim_y' : 0,
      'dim_z' : 0,
      'rot_x': '%s' % str(rot_x),
      'rot_y': '%s' % str(rot_y),
      'rot_z': '%s' % str(rot_z),
    }
    #special templates for sensors and motors
    if ldr_trans[refID]['ldraw'] == '53787':
      print(motor_template % d)
    elif ldr_trans[refID]['ldraw'] == '53792':
      print(ultrasonic_link % d)
    elif ldr_trans[refID]['ldraw'] in ('1044', '64892', '53793'):
      print(generic_sensor_link % d)
    else:
      print(link_template % d)
  #create tree with no loops 
  create_rigid_tree(0, joints, rigids)
  create_brick_tree(rigids, bricks, bones)
  #for refID, joint_list in enumerate(joints):
  for refID, branch in enumerate(brick_tree):
    joint_type = "fixed"
    child_refID = branch[1]
    parent_refID = branch[0] 
    #all units are in CM
    #the models are in LDU which are 0.4mm
    #let's compute some transforms
    world_to_c = homogeneous_matrix(ldr_trans[child_refID]['transformation'])
    world_to_p = homogeneous_matrix(ldr_trans[parent_refID]['transformation'])
    #now let's get the stuff for the URDF
    p_to_c = world_to_p.I*world_to_c
    rpy = TF.euler_from_matrix(p_to_c, 'sxzy')
      
    d = {
    'refID' : refID,
    'joint_type' : joint_type,
    'parent_link' : 'ref_%s_link' % parent_refID,
    'child_link' : 'ref_%s_link' % child_refID,
    #a straight transform doesn't seem to work properly (sign changes as needed)
    'origin_x' : '%s' % str(-1*float(p_to_c[0,3])*scale),
    'origin_y' : '%s'% str(1*float(p_to_c[2,3])*scale),
    'origin_z' : '%s' % str(-1*float(p_to_c[1,3])*scale),
    'origin_roll' : '%s' % str(1*rpy[0]),
    'origin_pitch' : '%s' % str(-1*rpy[1]),
    'origin_yaw' : '%s' % str(1*rpy[2]),
    'axis_x' : 0, 'axis_y' : 0, 'axis_z' : 0,
    }
    print(joint_template % d)
  #end urdf 
  print "</robot>"
Example #58
0
    # create cables
    if sim_cables:
        model.insert(2, etree.Comment('Definition of the robot cables'))
        z = [0, 0, 1]
        for i, cbl in enumerate(config.points):
            fp = np.array(cbl.frame).reshape(3, 1)  # frame attach point
            # express platform attach point in world frame
            pp = pf_t + np.dot(pf_R, np.array(cbl.platform).reshape(3, 1))
            # cable orientation
            u = (pp - fp).reshape(3)
            u = list(u / np.linalg.norm(u))
            R = tr.rotation_matrix(
                np.arctan2(np.linalg.norm(np.cross(z, u)), np.dot(u, z)),
                np.cross(z, u))
            # to RPY
            rpy = list(tr.euler_from_matrix(R))
            # rpy of z-axis
            # cable position to stick to the platform
            a = l / (2. * np.linalg.norm(pp - fp))
            cp = list((pp - a * (pp - fp)).reshape(3))
            # create cable
            link = etree.SubElement(model, 'link', name='cable%i' % i)
            CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(cp + rpy))

            CreateVisualCollision(link,
                                  '/geometry/cylinder/radius',
                                  config.cable.radius,
                                  color='Black',
                                  collision=False,
                                  mass=0.001)
            CreateNested(link, 'visual/geometry/cylinder/length', str(l))
Example #59
0
def to_euler(self):
    return tm.euler_from_matrix(self.matrix)
Example #60
0
def compute_state_space_trajectory_and_derivatives(p, psi, dt):

    num_timesteps = len(p)

    psi = trigutils.compute_continuous_angle_array(psi)

    p_dot = c_[gradient(p[:, 0], dt),
               gradient(p[:, 1], dt),
               gradient(p[:, 2], dt)]
    p_dot_dot = c_[gradient(p_dot[:, 0], dt),
                   gradient(p_dot[:, 1], dt),
                   gradient(p_dot[:, 2], dt)]

    f_thrust_world = m * p_dot_dot - f_external_world.T.A
    f_thrust_world_normalized = sklearn.preprocessing.normalize(f_thrust_world)

    z_axis_intermediate = c_[cos(psi), zeros_like(psi), -sin(psi)]
    y_axis = f_thrust_world_normalized
    x_axis = sklearn.preprocessing.normalize(cross(z_axis_intermediate,
                                                   y_axis))
    z_axis = sklearn.preprocessing.normalize(cross(y_axis, x_axis))

    theta = zeros(num_timesteps)
    psi_recomputed = zeros(num_timesteps)
    phi = zeros(num_timesteps)

    for ti in range(num_timesteps):

        z_axis_ti = c_[matrix(z_axis[ti]), 0].T
        y_axis_ti = c_[matrix(y_axis[ti]), 0].T
        x_axis_ti = c_[matrix(x_axis[ti]), 0].T

        R_world_from_body_ti = c_[z_axis_ti, y_axis_ti, x_axis_ti,
                                  [0, 0, 0, 1]]
        psi_recomputed_ti, theta_ti, phi_ti = transformations.euler_from_matrix(
            R_world_from_body_ti, "ryxz")

        assert allclose(
            R_world_from_body_ti,
            transformations.euler_matrix(psi_recomputed_ti, theta_ti, phi_ti,
                                         "ryxz"))

        theta[ti] = theta_ti
        psi_recomputed[ti] = psi_recomputed_ti
        phi[ti] = phi_ti

    theta = trigutils.compute_continuous_angle_array(theta)
    psi_recomputed = trigutils.compute_continuous_angle_array(psi_recomputed)
    phi = trigutils.compute_continuous_angle_array(phi)

    assert allclose(psi_recomputed, psi)

    psi = psi_recomputed

    theta_dot = gradient(theta, dt)
    psi_dot = gradient(psi, dt)
    phi_dot = gradient(phi, dt)

    theta_dot_dot = gradient(theta_dot, dt)
    psi_dot_dot = gradient(psi_dot, dt)
    phi_dot_dot = gradient(phi_dot, dt)

    return p, p_dot, p_dot_dot, theta, theta_dot, theta_dot_dot, psi, psi_dot, psi_dot_dot, phi, phi_dot, phi_dot_dot