Code Example #1
File: kukaGymEnv.py Project: jiapei100/bullet3
  def getExtendedObservation(self):
     self._observation = self._kuka.getObservation()
     gripperState  = p.getLinkState(self._kuka.kukaUid,self._kuka.kukaGripperIndex)
     gripperPos = gripperState[0]
     gripperOrn = gripperState[1]
     blockPos,blockOrn = p.getBasePositionAndOrientation(self.blockUid)

     invGripperPos,invGripperOrn = p.invertTransform(gripperPos,gripperOrn)
     gripperMat = p.getMatrixFromQuaternion(gripperOrn)
     dir0 = [gripperMat[0],gripperMat[3],gripperMat[6]]
     dir1 = [gripperMat[1],gripperMat[4],gripperMat[7]]
     dir2 = [gripperMat[2],gripperMat[5],gripperMat[8]]

     gripperEul =  p.getEulerFromQuaternion(gripperOrn)
     #print("gripperEul")
     #print(gripperEul)
     blockPosInGripper,blockOrnInGripper = p.multiplyTransforms(invGripperPos,invGripperOrn,blockPos,blockOrn)
     projectedBlockPos2D =[blockPosInGripper[0],blockPosInGripper[1]]
     blockEulerInGripper = p.getEulerFromQuaternion(blockOrnInGripper)
     #print("projectedBlockPos2D")
     #print(projectedBlockPos2D)
     #print("blockEulerInGripper")
     #print(blockEulerInGripper)

     #we return the relative x,y position and euler angle of block in gripper space
     blockInGripperPosXYEulZ =[blockPosInGripper[0],blockPosInGripper[1],blockEulerInGripper[2]]

     #p.addUserDebugLine(gripperPos,[gripperPos[0]+dir0[0],gripperPos[1]+dir0[1],gripperPos[2]+dir0[2]],[1,0,0],lifeTime=1)
     #p.addUserDebugLine(gripperPos,[gripperPos[0]+dir1[0],gripperPos[1]+dir1[1],gripperPos[2]+dir1[2]],[0,1,0],lifeTime=1)
     #p.addUserDebugLine(gripperPos,[gripperPos[0]+dir2[0],gripperPos[1]+dir2[1],gripperPos[2]+dir2[2]],[0,0,1],lifeTime=1)

     self._observation.extend(list(blockInGripperPosXYEulZ))
     return self._observation
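
Note on the matrix layout: p.getMatrixFromQuaternion returns the 3x3 rotation matrix as a flat, row-major list of nine floats, which is why the example above builds the gripper's local axes from indices (0, 3, 6), (1, 4, 7) and (2, 5, 8): each triple is a matrix column, i.e. one rotated basis vector. A minimal standalone sketch of that layout (pybullet's math helpers work without a connected simulation):

import numpy as np
import pybullet as p

quat = p.getQuaternionFromEuler([0, 0, np.pi / 2])  # 90-degree yaw
m = p.getMatrixFromQuaternion(quat)                 # flat row-major list of 9 floats
R = np.array(m).reshape(3, 3)

dir0 = [m[0], m[3], m[6]]  # first column: the rotated x-axis in world coordinates
np.testing.assert_allclose(dir0, R[:, 0], atol=1e-7)
np.testing.assert_allclose(R[:, 0], [0, 1, 0], atol=1e-7)  # x maps onto +y under a 90-degree yaw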
Code Example #2
 def _reward(self):
   current_base_position = self.minitaur.GetBasePosition()
   forward_reward = current_base_position[0] - self._last_base_position[0]
   # Cap the forward reward if a cap is set.
   forward_reward = min(forward_reward, self._forward_reward_cap)
   # Penalty for sideways translation.
   drift_reward = -abs(current_base_position[1] - self._last_base_position[1])
   # Penalty for sideways rotation of the body.
   orientation = self.minitaur.GetBaseOrientation()
   rot_matrix = pybullet.getMatrixFromQuaternion(orientation)
   local_up_vec = rot_matrix[6:]
   shake_reward = -abs(np.dot(np.asarray([1, 1, 0]), np.asarray(local_up_vec)))
   energy_reward = -np.abs(
       np.dot(self.minitaur.GetMotorTorques(),
              self.minitaur.GetMotorVelocities())) * self._time_step
   objectives = [forward_reward, energy_reward, drift_reward, shake_reward]
   weighted_objectives = [o * w for o, w in zip(objectives, self._objective_weights)]
   reward = sum(weighted_objectives)
   self._objectives.append(objectives)
   return reward
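
Here rot_matrix[6:] slices the last three entries of the flat row-major matrix, i.e. its bottom row, which is the world up axis expressed in the base frame; for an upright base it is (0, 0, 1), so the shake penalty vanishes and grows as the body tilts. A standalone sketch of just that term (only pybullet and numpy assumed):

import numpy as np
import pybullet

def shake_penalty(orientation):
    # Bottom row of the row-major rotation matrix: world up in the base frame.
    rot_matrix = pybullet.getMatrixFromQuaternion(orientation)
    local_up_vec = rot_matrix[6:]
    return -abs(np.dot(np.asarray([1, 1, 0]), np.asarray(local_up_vec)))

print(shake_penalty(pybullet.getQuaternionFromEuler([0, 0, 0])))    # 0.0 (upright)
print(shake_penalty(pybullet.getQuaternionFromEuler([0.3, 0, 0])))  # negative (rolled 0.3 rad)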
Code Example #3
def get_trimesh_scene(axis: bool = False, bbox: bool = False) -> trimesh.Scene:
    """Returns trimesh scene."""
    import pybullet

    scene = trimesh.Scene()
    for unique_id in unique_ids:
        _, _, shape_id, _, mesh_file, *_ = pybullet.getVisualShapeData(
            unique_id
        )[0]
        mesh_file = mesh_file.decode()
        if pybullet.GEOM_MESH != shape_id:
            raise ValueError(
                f"Unsupported shape_id: {shape_id_to_str(shape_id)}"
            )

        pos, ori = pybullet.getBasePositionAndOrientation(unique_id)
        t = np.array(pos, dtype=float)
        R = pybullet.getMatrixFromQuaternion(ori)
        R = np.array(R, dtype=float).reshape(3, 3)
        transform = geometry.compose_transform(R=R, t=t)

        mesh = trimesh.load_mesh(mesh_file)
        scene.add_geometry(
            mesh, node_name=str(unique_id), transform=transform,
        )

        if bbox:
            scene.add_geometry(
                trimesh.path.creation.box_outline(mesh.bounding_box),
                transform=transform,
            )

        if axis:
            origin_size = np.max(mesh.bounding_box.extents) * 0.05
            scene.add_geometry(
                trimesh.creation.axis(origin_size), transform=transform,
            )
    return scene
Code Example #4
 def _imu(self):
     p = self._p
     [quart_link, lin_vel,
      ang_vel] = p.getLinkState(bodyUniqueId=self.soccerbotUid,
                                linkIndex=Links.IMU,
                                computeLinkVelocity=1)[5:8]
     lin_vel = np.array(lin_vel, dtype=self.DTYPE)
     lin_acc = (lin_vel - self.prev_lin_vel) / self.timeStep
     lin_acc -= self._GRAVITY_VECTOR
     rot_mat = np.array(pb.getMatrixFromQuaternion(quart_link),
                        dtype=self.DTYPE).reshape((3, 3))
     lin_acc = np.matmul(rot_mat, lin_acc)
     ang_vel = np.array(ang_vel, dtype=self.DTYPE)
     self.prev_lin_vel = lin_vel
     imu_noise = np.concatenate(
         (self.np_random.normal(0, self._IMU_LIN_STDDEV,
                                int(self._IMU_DIM / 2)),
          self.np_random.normal(0, self._IMU_ANG_STDDEV,
                                int(self._IMU_DIM / 2))))
     imu_val = np.concatenate(
         (lin_acc, ang_vel)) + imu_noise + self.imu_bias
     imu_val = np.clip(imu_val, -self.imu_limit, self.imu_limit)
     return imu_val
Code Example #5
def reconstruct_heightmaps(color, depth, configs, bounds, pixel_size):
    """Reconstruct top-down heightmap views from multiple 3D pointclouds.

    The color and depth are np.arrays or lists, where the leading dimension
    or list wraps around different viewpoints. So, if the np.array shape is
    (3,480,640,3), then the leading '3' denotes the number of camera views.

    TODO: documentation.
    """
    heightmaps, colormaps = [], []
    for color, depth, config in zip(color, depth, configs):
        intrinsics = np.array(config['intrinsics']).reshape(3, 3)
        xyz = get_pointcloud(depth, intrinsics)
        position = np.array(config['position']).reshape(3, 1)
        rotation = p.getMatrixFromQuaternion(config['rotation'])
        rotation = np.array(rotation).reshape(3, 3)
        transform = np.eye(4)
        transform[:3, :] = np.hstack((rotation, position))
        xyz = transform_pointcloud(xyz, transform)
        heightmap, colormap = get_heightmap(xyz, color, bounds, pixel_size)
        heightmaps.append(heightmap)
        colormaps.append(colormap)
    return heightmaps, colormaps
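
The 4x4 transform assembled above is the standard homogeneous camera-to-world pose. transform_pointcloud itself is not shown in this excerpt; below is a sketch of a compatible helper (a hypothetical reconstruction assuming xyz is an (N, 3) float array; the real helper in the source project may differ):

import numpy as np

def transform_pointcloud(xyz, transform):
    # Append a homogeneous 1 to each point, apply the 4x4 transform,
    # then drop the homogeneous coordinate again.
    xyz = np.asarray(xyz).reshape(-1, 3)
    ones = np.ones((xyz.shape[0], 1))
    return (np.hstack([xyz, ones]) @ transform.T)[:, :3]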
Code Example #6
File: image_getter.py Project: eaa3/pybullet_cloud
def addDebugFrame(pos, quat, length=0.2, lw=2, duration=0.2, pcid=0):
    rot_mat = np.array(p.getMatrixFromQuaternion(quat)).reshape(3,
                                                                3).transpose()
    toX = pos + length * rot_mat[0, :]
    toY = pos + length * rot_mat[1, :]
    toZ = pos + length * rot_mat[2, :]
    item_id = [0, 0, 0]
    item_id[0] = p.addUserDebugLine(pos,
                                    toX, [1, 0, 0],
                                    lw,
                                    duration,
                                    physicsClientId=pcid)
    item_id[1] = p.addUserDebugLine(pos,
                                    toY, [0, 1, 0],
                                    lw,
                                    duration,
                                    physicsClientId=pcid)
    item_id[2] = p.addUserDebugLine(pos,
                                    toZ, [0, 0, 1],
                                    lw,
                                    duration,
                                    physicsClientId=pcid)
    return item_id
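
A possible call site, for illustration only (the GUI connection and pose values here are assumptions; addDebugFrame itself only needs an ndarray position, a quaternion, and optionally a client id):

import numpy as np
import pybullet as p

cid = p.connect(p.GUI)  # debug lines are only visible in GUI mode
quat = p.getQuaternionFromEuler([0, 0, np.pi / 4])
ids = addDebugFrame(np.array([0.0, 0.0, 0.5]), quat,
                    length=0.3, duration=5.0, pcid=cid)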
Code Example #7
 def getExtendedObservation(self):
     carpos, carorn = self._p.getBasePositionAndOrientation(
         self._racecar.racecarUniqueId)
     print('RACE CAR ID:', self._racecar.racecarUniqueId, carorn)
     carmat = p.getMatrixFromQuaternion(carorn)
     ballpos, ballorn = self._p.getBasePositionAndOrientation(
         self._ballUniqueId)
     invCarPos, invCarOrn = self._p.invertTransform(carpos, carorn)
     ballPosInCar, ballOrnInCar = self._p.multiplyTransforms(
         invCarPos, invCarOrn, ballpos, ballorn)
     dist0 = 0.3
     dist1 = 1.
     eyePos = [
         carpos[0] + dist0 * carmat[0], carpos[1] + dist0 * carmat[3],
         carpos[2] + dist0 * carmat[6] + 0.3
     ]
     targetPos = [
         carpos[0] + dist1 * carmat[0], carpos[1] + dist1 * carmat[3],
         carpos[2] + dist1 * carmat[6] + 0.3
     ]
     up = [carmat[2], carmat[5], carmat[8]]
     viewMat = self._p.computeViewMatrix(eyePos, targetPos, up)
     #viewMat = self._p.computeViewMatrixFromYawPitchRoll(carpos,1,0,0,0,2)
     #print("projectionMatrix:")
     #print(self._p.getDebugVisualizerCamera()[3])
     projMatrix = [
         0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
         -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0
     ]
     img_arr = self._p.getCameraImage(width=self._width,
                                      height=self._height,
                                      viewMatrix=viewMat,
                                      projectionMatrix=projMatrix)
     rgb = img_arr[2]
     np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
     self._observation = np_img_arr
     return self._observation
Code Example #8
    def ahead_view(self):
        link_state = pb.getBasePositionAndOrientation(self.agent_mb)
        link_p = link_state[0]
        link_o = link_state[1]
        #handmat = pb.getMatrixFromQuaternion(link_o)

        #axisX = [handmat[0],handmat[3],handmat[6]]
        #axisY = [-handmat[1],-handmat[4],-handmat[7]] # Negative Y axis
        #axisZ = [handmat[2],handmat[5],handmat[8]]

        # we define the up axis in terms of the object to classify since it is
        # stationary. If you define up in terms of the orientation of the wand
        # then your up axis can rotate as the wand rotates due to the normal
        # contact forces with the object to classify. This causes the camera
        # image to exhibit some unwanted behavior, such as inverting the up
        # and down directions.
        # If your object to classify is not stationary, then you could switch to
        # the above code to use the wand orientation to define your up axis.

        state = pb.getBasePositionAndOrientation(self.obj_to_classify)
        state_o = state[1]
        mat = pb.getMatrixFromQuaternion(state_o)
        axisZ2 = [mat[2], mat[5], mat[8]]
        eyeOffset = self.wandLength
        focusOffset = eyeOffset + 10e-6  # tiny offset so the focus point sits just beyond the eye

        self.eye_pos = [link_p[0] + eyeOffset, link_p[1], link_p[2]]
        self.target_pos = [link_p[0] + focusOffset, link_p[1], link_p[2]]

        up = axisZ2 # Up is Z axis of obj_to_classify - change to axisZ to use
        # the wand orientation as up
        viewMatrix = pb.computeViewMatrix(self.eye_pos, self.target_pos, up)

        if 'render' in self.options and self.options['render'] == True:
            pass

        return viewMatrix
Code Example #9
 def compute_p_W_and_n_W(self, phi_B):
     """
     Given a planar angle in the object frame B, perform ray cast from the
     origin of the B frame and find the corresponding surface normal and
     ray impact position in the world frame W.
     FIXME(wualbert): We don't care about z as we are in 2D.
                      This may cause problems later.
     @param phi_B: The yaw of the contact point in the B frame.
     @return: (p_W, n_W). p_W is the 3D contact point on the object.
     n_W is the 3D outward-facing unit normal vector at the contact point.
     Both of these are in the world frame.
     """
     # Get the pose of the object frame origin in world frame
     p_B_W, q_B_W = p.getBasePositionAndOrientation(self.body_id)
     e_B_W = p.getEulerFromQuaternion(q_B_W)
     # In 2D, only the yaw should be nonzero
     np.testing.assert_allclose(e_B_W[:2], np.zeros(2), atol=1e-4)
     # add phi_B
     e_phi_W = e_B_W + np.asarray([0., 0., phi_B])
     e_phi_W %= 2 * np.pi
     q_phi_W = p.getQuaternionFromEuler(e_phi_W)
     # Construct the unit ray vector
     # Compute the ray to cast
     ray_direction_W = np.dot(
         np.array(p.getMatrixFromQuaternion(q_phi_W)).reshape(3, 3),
         np.asarray([1., 0., 0.]))
     # FIXME(wualbert): the ray shouldn't be of arbitrary length
     ray_start_W = p_B_W + ray_direction_W * 10.
     ray_end_W = p_B_W - ray_direction_W * 10.
     ans = p.rayTest(ray_start_W, ray_end_W)
     for a in ans:
         if a[0] == self.body_id:
             # FIXME(wualbert): a cleaner way to do this
             # We only care about the normal of this particular object
             return a[3:]
     raise AssertionError
Code Example #10
File: car.py Project: namratadeka/cs515
    def get_camera_image(self):
        image = np.zeros((100, 100, 4))
        proj_matrix = p.computeProjectionMatrixFOV(fov=80,
                                                   aspect=1,
                                                   nearVal=0.01,
                                                   farVal=100)
        pos, ori = [
            list(l)
            for l in p.getBasePositionAndOrientation(self.id, self.client)
        ]
        pos[2] = 0.2

        # Rotate camera direction
        rot_mat = np.array(p.getMatrixFromQuaternion(ori)).reshape(3, 3)
        camera_vec = np.matmul(rot_mat, [1, 0, 0])
        up_vec = np.matmul(rot_mat, np.array([0, 0, 1]))
        view_matrix = p.computeViewMatrix(pos, pos + camera_vec, up_vec)

        w, h, rgb, depth, self.segmask = p.getCameraImage(
            100, 100, view_matrix, proj_matrix)
        rgb = rgb[:, :, :-1] / 255.
        rgbd = np.concatenate([rgb, np.expand_dims(depth, 2)], axis=2)

        return rgbd
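
One detail this example glosses over: the depth returned by p.getCameraImage is the nonlinear OpenGL depth buffer in [0, 1], not metric distance, so concatenating it into rgbd mixes units. With the nearVal/farVal used above it can be linearized with the standard conversion (shown as a sketch):

import numpy as np

def depth_buffer_to_meters(depth_buffer, near=0.01, far=100.0):
    # Invert the OpenGL projection to recover distance along the view ray.
    return far * near / (far - (far - near) * np.asarray(depth_buffer))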
Code Example #11
 def getCameraImage(self):
     """Return camera image from CAMERA_JOINT_INDEX.
     """
     # compute eye and target position for viewMatrix
     pos, orn, _, _, _, _ = p.getLinkState(self.robotId,
                                           self.CAMERA_JOINT_INDEX)
     cameraPos = np.array(pos)
     cameraMat = np.array(p.getMatrixFromQuaternion(orn)).reshape(3, 3).T
     eyePos = cameraPos + self.CAMERA_EYE_SCALE * cameraMat[
         self.CAMERA_EYE_INDEX]
     targetPos = cameraPos + self.CAMERA_TARGET_SCALE * cameraMat[
         self.CAMERA_EYE_INDEX]
     up = self.CAMERA_UP_SCALE * cameraMat[self.CAMERA_UP_INDEX]
     # p.addUserDebugLine(eyePos, targetPos, lineColorRGB=[1, 0, 0], lifeTime=0.1) # red line for camera vector
     # p.addUserDebugLine(eyePos, eyePos + up * 0.5, lineColorRGB=[0, 0, 1], lifeTime=0.1) # blue line for up vector
     viewMatrix = p.computeViewMatrix(eyePos, targetPos, up)
     image = p.getCameraImage(self.CAMERA_PIXEL_WIDTH,
                              self.CAMERA_PIXEL_HEIGHT,
                              viewMatrix,
                              self.projectionMatrix,
                              shadow=1,
                              lightDirection=[1, 1, 1],
                              renderer=p.ER_BULLET_HARDWARE_OPENGL)
     return image
Code Example #12
 def _getObservation(self):
     allJoints = [j.value for j in Joints]
     jointStates = p.getJointStates(self.anymal, allJoints)
     position = np.array([js[0] for js in jointStates])
     velocity = np.array([js[1] for js in jointStates])
     # global lastVelocity
     # if self.t == 0.0:
     #     lastVelocity = velocity
     if len(self.history_buffer['joint_state']['velocity']) == 0:
         acceleration = np.zeros(12)
     else:
         lastVelocity = self.history_buffer['joint_state']['velocity'][-1]
         acceleration = (velocity - lastVelocity) * SIMULATIONFREQUENCY
     # lastVelocity = velocity
     basePosition, baseOrientation = p.getBasePositionAndOrientation(
         self.anymal)
     baseOrientation_Matrix = np.array(
         p.getMatrixFromQuaternion(baseOrientation)).reshape(3, 3)
     baseOrientationVector = np.matmul(baseOrientation_Matrix, [0, 0, -1])
     baseVelocity, baseAngularVelocity = p.getBaseVelocity(self.anymal)
     observationAsArray = np.concatenate([
         position, velocity, basePosition, baseOrientation,
         baseOrientationVector, baseVelocity, baseAngularVelocity,
         acceleration
     ])
     observationAsDict = {
         "position": position,
         "velocity": velocity,
         "basePosition": basePosition,
         "baseOrientation": baseOrientation,
         "baseOrientationVector": baseOrientationVector,
         "baseVelocity": baseVelocity,
         "baseAngularVelocity": baseAngularVelocity,
         "acceleration": acceleration
     }
     return observationAsArray, observationAsDict
Code Example #13
File: sim_robot.py Project: ggory15/GFS_ARM
 def addDebugLinkFrame(self, linkId, axisLength=0.2, axisWidth=1):
     localTransCom = self.getLinkLocalInertialTransform(linkId)
     comPosLocal, comQuatLocal = pybullet.invertTransform(
         localTransCom[0], localTransCom[1])
     comRotLocal = np.array(
         pybullet.getMatrixFromQuaternion(comQuatLocal)).reshape((3, 3))
     pybullet.addUserDebugLine(comPosLocal,
                               comPosLocal + comRotLocal[:, 0] * axisLength,
                               Color.red,
                               lineWidth=axisWidth,
                               parentObjectUniqueId=self.id,
                               parentLinkIndex=linkId)
     pybullet.addUserDebugLine(comPosLocal,
                               comPosLocal + comRotLocal[:, 1] * axisLength,
                               Color.green,
                               lineWidth=axisWidth,
                               parentObjectUniqueId=self.id,
                               parentLinkIndex=linkId)
     pybullet.addUserDebugLine(comPosLocal,
                               comPosLocal + comRotLocal[:, 2] * axisLength,
                               Color.blue,
                               lineWidth=axisWidth,
                               parentObjectUniqueId=self.id,
                               parentLinkIndex=linkId)
Code Example #14
 def get_imu_raw(self, verbose=False):
     """
     Simulates the IMU at the IMU link location.
     TODO: Add noise model, make the refresh rate vary (currently in sync with the PyBullet time steps)
     :param verbose: Optional - Set to True to print the linear acceleration and angular velocity
     :return: concatenated 3-axes values for linear acceleration and angular velocity
     """
     [quart_link, lin_vel, ang_vel] = pb.getLinkState(self.body, linkIndex=Links.IMU,
                                                      computeLinkVelocity=1)[5:8]
     # [lin_vel, ang_vel] = p.getLinkState(bodyUniqueId=self.soccerbotUid, linkIndex=Links.HEAD_1, computeLinkVelocity=1)[6:8]
     # print(p.getLinkStates(bodyUniqueId=self.soccerbotUid, linkIndices=range(0,20,1), computeLinkVelocity=1))
     # p.getBaseVelocity(self.soccerbotUid)
     lin_vel = np.array(lin_vel, dtype=np.float32)
     self.gravity = [0, 0, -9.81]
     lin_acc = (lin_vel - self.prev_lin_vel) / self.time_step_sim
     lin_acc -= self.gravity
     rot_mat = np.array(pb.getMatrixFromQuaternion(quart_link), dtype=np.float32).reshape((3, 3))
     lin_acc = np.matmul(rot_mat, lin_acc)
     ang_vel = np.array(ang_vel, dtype=np.float32)
     self.prev_lin_vel = lin_vel
     if verbose:
         print(f'lin_acc = {lin_acc}', end="\t\t")
         print(f'ang_vel = {ang_vel}')
     return np.concatenate((lin_acc, ang_vel))
Code Example #15
    def render(self, mode='human'):
        if self.rendered_img is None:
            self.rendered_img = plt.imshow(np.zeros((100, 100, 4)))

        # Base information
        car_id, client_id = self.car.get_ids()
        proj_matrix = p.computeProjectionMatrixFOV(fov=80, aspect=1,
                                                   nearVal=0.01, farVal=100)
        pos, ori = [list(l) for l in
                    p.getBasePositionAndOrientation(car_id, client_id)]
        pos[2] = 0.2

        # Rotate camera direction
        rot_mat = np.array(p.getMatrixFromQuaternion(ori)).reshape(3, 3)
        camera_vec = np.matmul(rot_mat, [1, 0, 0])
        up_vec = np.matmul(rot_mat, np.array([0, 0, 1]))
        view_matrix = p.computeViewMatrix(pos, pos + camera_vec, up_vec)

        # Display image
        frame = p.getCameraImage(100, 100, view_matrix, proj_matrix)[2]
        frame = np.reshape(frame, (100, 100, 4))
        self.rendered_img.set_data(frame)
        plt.draw()
        plt.pause(.00001)
Code Example #16
 def getViewData(self):
     com_p, com_o = pybullet.getBasePositionAndOrientation(self._demon)
     rot_matrix = pybullet.getMatrixFromQuaternion(com_o)
     rot_matrix = np.array(rot_matrix).reshape(3, 3)
     # Initial vectors
     # init_camera_vector = (0, 0, -1) # z-axis
     # init_up_vector = (0, 1, 0) # y-axis
     # Rotated vectors
     # camera_vector = rot_matrix.dot(init_camera_vector)
     # up_vector = rot_matrix.dot(init_up_vector)
     # view_matrix = p.computeViewMatrix(com_p, com_p + 0.1 * camera_vector, up_vector)
     view_matrix = pybullet.computeViewMatrix(
         cameraEyePosition=[0, 0, 15],
         cameraTargetPosition=[0, 0, 0],
         cameraUpVector=[0, 1, 0])
     fov, aspect, nearplane, farplane = 60, 1.0, 0.01, 100        
     projection_matrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearplane, farplane)
     # img = p.getCameraImage(1000, 1000, view_matrix)
      (w, h, img, depth, segment) = pybullet.getCameraImage(
          width=self._render_shape[0],
          height=self._render_shape[1],
          viewMatrix=view_matrix,
          projectionMatrix=projection_matrix)
     # print (img)
     return img[..., :3]
Code Example #17
    def render(self):
        # Base information
        kuka_id, client_id = self.kuka_iiwa.get_ids()
        proj_matrix = p.computeProjectionMatrixFOV(fov=80,
                                                   aspect=1,
                                                   nearVal=0.01,
                                                   farVal=100)
        pos, ori = [
            list(l)
            for l in p.getBasePositionAndOrientation(kuka_id, client_id)
        ]

        # Rotate camera direction
        rot_mat = np.array(p.getMatrixFromQuaternion(ori)).reshape(3, 3)
        camera_vec = np.matmul(rot_mat, [1, 0, 0])
        up_vec = [0, 0, 1]
        view_matrix = p.computeViewMatrix(pos, pos + camera_vec, up_vec)

        # Display image
        _, _, frame, _, _ = p.getCameraImage(800, 800, view_matrix, proj_matrix)
        frame = np.reshape(frame, (800, 800, 4))
        self.rendered_img.set_data(frame)
        return frame
Code Example #18
    def get_robot_observation(self, with_vel=False):
        obs = []

        # root z and root rot mat (1+9)
        root_pos, root_orn = self.get_link_com_xyz_orn(-1)
        root_x, root_y, root_z = root_pos
        obs.extend([root_z])
        obs.extend(pybullet.getMatrixFromQuaternion(root_orn))

        # root lin vel (3)
        base_vel, base_ang_vel = self._p.getBaseVelocity(self.go_id)
        obs.extend(base_vel)

        # non-root joint q (12)
        q, dq = self.get_q_dq(self.ctrl_dofs)
        obs.extend(q)

        # feet (offset from root) (12)
        for link in self.feet:
            pos, _ = self.get_link_com_xyz_orn(link, fk=1)
            pos[0] -= root_x
            pos[1] -= root_y
            pos[2] -= root_z
            obs.extend(pos)

        length_wo_vel = len(obs)

        obs.extend(base_ang_vel)  # (3)
        obs.extend(dq)  # (12)

        obs = np.array(obs) * self.robo_obs_scale

        if not with_vel:
            obs = obs[:length_wo_vel]

        return list(obs)
Code Example #19
    def pick(self,
             pos,
             rot,
             offset,
             dynamic=True,
             objects=None,
             simulate_grasp=True):
        ''''''
        # Setup pre-grasp pos and default orientation
        self.openGripper()
        pre_pos = copy.copy(pos)
        m = np.array(pb.getMatrixFromQuaternion(rot)).reshape(3, 3)
        pre_pos += m[:, 2] * offset
        # rot = pb.getQuaternionFromEuler([np.pi/2.,-np.pi,np.pi/2])
        pre_rot = rot

        # Move to pre-grasp pose and then grasp pose
        self.moveTo(pre_pos, pre_rot, dynamic)
        if simulate_grasp:
            self.moveTo(pos, rot, True, pos_th=1e-3, rot_th=1e-3)

            # Grasp object and lift up to pre pose
            gripper_fully_closed = self.closeGripper()
            self.adjustGripperCommand()
            if gripper_fully_closed:
                self.openGripper()

            self.moveTo(pre_pos, pre_rot, True)
            for i in range(10):
                pb.stepSimulation()
        else:
            self.moveTo(pos, rot, dynamic)

        self.holding_obj = self.getPickedObj(objects)
        self.moveToJ(self.home_positions_joint, dynamic)
        self.checkGripperClosed()
Code Example #20
 def calcWheelVelsAngle(self, pos, quat, lv, av):
     # Calculate the Z axis of the car body.
     rotMat = p.getMatrixFromQuaternion(quat)
     hMat = np.eye(4, dtype=np.float32)
     hMat[:3, :3] = np.array(rotMat).reshape((3, 3))
     zVec = np.array([0, 0, 1, 1])
     rotatedZ = np.matmul(hMat, zVec)
     # Calculate the angle of Z axis.
     theta = self.calcAngle(zVec, rotatedZ)
     yVec = np.array([0, 1, 0, 1])
     rotatedY = np.matmul(hMat, yVec)
     cross = np.cross(zVec[:3], rotatedZ[:3])
     yTheta = self.calcAngle(cross, rotatedY)
     if yTheta > math.pi / 2:
         theta = -theta
     print(theta)
     # PID
     dTheta = (theta - self.lastP) / self.ts
     self.lastP = theta
     self.inti = self.inti + theta * self.ts
     v = self.kp * theta + self.kd * dTheta + self.ki * self.inti
     res = self.mul * v
     print(res)
     return res, res
Code Example #21
camInfo = p.getDebugVisualizerCamera()
lastTime = time.time()
lastControlTime = time.time()
lastLidarTime = time.time()

frame=0
while (True):
	
	nowTime = time.time()
	#render Camera at 10Hertz
	if (nowTime-lastTime>.1):
		ls = p.getLinkState(car,zed_camera_joint, computeForwardKinematics=True)
		camPos = ls[0]
		camOrn = ls[1]
		camMat = p.getMatrixFromQuaternion(camOrn)
		upVector = [0,0,1]
		forwardVec = [camMat[0],camMat[3],camMat[6]]
		#sideVec =  [camMat[1],camMat[4],camMat[7]]
		camUpVec =  [camMat[2],camMat[5],camMat[8]]
		camTarget = [camPos[0]+forwardVec[0]*10,camPos[1]+forwardVec[1]*10,camPos[2]+forwardVec[2]*10]
		camUpTarget = [camPos[0]+camUpVec[0],camPos[1]+camUpVec[1],camPos[2]+camUpVec[2]]
		viewMat = p.computeViewMatrix(camPos, camTarget, camUpVec)
		projMat = camInfo[3]
		#p.getCameraImage(320,200,viewMatrix=viewMat,projectionMatrix=projMat, flags=p.ER_NO_SEGMENTATION_MASK, renderer=p.ER_BULLET_HARDWARE_OPENGL)
		p.getCameraImage(320,200,viewMatrix=viewMat,projectionMatrix=projMat, renderer=p.ER_BULLET_HARDWARE_OPENGL)
		lastTime=nowTime
	
	nowControlTime = time.time()
	
	nowLidarTime = time.time()
Code Example #22
def is_fallen():
  global minitaur
  orientation = minitaur.getBaseOrientation()
  rotMat = p.getMatrixFromQuaternion(orientation)
  localUp = rotMat[6:]
  return np.dot(np.asarray([0, 0, 1]), np.asarray(localUp)) < 0
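
A quick standalone check of this logic: rotMat[6:] is the world up axis expressed in the base frame, so its dot product with (0, 0, 1) turns negative once the body has rolled past 90 degrees.

import numpy as np
import pybullet as p

for roll in (0.0, np.pi / 4, np.pi):  # upright, tilted, upside down
    rotMat = p.getMatrixFromQuaternion(p.getQuaternionFromEuler([roll, 0, 0]))
    localUp = rotMat[6:]
    print(roll, np.dot(np.asarray([0, 0, 1]), np.asarray(localUp)) < 0)
    # -> False, False, True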
Code Example #23
def talker():
    global getMode, get_last_vel, myflags
    iter = 0
    iter1 = 0
    get_orientation = []
    get_euler = []
    get_matrix = []
    get_velocity = []
    get_invert = []
    jointTorques = []
    jointTorques1 = []
    init_pos = []
    middle_target = []
    joint_Kp = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
    joint_Kd = [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
    stand_target = [
        0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6
    ]
    pub1 = rospy.Publisher('/get_js', JointState, queue_size=100)
    pub2 = rospy.Publisher('/imu_body', Imu, queue_size=100)
    pub3 = rospy.Publisher('/get_com', commandDes, queue_size=100)
    imu_msg = Imu()
    setJSMsg = JointState()
    com_msg = commandDes()
    freq = 400
    rate = rospy.Rate(freq)  # hz

    while not rospy.is_shutdown():

        if myflags == 100:
            p.resetBasePositionAndOrientation(boxId, [0, 0, 0.2], [0, 0, 0, 1])
            time.sleep(1)
            for j in range(16):
                force = 0
                p.setJointMotorControl2(boxId,
                                        j,
                                        p.VELOCITY_CONTROL,
                                        force=force)

        get_orientation = []
        pose_orn = p.getBasePositionAndOrientation(boxId)
        for i in range(4):
            get_orientation.append(pose_orn[1][i])
        get_euler = p.getEulerFromQuaternion(get_orientation)
        get_velocity = p.getBaseVelocity(boxId)
        get_invert = p.invertTransform(pose_orn[0], pose_orn[1])
        get_matrix = p.getMatrixFromQuaternion(get_invert[1])

        # IMU data
        imu_msg.orientation.x = pose_orn[1][0]
        imu_msg.orientation.y = pose_orn[1][1]
        imu_msg.orientation.z = pose_orn[1][2]
        imu_msg.orientation.w = pose_orn[1][3]

        imu_msg.angular_velocity.x = get_matrix[0] * get_velocity[1][
            0] + get_matrix[1] * get_velocity[1][1] + get_matrix[
                2] * get_velocity[1][2]
        imu_msg.angular_velocity.y = get_matrix[3] * get_velocity[1][
            0] + get_matrix[4] * get_velocity[1][1] + get_matrix[
                5] * get_velocity[1][2]
        imu_msg.angular_velocity.z = get_matrix[6] * get_velocity[1][
            0] + get_matrix[7] * get_velocity[1][1] + get_matrix[
                8] * get_velocity[1][2]

        # calculate the acceleration of the robot
        linear_X = (get_velocity[0][0] - get_last_vel[0]) * freq
        linear_Y = (get_velocity[0][1] - get_last_vel[1]) * freq
        linear_Z = 9.8 + (get_velocity[0][2] - get_last_vel[2]) * freq
        imu_msg.linear_acceleration.x = get_matrix[0] * linear_X + get_matrix[
            1] * linear_Y + get_matrix[2] * linear_Z
        imu_msg.linear_acceleration.y = get_matrix[3] * linear_X + get_matrix[
            4] * linear_Y + get_matrix[5] * linear_Z
        imu_msg.linear_acceleration.z = get_matrix[6] * linear_X + get_matrix[
            7] * linear_Y + get_matrix[8] * linear_Z

        # joint data
        joint_state = p.getJointStates(boxId, motor_id_list)
        setJSMsg.position = [
            joint_state[0][0], joint_state[1][0], joint_state[2][0],
            joint_state[3][0], joint_state[4][0], joint_state[5][0],
            joint_state[6][0], joint_state[7][0], joint_state[8][0],
            joint_state[9][0], joint_state[10][0], joint_state[11][0]
        ]

        setJSMsg.velocity = [
            joint_state[0][1], joint_state[1][1], joint_state[2][1],
            joint_state[3][1], joint_state[4][1], joint_state[5][1],
            joint_state[6][1], joint_state[7][1], joint_state[8][1],
            joint_state[9][1], joint_state[10][1], joint_state[11][1]
        ]

        # com data
        com_msg.com_position = [pose_orn[0][0], pose_orn[0][1], pose_orn[0][2]]
        com_msg.com_velocity = [
            get_velocity[0][0], get_velocity[0][1], get_velocity[0][2]
        ]
        get_last_vel.clear()
        get_last_vel = com_msg.com_velocity

        # stand up control
        if len(get_effort):
            print(get_effort)
            p.setJointMotorControlArray(bodyUniqueId=boxId,
                                        jointIndices=motor_id_list,
                                        controlMode=p.TORQUE_CONTROL,
                                        forces=get_effort)

        setJSMsg.header.stamp = rospy.Time.now()
        setJSMsg.name = [
            "abduct_fr", "thigh_fr", "knee_fr", "abduct_fl", "thigh_fl",
            "knee_fl", "abduct_hr", "thigh_hr", "knee_hr", "abduct_hl",
            "thigh_hl", "knee_hl"
        ]
        if myflags % 2 == 0:
            pub2.publish(imu_msg)

        pub1.publish(setJSMsg)
        pub3.publish(com_msg)
        myflags = myflags + 1
        p.setTimeStep(0.0015)
        p.stepSimulation()
        rate.sleep()
Code Example #24
    def pbvs(self,
             arm,
             goal_pos,
             goal_rot,
             kv=1.3,
             kw=0.8,
             eps_pos=0.005,
             eps_rot=0.05,
             vs_rate=20,
             plot_pose=False,
             print_err=False,
             perturb_jacobian=False,
             perturb_Jac_joint_mu=0.1,
             perturb_Jac_joint_sigma=0.1,
             perturb_orientation=False,
             mu_R=0.4,
             sigma_R=0.4,
             perturb_motion_R_mu=0.0,
             perturb_motion_R_sigma=0.0,
             perturb_motion_t_mu=0.0,
             perturb_motion_t_sigma=0.0,
             plot_result=False,
             pf_mode=False):

        # Initialize variables
        self.perturb_Jac_joint_mu = perturb_Jac_joint_mu
        self.perturb_Jac_joint_sigma = perturb_Jac_joint_sigma
        self.perturb_R_mu = mu_R
        self.perturb_R_sigma = sigma_R
        if arm == "right":
            tool = self.right_tool
        elif arm == "left":
            tool = self.left_tool
        else:
            print('''arm incorrect, input "left" or "right" ''')
            return

        # Initialize pos_plot_id for later use
        if pf_mode:
            cur_pos, cur_rot = SE32_trans_rot(self.cur_pose_est)
        else:
            cur_pos, cur_rot = SE32_trans_rot(
                self.Trc.inverse() * get_pose(self.robot, tool, SE3=True))
        if plot_pose:
            pos_plot_id = self.draw_pose_cam(trans_rot2SE3(cur_pos, cur_rot))

        # record end effector pose and time stamp at each iteration
        if plot_result:
            start = time.time()
            times = []
            eef_pos = []
            eef_rot = []

        if pf_mode:
            motion = sp.SE3()
        ##### Control loop starts #####

        while True:

            # If using particle filter and the hog computation has finished, exit pbvs control
            # and assign total estimated motion in eef frame
            if pf_mode and self.shared_pf_fin.value == 1:
                # eef Motion estimated from pose estimate of last iteration and current fk
                motion_truth = (self.Trc *
                                self.cur_pose_est).inverse() * get_pose(
                                    self.robot, tool, SE3=True)
                # print("motion truth:", motion_truth)

                dR = sp.SO3.exp(
                    np.random.normal(perturb_motion_R_mu,
                                     perturb_motion_R_sigma, 3)).matrix()
                dt = np.random.normal(perturb_motion_t_mu,
                                      perturb_motion_t_sigma, 3)
                # self.draw_pose_cam(motion)
                self.motion = motion_truth * sp.SE3(dR, dt)
                return

            # get current eef pose in camera frame
            if pf_mode:
                cur_pos, cur_rot = SE32_trans_rot(self.cur_pose_est)
            else:
                cur_pos, cur_rot = SE32_trans_rot(
                    self.Trc.inverse() * get_pose(self.robot, tool, SE3=True))
            if plot_pose:
                self.draw_pose_cam(trans_rot2SE3(cur_pos, cur_rot),
                                   uids=pos_plot_id)
            if plot_result:
                eef_pos.append(cur_pos)
                eef_rot.append(cur_rot)
                times.append(time.time() - start)
            cur_pos_inv, cur_rot_inv = p.invertTransform(cur_pos, cur_rot)
            # Pose of goal in camera frame
            pos_cg, rot_cg = p.multiplyTransforms(cur_pos_inv, cur_rot_inv,
                                                  goal_pos, goal_rot)
            # Evaluate current translational and rotational error
            err_pos = np.linalg.norm(pos_cg)
            err_rot = np.linalg.norm(p.getAxisAngleFromQuaternion(rot_cg)[1])
            if err_pos < eps_pos and err_rot < eps_rot:
                p.removeAllUserDebugItems()
                break
            else:
                if print_err:
                    print("Error to goal, position: {:2f}, orientation: {:2f}".
                          format(err_pos, err_rot))

            Rsc = np.array(p.getMatrixFromQuaternion(cur_rot)).reshape(3, 3)
            # Perturb Rsc in SO(3) by a random variable in tangent space so(3)
            if perturb_orientation:
                dR = sp.SO3.exp(
                    np.random.normal(self.perturb_R_mu, self.perturb_R_sigma,
                                     3))
                Rsc = Rsc.dot(dR.matrix())

            twist_local = np.hstack(
                (np.array(pos_cg) * kv,
                 np.array(quat2se3(rot_cg)) * kw)).reshape(6, 1)
            local2global = np.block([[Rsc, np.zeros((3, 3))],
                                     [np.zeros((3, 3)), Rsc]])
            twist_global = local2global.dot(twist_local)
            start_loop = time.time()
            self.cartesian_vel_control(arm,
                                       np.asarray(twist_global),
                                       1 / vs_rate,
                                       perturb_jacobian=perturb_jacobian)
            if pf_mode:
                delta_t = time.time() - start_loop
                motion = motion * sp.SE3.exp(twist_local * delta_t)
                # self.draw_pose_cam(motion)

        self.goal_reached = True
        print("PBVS goal achieved!")

        if plot_result:
            eef_pos = np.array(eef_pos)
            eef_rot_rpy = np.array(
                [p.getEulerFromQuaternion(quat) for quat in eef_rot])

            fig, axs = plt.subplots(3, 2)

            sub_titles = [['x', 'roll'], ['y', 'pitch'], ['z', 'yaw']]
            fig.suptitle(
                "Position Based Visual Servo End Effector Pose - time plot")
            for i in range(3):
                axs[i, 0].plot(times, eef_pos[:, i] * 100)
                axs[i, 0].plot(times, goal_pos[i] * np.ones(len(times)) * 100)
                axs[i, 0].legend([sub_titles[i][0], 'goal'])
                axs[i, 0].set_xlabel('Time(s)')
                axs[i, 0].set_ylabel('cm')
            goal_rpy = p.getEulerFromQuaternion(goal_rot)

            for i in range(3):
                axs[i, 1].plot(times, eef_rot_rpy[:, i] * 180 / np.pi)
                axs[i, 1].plot(times,
                               goal_rpy[i] * np.ones(len(times)) * 180 / np.pi)
                axs[i, 1].legend([sub_titles[i][1], 'goal'])
                axs[i, 1].set_xlabel('Time(s)')
                axs[i, 1].set_ylabel('deg')

            for ax in axs.flat:
                ax.set(xlabel='time')

            plt.show()
Code Example #25
def matrix_from_quat(quat):
    return np.array(p.getMatrixFromQuaternion(quat, physicsClientId=CLIENT)).reshape(3, 3)
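
A usage sketch for this helper; CLIENT here stands in for the module-level client id that the original file defines elsewhere:

import numpy as np
import pybullet as p

CLIENT = p.connect(p.DIRECT)  # assumption: the module's shared client id

R = matrix_from_quat(p.getQuaternionFromEuler([0.1, 0.2, 0.3]))

# A valid rotation matrix is orthonormal with determinant +1.
np.testing.assert_allclose(R @ R.T, np.eye(3), atol=1e-6)
assert np.isclose(np.linalg.det(R), 1.0)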
Code Example #26
def trans_rot2SE3(trans, rot):
    rotm = np.array(p.getMatrixFromQuaternion(rot)).reshape(3, 3)
    return sp.SE3(rotm, np.array(trans))
Code Example #27
File: kuka_grasp.py Project: ry-2456/pybullet_work
        seg = np.reshape(images[4], (height, width))
        block_mask = object_mask_from_seg(object_uid, seg)
        posmap = get_posmap(near, far, view_matrix, projection_matrix, height,
                            width, depth_buffer)
        block_posmap = posmap[block_mask != 0]
        v = longitudinal_direction(block_posmap)

        # Draw the block's longitudinal direction
        p.addUserDebugLine(b_pos + v / 10, b_pos - v / 10, [1, 0, 0])

        # Compute the homogeneous transformation matrix world -> eef
        eef_base_pos, eef_base_orn = p.getLinkState(kukaId,
                                                    kukaEndEffectorIndex)[4:6]
        world_to_eef_base = np.eye(4)
        world_to_eef_base[0:3, 0:3] = np.array(
            p.getMatrixFromQuaternion(eef_base_orn)).reshape(3, 3)
        world_to_eef_base[:3, 3] = eef_base_pos

        # Compute the yaw rotation angle; the block's longitudinal direction just needs to align with the eef's y-axis
        v_in_eef = np.linalg.inv(world_to_eef_base).dot(np.hstack(
            [v, 1]))[:3] - np.linalg.inv(world_to_eef_base).dot(
                np.array([0, 0, 0, 1]))[:3]
        v_in_eef = v_in_eef / np.linalg.norm(v_in_eef)
        e_y = np.array([0, 1., 0])
        yaw_rot_diff = np.arccos(np.dot(v_in_eef, e_y))
        yaw_now = p.getEulerFromQuaternion(eef_base_orn)[2]
        eef_orn = [0, -np.pi, yaw_now]  # eef pointing downward
        if v_in_eef[0] < 0: eef_orn[2] -= yaw_rot_diff
        else: eef_orn[2] += yaw_rot_diff
        # TODO: add handling to clamp eef_orn[2] to the joint limits?
Code Example #28

  #keep the gripper centered/symmetric
  b = p.getJointState(pr2_gripper, 2)[0]
  p.setJointMotorControl2(pr2_gripper, 0, p.POSITION_CONTROL, targetPosition=b, force=3)

  events = p.getVREvents()
  for e in (events):
    if e[CONTROLLER_ID] == uiControllerId:
      p.resetBasePositionAndOrientation(uiCube, e[POSITION], e[ORIENTATION])
      pos = e[POSITION]
      orn = e[ORIENTATION]
      lineFrom = pos
      mat = p.getMatrixFromQuaternion(orn)
      dir = [mat[0], mat[3], mat[6]]
      to = [pos[0] + dir[0] * rayLen, pos[1] + dir[1] * rayLen, pos[2] + dir[2] * rayLen]
      hit = p.rayTest(lineFrom, to)
      oldRay = pointRay
      color = [1, 1, 0]
      width = 3
      #pointRay = p.addUserDebugLine(lineFrom,to,color,width,lifeTime=1)
      #if (oldRay>=0):
      #	p.removeUserDebugItem(oldRay)

      if (hit):
        #if (hit[0][0]>=0):
        hitObjectUid = hit[0][0]
        linkIndex = hit[0][1]
        if (hitObjectUid >= 0):
Code Example #29
    def apply_command(self, cmd, velocity=True, local_coord=True):
        """
        Applies the command to the sensor.
        Args:
            cmd (list)          : Position and orientation commands.
            velocity (bool)     : Set to true if the commands are velocity, they are assumed to be position otherwise.
            local_coord (bool)  : Set to false to use global coordinates.
        """
        if self._virtual_links:
            for j in range(p.getNumJoints(self._sensor_id)):
                if velocity:
                    p.setJointMotorControl2(self._sensor_id,
                                            j,
                                            p.VELOCITY_CONTROL,
                                            targetPosition=0,
                                            targetVelocity=cmd[j],
                                            velocityGain=1.,
                                            force=self._max_force)
                else:
                    p.setJointMotorControl2(self._sensor_id,
                                            j,
                                            p.POSITION_CONTROL,
                                            targetPosition=cmd[j],
                                            targetVelocity=0,
                                            positionGain=1,
                                            velocityGain=1,
                                            force=self._max_force)
        else:
            if velocity:
                delta_position = np.array(cmd[0:3]) * config.TIME_STEP
                delta_orientation = np.array(cmd[3:6]) * config.TIME_STEP

                base_position, base_orientation = p.getBasePositionAndOrientation(
                    self._sensor_id)

                if local_coord:
                    rot_mat = p.getMatrixFromQuaternion(base_orientation)
                    rot_mat = np.array(rot_mat).reshape(3, 3)
                    new_position = rot_mat.dot(delta_position) + np.array(
                        base_position)
                else:
                    new_position = delta_position + np.array(base_position)

                base_orientation = np.array(
                    p.getEulerFromQuaternion(base_orientation))
                new_orientation = (base_orientation +
                                   delta_orientation).tolist()
                new_orientation = p.getQuaternionFromEuler(new_orientation)
            else:
                assert not local_coord, "Position controller only works with global coordinates."
                new_position = cmd[0:3]
                new_orientation = p.getQuaternionFromEuler(cmd[3:6])

            if self._constrained:
                p.changeConstraint(self._sensor_constraint,
                                   new_position,
                                   new_orientation,
                                   maxForce=self._max_force)
            else:
                p.resetBasePositionAndOrientation(self._sensor_id,
                                                  new_position,
                                                  new_orientation)
Code Example #30
		nowhere = False

		while (path.rad < radius + .5):
			# maxForce = p.readUserDebugParameter(maxForceSlider)
			# targetVelocity = p.readUserDebugParameter(targetVelocitySlider)
			# steeringAngle = p.readUserDebugParameter(steeringSlider)
			maxForce = 10.
			targetVelocity = -5
			steeringAngle = 0
			

		#########---------------START Obstacle Avoidance----------########

			position, orientation = p.getBasePositionAndOrientation(car)

			matrix = p.getMatrixFromQuaternion(orientation)
			matrix = np.reshape(matrix, (3, 3))

			src = np.array(position) 
			src = src + np.matmul(matrix,offset)

			path.calcDist(src)
			h = 10

			rays_src = np.repeat([src], num_rays, axis=0)

			orn = np.matmul(matrix, rot) #rotates unit vector y to -x

			rays_end = np.matmul(orn, rays) # unit vector in direction of minitaur
			rays_end = (rays_end + src[:, None]).T
			rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())
Code Example #31
File: vrtracker.py Project: Icequamber/Creature
colors[0] = [0, 0, 0]
colors[1] = [0.5, 0, 0]
colors[2] = [0, 0.5, 0]
colors[3] = [0, 0, 0.5]
colors[4] = [0.5, 0.5, 0.]
colors[5] = [.5, .5, .5]

p.startStateLogging(p.STATE_LOGGING_VR_CONTROLLERS, "vr_hmd.bin", deviceTypeFilter=p.VR_DEVICE_HMD)
p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT, "generic_data.bin")
p.startStateLogging(p.STATE_LOGGING_CONTACT_POINTS, "contact_points.bin")

while True:
  events = p.getVREvents(p.VR_DEVICE_HMD + p.VR_DEVICE_GENERIC_TRACKER)
  for e in (events):
    pos = e[POSITION]
    mat = p.getMatrixFromQuaternion(e[ORIENTATION])
    dir0 = [mat[0], mat[3], mat[6]]
    dir1 = [mat[1], mat[4], mat[7]]
    dir2 = [mat[2], mat[5], mat[8]]
    lineLen = 0.1
    dir = [-mat[2], -mat[5], -mat[8]]

    to = [pos[0] + lineLen * dir[0], pos[1] + lineLen * dir[1], pos[2] + lineLen * dir[2]]
    toX = [pos[0] + lineLen * dir0[0], pos[1] + lineLen * dir0[1], pos[2] + lineLen * dir0[2]]
    toY = [pos[0] + lineLen * dir1[0], pos[1] + lineLen * dir1[1], pos[2] + lineLen * dir1[2]]
    toZ = [pos[0] + lineLen * dir2[0], pos[1] + lineLen * dir2[1], pos[2] + lineLen * dir2[2]]
    p.addUserDebugLine(pos, toX, [1, 0, 0], 1)
    p.addUserDebugLine(pos, toY, [0, 1, 0], 1)
    p.addUserDebugLine(pos, toZ, [0, 0, 1], 1)

    p.addUserDebugLine(pos, to, [0.5, 0.5, 0.], 1, 3)
Code Example #32
    def _dslPIDPositionControl(self, control_timestep, cur_pos, cur_quat,
                               cur_vel, target_pos, target_rpy, target_vel):
        """DSL's CF2.x PID position control.

        Parameters
        ----------
        control_timestep : float
            The time step at which control is computed.
        cur_pos : ndarray
            (3,1)-shaped array of floats containing the current position.
        cur_quat : ndarray
            (4,1)-shaped array of floats containing the current orientation as a quaternion.
        cur_vel : ndarray
            (3,1)-shaped array of floats containing the current velocity.
        target_pos : ndarray
            (3,1)-shaped array of floats containing the desired position.
        target_rpy : ndarray
            (3,1)-shaped array of floats containing the desired orientation as roll, pitch, yaw.
        target_vel : ndarray
            (3,1)-shaped array of floats containing the desired velocity.

        Returns
        -------
        float
            The target thrust along the drone z-axis.
        ndarray
            (3,1)-shaped array of floats containing the target roll, pitch, and yaw.
        float
            The current position error.

        """
        cur_rotation = np.array(p.getMatrixFromQuaternion(cur_quat)).reshape(
            3, 3)
        pos_e = target_pos - cur_pos
        vel_e = target_vel - cur_vel
        self.integral_pos_e = self.integral_pos_e + pos_e * control_timestep
        self.integral_pos_e = np.clip(self.integral_pos_e, -2., 2.)
        self.integral_pos_e[2] = np.clip(self.integral_pos_e[2], -0.15, .15)
        #### PID target thrust #####################################
        target_thrust = np.multiply(self.P_COEFF_FOR, pos_e) \
                        + np.multiply(self.I_COEFF_FOR, self.integral_pos_e) \
                        + np.multiply(self.D_COEFF_FOR, vel_e) + np.array([0, 0, self.GRAVITY])
        scalar_thrust = max(0., np.dot(target_thrust, cur_rotation[:, 2]))
        thrust = (math.sqrt(scalar_thrust / (4 * self.KF)) -
                  self.PWM2RPM_CONST) / self.PWM2RPM_SCALE
        target_z_ax = target_thrust / np.linalg.norm(target_thrust)
        target_x_c = np.array(
            [math.cos(target_rpy[2]),
             math.sin(target_rpy[2]), 0])
        target_y_ax = np.cross(target_z_ax, target_x_c) / np.linalg.norm(
            np.cross(target_z_ax, target_x_c))
        target_x_ax = np.cross(target_y_ax, target_z_ax)
        target_rotation = (np.vstack([target_x_ax, target_y_ax,
                                      target_z_ax])).transpose()
        #### Target rotation #######################################
        target_euler = (Rotation.from_matrix(target_rotation)).as_euler(
            'XYZ', degrees=False)
        if np.any(np.abs(target_euler) > math.pi):
            print(
                "\n[ERROR] ctrl it", self.control_counter,
                "in Control._dslPIDPositionControl(), values outside range [-pi,pi]"
            )
        return thrust, target_euler, pos_e