Example #1
def get_image(camera_pos,
              target_pos,
              width=640,
              height=480,
              vertical_fov=60.0,
              near=0.02,
              far=5.0,
              segment=False,
              segment_links=False):
    # alternative: p.computeViewMatrixFromYawPitchRoll
    view_matrix = p.computeViewMatrix(cameraEyePosition=camera_pos,
                                      cameraTargetPosition=target_pos,
                                      cameraUpVector=[0, 0, 1],
                                      physicsClientId=CLIENT)
    projection_matrix = get_projection_matrix(width, height, vertical_fov,
                                              near, far)
    if segment:
        if segment_links:
            flags = p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX
        else:
            flags = 0
    else:
        flags = p.ER_NO_SEGMENTATION_MASK
    image = CameraImage(*p.getCameraImage(
        width,
        height,
        viewMatrix=view_matrix,
        projectionMatrix=projection_matrix,
        shadow=False,
        flags=flags,
        renderer=p.ER_TINY_RENDERER,  # p.ER_BULLET_HARDWARE_OPENGL
        physicsClientId=CLIENT)[2:])
    depth = far * near / (far - (far - near) * image.depthPixels)
    # https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/pointCloudFromCameraImage.py
    # https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/getCameraImageTest.py
    segmented = None
    if segment:
        segmented = np.zeros(image.segmentationMaskBuffer.shape + (2, ))
        for r in range(segmented.shape[0]):
            for c in range(segmented.shape[1]):
                pixel = image.segmentationMaskBuffer[r, c]
                segmented[r, c, :] = demask_pixel(pixel)
    return CameraImage(image.rgbPixels, depth, segmented)
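demask_pixel is not shown here; a minimal sketch consistent with PyBullet's documented segmentation encoding (with ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX, each pixel stores objectUniqueId + ((linkIndex + 1) << 24)) might look like this — the (body, link) return convention is an assumption:

def demask_pixel(pixel):
    # Background pixels are -1; otherwise the low 24 bits hold the body
    # unique id and the high bits hold linkIndex + 1.
    if pixel == -1:
        return -1, -1
    body = pixel & ((1 << 24) - 1)
    link = (pixel >> 24) - 1
    return body, link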
Example #2
    def set_view_matrix(self, camera_eye_pos, camera_target_pos, camera_up_vec):
        """
        Sets the camera view matrix (OpenGL ModelView Matrix).
        Args:
            camera_eye_pos (list or np.array)       : Position of the camera (eye point).
            camera_target_pos (list or np.array)    : Position of the point in the scene the camera is looking at.
            camera_up_vec (list or np.array)        : Camera up vector.
        """
        self._camera_eye_pos = camera_eye_pos
        self._camera_target_pos = camera_target_pos
        self._camera_up_vec = camera_up_vec

        # use the Pybullet helper function to compute the view matrix
        view_mat = p.computeViewMatrix(cameraEyePosition=camera_eye_pos,
                                       cameraTargetPosition=camera_target_pos,
                                       cameraUpVector=camera_up_vec)

        # Bullet uses column-major matrices
        self._view_mat = np.reshape(view_mat, (4, 4)).transpose()
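Because the flat 16-tuple is column-major, the reshape-then-transpose above is equivalent to a single Fortran-order reshape — a one-line sketch:

view_mat_4x4 = np.reshape(view_mat, (4, 4), order='F')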
Example #3
    def _getCameraImage(self):
        """
        INTERNAL METHOD. Computes the OpenGL virtual camera image. The
        resolution and the projection matrix have to be computed before
        calling this method, or it will crash.

        Returns:
            camera_image - The camera image of the OpenGL virtual camera
        """
        _, _, _, _, pos_world, q_world = pybullet.getLinkState(
            self.robot_model,
            self.camera_link.getParentIndex(),
            computeForwardKinematics=False,
            physicsClientId=self.physics_client)

        rotation = pybullet.getMatrixFromQuaternion(q_world)
        forward_vector = [rotation[0], rotation[3], rotation[6]]
        up_vector = [rotation[2], rotation[5], rotation[8]]

        camera_target = [
            pos_world[0] + forward_vector[0] * 10,
            pos_world[1] + forward_vector[1] * 10,
            pos_world[2] + forward_vector[2] * 10
        ]

        view_matrix = pybullet.computeViewMatrix(
            pos_world,
            camera_target,
            up_vector,
            physicsClientId=self.physics_client)

        with self.resolution_lock:
            camera_image = pybullet.getCameraImage(
                self.resolution.width,
                self.resolution.height,
                view_matrix,
                self.projection_matrix,
                renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
                flags=pybullet.ER_NO_SEGMENTATION_MASK,
                physicsClientId=self.physics_client)

        return camera_image
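The strided indexing of the 9-tuple above extracts the first and third columns of the rotation matrix; an equivalent NumPy form (a sketch, assuming numpy is imported as np) makes the frame-axis reading explicit:

R = np.array(rotation).reshape(3, 3)
forward_vector = R[:, 0]  # camera frame x-axis in world coordinates
up_vector = R[:, 2]       # camera frame z-axis in world coordinates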
Example #4
    def render_img(self,
                   left_gripper_joint,
                   left_gripper2_joint,
                   Tce,
                   plot_cam_pose=False,
                   imsize=(214, 214),
                   plot_img=False,
                   show_time_taken=False):
        if show_time_taken:
            start = time.time()
        p.resetJointStatesMultiDof(self.robot,
                                   jointIndices=self.joint_ids,
                                   targetValues=[[left_gripper_joint],
                                                 [left_gripper2_joint]],
                                   physicsClientId=self.physicsClient)
        Tec = Tce.inverse()
        cam_trans, cam_rot = SE32_trans_rot(Tec)
        cam_rotm = Tec.rotationMatrix()
        if plot_cam_pose:
            draw_pose(cam_trans, cam_rot)
        # Calculate extrinsic matrix
        # target position is camera frame y axis tip in global frame
        # up vector is the z axis of camera frame
        viewMatrix = p.computeViewMatrix(
            cameraEyePosition=cam_trans,
            cameraTargetPosition=(cam_rotm.dot(np.array([0, 1, 0]).T) +
                                  np.array(cam_trans)).tolist(),
            cameraUpVector=cam_rotm[:, 2].tolist())
        width, height, rgbImg, depthImg, segImg = p.getCameraImage(
            width=imsize[0],
            height=imsize[1],
            viewMatrix=viewMatrix,
            projectionMatrix=self.projectionMatrix,
            flags=p.ER_NO_SEGMENTATION_MASK,
            physicsClientId=self.physicsClient)

        if show_time_taken:
            print("time taken for rendering: {}".format(time.time() - start))
        if plot_img:
            plt.imshow(rgbImg[:, :, :3])
            plt.show()
        return rgbImg[:, :, :3]
Example #5
    def getlocalMapObservation(self):
        # render an overhead image of the area around the agent
        """
            For now this includes the agent in the center of the computation
        """

        com_p, com_o = pybullet.getBasePositionAndOrientation(self._demon)
        rot_matrix = pybullet.getMatrixFromQuaternion(com_o)
        rot_matrix = np.array(rot_matrix).reshape(3, 3)
        # Initial vectors
        # init_camera_vector = (0, 0, -1) # z-axis
        # init_up_vector = (0, 1, 0) # y-axis
        # Rotated vectors
        # camera_vector = rot_matrix.dot(init_camera_vector)
        # up_vector = rot_matrix.dot(init_up_vector)
        # view_matrix = p.computeViewMatrix(com_p, com_p + 0.1 * camera_vector, up_vector)
        if (self._fixed_view):
            x = 0.0
            y = 0
        else:
            x = com_p[0]
            y = com_p[1]
        view_matrix = pybullet.computeViewMatrix(
            cameraEyePosition=[x, y, self._observation_height],
            cameraTargetPosition=[x, y, 0],
            cameraUpVector=[0, 1, 0])
        projection_matrix = pybullet.computeProjectionMatrixFOV(
            self._obs_fov, self._obs_aspect, self._obs_nearplane,
            self._obs_farplane)
        # img = pybullet.getCameraImage(1000, 1000, view_matrix)
        (w, y, img, depth,
         segment) = pybullet.getCameraImage(width=self._observation_shape[0],
                                            height=self._observation_shape[1],
                                            viewMatrix=view_matrix,
                                            projectionMatrix=projection_matrix)
        # print (img)
        # Don't want alpha channel
        img = img[..., :3]

        if (self._grayscale):  ### Convert to Gray scale
            img = np.mean(img, axis=-1)
        return img
Example #6
    def render(self,
               width=720,
               height=720,
               far=100.0,
               near=0.01):  # Just camera rendering function

        dis = 1000.0
        # self.state_robot = p.getLinkState(self.pandaUid, 11)[0]  # the position of end-effector
        # self.orien_robot = p.getLinkState(self.pandaUid, 11)[1]

        offset_pos = [0, 0, 0.035]
        target_vec = [-1, 0, 0]
        offset_orn = p.getQuaternionFromEuler([0, 0, 0])

        view_pos, view_orn = p.multiplyTransforms(self.get_link_position(),
                                                  self.get_link_orien(),
                                                  offset_pos, offset_orn)
        target_pos, _ = p.multiplyTransforms(view_pos, view_orn, offset_pos,
                                             offset_orn)
        target_vector, _ = p.multiplyTransforms(view_pos, view_orn, target_vec,
                                                offset_orn)

        view_matrix = p.computeViewMatrix(cameraEyePosition=view_pos,
                                          cameraTargetPosition=target_pos,
                                          cameraUpVector=target_vector)
        proj_matrix = p.computeProjectionMatrixFOV(fov=80,
                                                   aspect=float(width) /
                                                   height,
                                                   nearVal=near,
                                                   farVal=far)
        (_, _, px, self.dp,
         _) = p.getCameraImage(width=width,
                               height=height,
                               viewMatrix=view_matrix,
                               projectionMatrix=proj_matrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (height, width, 4))

        rgb_array = rgb_array[:, :, :3]
        return rgb_array, self.dp
Example #7
 def makeCamera(self, node, tr, parentId):
     self.hasCamera = True
     imageSize = [int(node.width), int(node.height)]
     viewMatrix = p.computeViewMatrix(
         cameraEyePosition=[tr.x(), tr.y(), tr.z()],
         cameraTargetPosition=[tr.x(), tr.y() + 10,
                               tr.z()],
         cameraUpVector=[0, 0, 1])
     eyePos = [tr.x(), tr.y() - 0.6, tr.z()]
     targetPos = [tr.x(), tr.y() - 5, tr.z()]
     upVector = [0, 0, 1]
     projectionMatrix = p.computeProjectionMatrixFOV(fov=node.focal,
                                                     aspect=node.width /
                                                     node.height,
                                                     nearVal=0.3,
                                                     farVal=5.1)
     camera = BodyCamera(viewMatrix, projectionMatrix, eyePos, targetPos,
                         upVector, imageSize)
     camera.imCameraNode = node
     self.cameras.append(camera)
Example #8
 def get_camera_image(self):
     width, height = 128, 128
     # width, height = 8, 8 # doesn't actually make much difference
     view = pb.computeViewMatrix(
         cameraEyePosition=(0, -.02, .02),
         cameraTargetPosition=(0, -.4, .02),  # focal point
         cameraUpVector=(0, 0, .5),
     )
     proj = pb.computeProjectionMatrixFOV(
         fov=135,
          aspect=width / height,  # aspect is conventionally width / height (equal here)
         nearVal=0.01,
         farVal=.4,
     )
     # rgba shape is (height, width, 4)
     _, _, rgba, _, _ = pb.getCameraImage(
         width, height, view, proj,
         flags=pb.ER_NO_SEGMENTATION_MASK)  # not much speed difference
     # rgba = np.empty((height, width, 4)) # much faster than pb.getCameraImage
     return rgba, view, proj
Example #9
def takepicture(Pos, Orn):
    # Pos is the position of end effect, orn is the orientation of the end effect
    rotmatrix = p.getMatrixFromQuaternion(Orn)
    # distance from camera to focus
    distance = 0.2
    # where does camera aim at
    camview = list()
    for i in range(3):
        camview.append(np.dot(rotmatrix[i * 3:i * 3 + 3], (0, 0, distance)))
    # camview[2] = camview[2]
    tagPos = np.add(camview, Pos)

    # p.removeBody(kukaId)
    viewMatrix = p.computeViewMatrix(Pos, tagPos, (0, 0, 1))
    # viewMatrix=p.computeViewMatrix([-2, 0, 2], [0, 0, 1],[0, 0, 1])
    projectMatrix = p.computeProjectionMatrixFOV(
        60, 1, 0.1, 100)  # args: field of view, aspect ratio (width/height), near, far
    rgbpix = p.getCameraImage(128, 128, viewMatrix, projectMatrix)[2]

    return rgbpix[:, :, 0:3]
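The per-row loop above multiplies the rotation matrix by (0, 0, distance); a vectorized equivalent (a sketch, again assuming the row-major 9-tuple from getMatrixFromQuaternion):

R = np.array(rotmatrix).reshape(3, 3)
tagPos = R.dot([0, 0, distance]) + np.array(Pos)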
Example #10
 def get_camera_images(self):
     eyeid = pyplan.link_from_name(self.id, 'torso_camera_link')
     fov, aspect, nearplane, farplane = 80, 1.0, 0.01, 100
     projection_matrix = p.computeProjectionMatrixFOV(
         fov, aspect, nearplane, farplane)
     com_p, com_o, _, _, _, _ = p.getLinkState(self.id, eyeid)
     rot_matrix = p.getMatrixFromQuaternion(
         com_o)  #p.getQuaternionFromEuler((1.5707,0.866,0)))
     rot_matrix = np.array(rot_matrix).reshape(3, 3)
     # Initial vectors
      init_camera_vector = (1, 0, 0)  # x-axis
      init_up_vector = (0, 1, 0)  # y-axis
     # Rotated vectors
     camera_vector = rot_matrix.dot(init_camera_vector)
     up_vector = rot_matrix.dot(init_up_vector)
     view_matrix = p.computeViewMatrix(com_p + 0.25 * camera_vector,
                                       com_p + 100 * camera_vector,
                                       up_vector)
     imgs = p.getCameraImage(640, 640, view_matrix, projection_matrix)
     return imgs
Example #11
    def getImage(self, flag=False):
        position = (0, -3, 2)
        targetPosition = (0, 0, 2)
        viewMatrix = p.computeViewMatrix(
            position, targetPosition, cameraUpVector=[0, 0, 1],
            physicsClientId=self.clientId)
        projectionMatrix = p.computeProjectionMatrixFOV(
            60, 1, 0.1, 3.5, physicsClientId=self.clientId)
        img = p.getCameraImage(self.res[0], self.res[1], viewMatrix,
                               projectionMatrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL,
                               physicsClientId=self.clientId)
        # the image buffer is height x width, i.e. (res[1], res[0])
        img = np.reshape(img[2], (self.res[1], self.res[0], 4))
        img = img[:, :, :3]
        if not flag:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            _, img = cv2.threshold(img, 180, 255, cv2.THRESH_BINARY_INV)

        return img.astype('uint8')
Example #12
    def get_view_matrix_from_pose(self, pose_mtx=None):
        '''Convert from pose matrix to view matrix suitable for pybullet render

        Parameters
        ----------
        pose_mtx : array_like, optional
            4x4 pose matrix that describes location of
            camera in world frame.  If not provided, the default camera pose
            (from nuro_arm.constants) will be used
        '''
        if pose_mtx is None:
            print('[WARNING:] no pose matrix specified for simulated camera. '
                  'Camera will be placed in default location.')
            pose_mtx = constants.DEFAULT_CAM_POSE_MTX

        vecs = np.array(((0, 0, 0), (0, 0, 1), (0, -1, 0)))
        eye_pos, target_pos, up_vec = tfm.apply_transformation(pose_mtx, vecs)
        view_mtx = pb.computeViewMatrix(eye_pos, target_pos, up_vec)

        return view_mtx
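tfm.apply_transformation comes from nuro_arm and is not shown; a plausible stand-in consistent with its use here (applying a 4x4 homogeneous pose to a stack of row-vector points — an assumption about the library, not its actual code):

def apply_transformation(pose_mtx, points):
    # Rotate then translate each (N, 3) row vector by the 4x4 pose.
    points = np.asarray(points)
    return points @ pose_mtx[:3, :3].T + pose_mtx[:3, 3]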
Example #13
def getCameraImage(cam_pos, cam_orn):
    fov = 60
    aspect = 640 / 480
    near = 0.01
    far = 1000
    angle = 0.0
    q = p.getQuaternionFromEuler(cam_orn)
    cam_orn = np.reshape(p.getMatrixFromQuaternion(q), (3, 3))
    view_pos = np.matmul(cam_orn, np.array([-0.001, 0, 0.0]).T)
    view_pos = np.array(view_pos + cam_pos)
    view_matrix = p.computeViewMatrix([cam_pos[0], cam_pos[1], cam_pos[2]],
                                      view_pos, [0, 0, 1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
    images = p.getCameraImage(640,
                              480,
                              view_matrix,
                              projection_matrix,
                              shadow=False,
                              renderer=p.ER_BULLET_HARDWARE_OPENGL)
    return images
Example #14
def main(args):

    workspace = np.asarray([[0.35, 0.65], [-0.15, 0.15], [0, 0.50]])
    env_config = {
        "workspace": workspace,
        "max_steps": 10,
        "obs_size": 90,
        "fast_mode": False,
        "render": True,
        "goal_string": args.goal,
        "gen_blocks": 4,
        "gen_bricks": 2,
        "gen_triangles": 1,
        "gen_roofs": 1,
        "small_random_orientation": False,
        "seed": np.random.randint(100),
        "action_sequence": "xyrp",
        "simulate_grasp": True
    }

    env = createHouseBuildingXDeconstructEnv(PyBulletEnv, env_config)()
    env.reset()

    viewMatrix = pb.computeViewMatrix(
        cameraEyePosition=[1, 0, 0.2],
        cameraTargetPosition=[0, 0, 0],
        cameraUpVector=[0, 0, 1])

    projectionMatrix = pb.computeProjectionMatrixFOV(
        fov=45.0,
        aspect=1.0,
        nearVal=0.1,
        farVal=3.1)

    width, height, rgbImg, depthImg, segImg = pb.getCameraImage(
        width=224,
        height=224,
        viewMatrix=viewMatrix,
        projectionMatrix=projectionMatrix)

    imsave("{:s}.png".format(args.goal), rgbImg)
Example #15
    def _reset(self):
        """Called by gym.env.reset()"""
        ## TODO(hzyjerry): return noisy_observation
        #  self._noisy_observation()
        sensor_state = BaseRobotEnv._reset(self)
        self.temp_target_x = self.robot.walk_target_x
        self.temp_target_y = self.robot.walk_target_y

        ## This is important to ensure potential doesn't change drastically when reset
        self.potential = self.robot.calc_potential()

        staticMat = p.computeViewMatrix([0, 0, 0], [1, 1, 1], [0, 0, 1])
        static_cam = {
            'yaw': -130,
            'pitch': -30,
            'distance': 9.9,
            'target': [-1.253, -4.94, 1.05]
        }

        #p.resetDebugVisualizerCamera(static_cam['distance'], static_cam['yaw'], static_cam['pitch'], static_cam['target'])
        #staticImg = p.getCameraImage(self.windowsz, self.windowsz)[2]

        if not self._require_camera_input or self.test_env:
            visuals = self.get_blank_visuals()
            return visuals, sensor_state

        eye_pos, eye_quat = self.get_eye_pos_orientation()
        pose = [eye_pos, eye_quat]
        all_dist, all_pos = self.r_camera_rgb.rankPosesByDistance(pose)
        top_k = self.find_best_k_views(eye_pos, all_dist, all_pos)

        self.render_rgb, self.render_depth, self.render_semantics, self.render_normal, self.render_unfilled = self.r_camera_rgb.renderOffScreen(
            pose, top_k)

        visuals = self.get_visuals(self.render_rgb, self.render_depth)

        target_x = self.temp_target_x
        target_y = self.temp_target_y
        #p.resetBasePositionAndOrientation(self.target_mapId, [target_x  / self.robot.mjcf_scaling, target_y / self.robot.mjcf_scaling, 6], [0, 0, 0, 1])

        return visuals, sensor_state
Example #16
def get_image(robotID, width=200, height=200):
    cam_link_state = p.getLinkState(robotID,
                                    linkIndex=7,
                                    computeForwardKinematics=1)
    pos = cam_link_state[-2]
    ort = cam_link_state[-1]
    rot_mat = np.array(p.getMatrixFromQuaternion(ort))
    rot_mat = np.reshape(rot_mat, [3, 3])
    cam_dir = rot_mat.dot([1, 0, 0])  # viewing direction (local x-axis); avoid shadowing builtin dir
    up_vector = rot_mat.dot([0, 0, 1])
    s = 0.01
    view_matrix = p.computeViewMatrix(pos, pos + s * cam_dir, up_vector)
    # p.addUserDebugText(text=".",textPosition=pos+s*dir,textColorRGB=[1,0,0],textSize=10)
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
    # f_len = projection_matrix[0]
    ld = [
        random.uniform(-20, 20),
        random.uniform(-20, 20),
        random.uniform(10, 20)
    ]
    lc = [random.random(), random.random(), random.random()]
    if random.random() > 0.0:
        _, _, rgbImg, depthImg_buffer, segImg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
    else:
        _, _, rgbImg, depthImg_buffer, segImg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            renderer=p.ER_TINY_RENDERER,
            lightColor=lc,
            lightAmbientCoeff=random.random(),
            lightDiffuseCoeff=random.random(),
            lightSpecularCoeff=random.random())
    A = np.reshape(rgbImg, (height, width, 4))[:, :, :3]
    return Image.fromarray(A), depthImg_buffer, segImg
Example #17
    def __init__(self,
                 width=400,
                 height=300,
                 fov=60,
                 pos=[.5, .5, .5],
                 look_at=[0, 0, 0]):
        self.width = width
        self.height = height

        self.view = p.computeViewMatrix(cameraEyePosition=pos,
                                        cameraTargetPosition=look_at,
                                        cameraUpVector=[0, 0, 1])

        self.proj = p.computeProjectionMatrixFOV(fov=fov,
                                                 aspect=width / height,
                                                 nearVal=0.1,
                                                 farVal=0.8)

        # the official PyPI version (pip install pybullet) is compiled with
        # NumPy headers; if this assert ever triggers, the build lacks NumPy
        # support and copying camera pixels from C++ to Python takes forever
        assert p.isNumpyEnabled()
Example #18
 def __init__(self,
              size=None,
              cameraEyePosition=[0, 0, 0.5],
              fov=60,
              cameraTargetPosition=[0, 0.5, 0.5]):
     if size is None:
         size = Camera.default_size
     self.size = size
     self.viewMatrix = pb.computeViewMatrix(
         cameraEyePosition=cameraEyePosition,
         cameraTargetPosition=cameraTargetPosition,
         cameraUpVector=[0, 0, 1])
     self.projectionMatrix = pb.computeProjectionMatrixFOV(fov=fov,
                                                           aspect=1.0,
                                                           nearVal=0.1,
                                                           farVal=100)
     self.cam_image_kwargs = {
         **self.size, 'viewMatrix': self.viewMatrix,
         'projectionMatrix': self.projectionMatrix,
         'renderer': pb.ER_TINY_RENDERER
     }
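A hypothetical usage (assuming the class is named Camera, as the Camera.default_size reference suggests, and that size is a dict such as {'width': 128, 'height': 128} so it can be merged into the kwargs):

cam = Camera(size={'width': 128, 'height': 128})
width, height, rgba, depth, seg = pb.getCameraImage(**cam.cam_image_kwargs)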
Example #19
def get_camera_image(robot, link_id, projection_matrix):
    link_info = p.getLinkState(robot, link_id['head'])  #Get head link info
    link_pos = link_info[0]  #Get link com pos wrt world
    link_ori = link_info[1]  #Get link com ori wrt world
    rot = p.getMatrixFromQuaternion(link_ori)
    rot = np.array(rot).reshape(3, 3)

    global_camera_x_unit = np.array([1, 0, 0])
    global_camera_z_unit = np.array([0, 0, 1])

    camera_eye_pos = link_pos + np.dot(rot, 0.1 * global_camera_x_unit)
    camera_target_pos = link_pos + np.dot(rot, 1.0 * global_camera_x_unit)
    camera_up_vector = np.dot(rot, global_camera_z_unit)
    view_matrix = p.computeViewMatrix(camera_eye_pos, camera_target_pos,
                                      camera_up_vector)
    width, height, rgb_img, depth_img, seg_img = p.getCameraImage(
        50,  #image width
        10,  #image height
        view_matrix,
        projection_matrix)
    return width, height, rgb_img, depth_img, seg_img
Example #20
 def capture_image(self):
     # Camera
     *_, camera_position, camera_orientation = p.getLinkState(
         self.robot, self.camera_link)
     camera_look_position, _ = p.multiplyTransforms(camera_position,
                                                    camera_orientation,
                                                    [0, 0.1, 0],
                                                    [0, 0, 0, 1])
     view_matrix = p.computeViewMatrix(
         cameraEyePosition=camera_position,
         cameraTargetPosition=camera_look_position,
         cameraUpVector=(0, 0, 1))
     projection_matrix = p.computeProjectionMatrixFOV(fov=45.0,
                                                      aspect=1.0,
                                                      nearVal=0.1,
                                                      farVal=3.1)
     return p.getCameraImage(300,
                             300,
                             view_matrix,
                             projection_matrix,
                             renderer=p.ER_BULLET_HARDWARE_OPENGL)[2]
Example #21
def setCameraPicAndGetPic(robot_id: int,
                          width: int = 224,
                          height: int = 224,
                          physicsClientId: int = 0):
    """
    给合成摄像头设置图像并返回robot_id对应的图像
    摄像头的位置为miniBox前头的位置
    """
    basePos, baseOrientation = p.getBasePositionAndOrientation(
        robot_id, physicsClientId=physicsClientId)
    # Get the rotation matrix from the quaternion to recover the heading: left-multiply (1, 0, 0), since the camera faces along (1, 0, 0) in the body frame
    matrix = p.getMatrixFromQuaternion(baseOrientation,
                                       physicsClientId=physicsClientId)
    tx_vec = np.array([matrix[0], matrix[3], matrix[6]])  # transformed x-axis
    tz_vec = np.array([matrix[2], matrix[5], matrix[8]])  # transformed z-axis

    basePos = np.array(basePos)
    # camera position
    cameraPos = basePos + BASE_RADIUS * tx_vec + 0.5 * BASE_THICKNESS * tz_vec
    targetPos = cameraPos + 1 * tx_vec

    viewMatrix = p.computeViewMatrix(cameraEyePosition=cameraPos,
                                     cameraTargetPosition=targetPos,
                                     cameraUpVector=tz_vec,
                                     physicsClientId=physicsClientId)
    projectionMatrix = p.computeProjectionMatrixFOV(
        fov=50.0,  # camera field-of-view angle
        aspect=1.0,
        nearVal=0.01,  # near clipping plane
        farVal=20,  # far clipping plane
        physicsClientId=physicsClientId)

    width, height, rgbImg, depthImg, segImg = p.getCameraImage(
        width=width,
        height=height,
        viewMatrix=viewMatrix,
        projectionMatrix=projectionMatrix,
        physicsClientId=physicsClientId)

    return width, height, rgbImg, depthImg, segImg
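A hypothetical call (BASE_RADIUS and BASE_THICKNESS are module-level constants assumed to be defined elsewhere):

width, height, rgbImg, depthImg, segImg = setCameraPicAndGetPic(robot_id, width=224, height=224)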
Example #22
def main_usingEnvOnly():
    # environment = KukaGymEnv(renders=True, isDiscrete=False, maxSteps=10000000)
    environment = KukaCamGymEnv_Reconfigured(renders=True, isDiscrete=False)
    environment._reset()

    # p.resetBasePositionAndOrientation(self.kukaUid, [-0.100000, 0.000000, 0.070000], [0.000000, 0.000000, 0.000000, 1.000000])
    randomObjs = add_random_objs_to_scene(10)
    motorsIds = []
    motorsIds.append(environment._p.addUserDebugParameter("posX", 0.4, 0.75, 0.537))
    motorsIds.append(environment._p.addUserDebugParameter("posY", -.22, .3, 0.0))
    motorsIds.append(environment._p.addUserDebugParameter("posZ", 0.1, 1, 0.2))
    motorsIds.append(environment._p.addUserDebugParameter("yaw", -3.14, 3.14, 0))
    motorsIds.append(environment._p.addUserDebugParameter("fingerAngle", 0, 0.3, .3))

    # dv = 0.01 
    # motorsIds.append(environment._p.addUserDebugParameter("posX",-dv,dv,0))
    # motorsIds.append(environment._p.addUserDebugParameter("posY",-dv,dv,0))
    # motorsIds.append(environment._p.addUserDebugParameter("posZ",-dv,dv,0))
    # motorsIds.append(environment._p.addUserDebugParameter("yaw",-dv,dv,0))
    # motorsIds.append(environment._p.addUserDebugParameter("fingerAngle",0,0.3,.3))

    done = False
    # According to the hand-eye coordination paper, the camera is mounted over the shoulder of the arm
    camEyePos = [0.03, 0.236, 0.54]
    targetPos = [0.640000, 0.075000, -0.190000]
    cameraUp = [0, 0, 1]
    viewMat = p.computeViewMatrix(camEyePos, targetPos, cameraUp)
    camInfo = p.getDebugVisualizerCamera()
    projMatrix = camInfo[3]
    # viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
    # projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
    img_arr = p.getCameraImage(640, 512, viewMatrix=viewMat, projectionMatrix=projMatrix)  # 640*512*3
    # write_from_imgarr(img_arr, 1)

    while not done:
        action = []
        for motorId in motorsIds:
            action.append(environment._p.readUserDebugParameter(motorId))
        state, reward, done, info = environment.step2(action)
        obs = environment.getExtendedObservation()
Example #23
 def __init__(self,
              urdfRootPath='',
              pos=[-0.2, 0.0, -.12],
              orn=[0, 0, 0, 1.0],
              render=True,
              global_t=0):
     self.lf_id = 8
     self.rf_id = 9
     self.gripper_close_pose = 0.06
     self.maxForce = 1000  #200.
     self.scale = 0.01
     self.object_gripper_offset = 0
     self.fingerTipForce = 8
     self.is_gripper_open = True
     self.kukaId = p.loadSDF(
         os.path.join(urdfRootPath, "kuka_iiwa/kuka_with_gripper5.sdf"),
         globalScaling=1)[0]
     self.kukaEndEffectorIndex = 7  #Link index of gripper base
     p.resetBasePositionAndOrientation(self.kukaId, pos, orn)
     self.numJoints = p.getNumJoints(self.kukaId)
     far = 1
     #near = 0.001
     #self.cam_viewMatrix = list(p.computeViewMatrix(cameraEyePosition=[0, 0, 0.5],
     #  cameraTargetPosition=[0.4, 0, 0.2], cameraUpVector = [0.0, 0.0, 1.0]))
     #self.cam_projMatrix = p.computeProjectionMatrixFOV(80, 1, near, far);
     #self.img_size_h = 200#700
     near = 0.1
     self.cam_viewMatrix = list(
         p.computeViewMatrix(
             cameraEyePosition=[-0.2, 0, 0.04],
             cameraTargetPosition=[0.4, 0, -0.1],
             cameraUpVector=[0.6, 0.0, 0.8]))
     self.cam_projMatrix = p.computeProjectionMatrixFOV(100, 1, near, far)
     self.img_size_h = 360  #700
     self.img_size_w = 360  #1280
     self.far = far
     self.near = near
     self.render = render
     self.global_t = global_t  #for debugging
     self.default = {}
Example #24
    def render(self, mode='rgb_array', close=False):

        if mode != "rgb_array":
            return np.array([])
        base_pos, orn = p.getBasePositionAndOrientation(self.car)
        yaw = p.getEulerFromQuaternion(orn)[2]  # body heading (yaw) about the z-axis
        rot_matrix = np.array([[np.cos(yaw), -np.sin(yaw)],
                               [np.sin(yaw), np.cos(yaw)]])  # 2D rotation matrix
        target_relative_vec2D = np.array([2, 0])  # target position relative to the body
        target_abs_vec2D = np.dot(rot_matrix,
                                  target_relative_vec2D)  # target position in the world frame

        cam_eye = np.array(base_pos) + np.array([0, 0, 0.2])
        cam_target = np.array(base_pos) + np.append(target_abs_vec2D,
                                                    0.2)  # append z = 0.2 for the camera height
        cam_upvec = [0, 0, 1]

        view_matrix = p.computeViewMatrix(cameraEyePosition=cam_eye,
                                          cameraTargetPosition=cam_target,
                                          cameraUpVector=cam_upvec)

        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(self.width) /
                                                   self.height,
                                                   nearVal=0.1,
                                                   farVal=100.0)

        (_, _, rgb, _,
         mask) = p.getCameraImage(width=self.width,
                                  height=self.height,
                                  viewMatrix=view_matrix,
                                  projectionMatrix=proj_matrix,
                                  renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(rgb)
        rgb_array = rgb_array[:, :, :3]
        mask_array = np.array(mask)

        return rgb_array
Example #25
    def getlocalMapObservation(self):
        # render an overhead image of the area around the agent
        """
            For now this includes the agent in the center of the computation
        """
        
        com_p, com_o = pybullet.getBasePositionAndOrientation(self._demon)
        rot_matrix = pybullet.getMatrixFromQuaternion(com_o)
        rot_matrix = np.array(rot_matrix).reshape(3, 3)
        # Initial vectors
        # init_camera_vector = (0, 0, -1) # z-axis
        # init_up_vector = (0, 1, 0) # y-axis
        # Rotated vectors
        # camera_vector = rot_matrix.dot(init_camera_vector)
        # up_vector = rot_matrix.dot(init_up_vector)
        # view_matrix = p.computeViewMatrix(com_p, com_p + 0.1 * camera_vector, up_vector)

        # Make the observation track the agent.
        view_matrix = pybullet.computeViewMatrix(cameraEyePosition=[com_p[0], com_p[1], self._observation_height],
                                                 cameraTargetPosition=[com_p[0], com_p[1], 0],
                                                 cameraUpVector=[0, 1, 0])
        # This will make it look similar to the rendering.
        # view_matrix = pybullet.computeViewMatrix(
        #     cameraEyePosition=[0, 0, 15],
        #     cameraTargetPosition=[0, 0, 0],
        #     cameraUpVector=[0, 1, 0])
        
        projection_matrix = pybullet.computeProjectionMatrixFOV(
            self._obs_fov, self._obs_aspect, self._obs_nearplane, self._obs_farplane)
        # img = pybullet.getCameraImage(1000, 1000, view_matrix)
        (w,y,img,depth,segment) = pybullet.getCameraImage(
            width=self._observation_shape[0],
            height=self._observation_shape[1], 
            viewMatrix=view_matrix,
            projectionMatrix=projection_matrix)
        # print (img)
        # Don't want alpha channel
        img = img[..., :3]
        return img
Example #26
 def get_image(self, imsize=(214, 214), plot_cam_pose=False):
     cam_trans, cam_rot = self.get_cam_pose()
     cam_rotm = np.array(p.getMatrixFromQuaternion(cam_rot)).reshape(3, 3)
     if plot_cam_pose:
         draw_pose(cam_trans, cam_rot)
     # Calculate extrinsic matrix
     # target position is camera frame y axis tip in global frame
     # up vector is the z axis of camera frame
     viewMatrix = p.computeViewMatrix(
         cameraEyePosition=cam_trans,
         cameraTargetPosition=(cam_rotm.dot(np.array([0, 1, 0]).T) +
                               np.array(cam_trans)).tolist(),
         cameraUpVector=cam_rotm[:, 2].tolist())
     width, height, rgbImg, depthImg, segImg = p.getCameraImage(
         width=imsize[0],
         height=imsize[1],
         viewMatrix=viewMatrix,
         projectionMatrix=self.projectionMatrix,
         flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX)
     # depth image transformed to true depth, each pixel in segImg is the linkID
     return rgbImg[:, :, :3], get_true_depth(
         depthImg, self.z_near, self.z_far), ((segImg >> 24) - 1)
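get_true_depth is not shown; a minimal sketch consistent with its use here, applying the standard linearization of the OpenGL depth buffer between z_near and z_far (the (segImg >> 24) - 1 above likewise inverts PyBullet's linkIndex + 1 encoding):

def get_true_depth(depth_buffer, z_near, z_far):
    # Map non-linear depth buffer values in [0, 1] to metric distances.
    depth_buffer = np.asarray(depth_buffer)
    return z_far * z_near / (z_far - (z_far - z_near) * depth_buffer)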
Example #27
    def _get_onrobot_observation(self):
        """Return the observation as an image.


    """
        camerapos = p.getLinkState(self._tm700.tm700Uid,
                                   self._tm700.tmEndEffectorIndex + 2)
        blockPos, blockOrn = p.getBasePositionAndOrientation(
            self._objectUids[0])
        camerapos = camerapos[0]
        print(camerapos)
        distance = 1  # camerapos[2]
        pitch = -90  #-60 + self._cameraRandom * np.random.uniform(-3, 3) #original -56
        yaw = 0  # 245 + self._cameraRandom * np.random.uniform(-3, 3)
        roll = 0
        # self._view_matrix = p.computeViewMatrixFromYawPitchRoll(camerapos, distance, yaw, pitch, roll, 2)
        self._view_matrix = p.computeViewMatrix(cameraEyePosition=camerapos,
                                                cameraTargetPosition=blockPos,
                                                cameraUpVector=[0, 0, 1])
        img_arr = p.getCameraImage(width=self._width,
                                   height=self._height,
                                   viewMatrix=self._view_matrix,
                                   projectionMatrix=self._proj_matrix)
        rgb = img_arr[2]
        depth = img_arr[3]
        d_min = 0.97
        d_max = 1.0
        print(depth)
        depthnormalized = (np.asarray(depth, dtype=np.float64) - d_min) / (d_max - d_min)
        segmentation = img_arr[4]
        depth = np.reshape(depth, (self._height, self._width, 1))
        segmentation = np.reshape(segmentation, (self._height, self._width, 1))

        np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
        np_img_arr = np_img_arr.astype(np.float64)

        test = np.concatenate([np_img_arr[:, :, 1:3], depth], axis=-1)

        return test
Example #28
 def _take_picture(self):
     # get the last link's position and orientation
     Pos, Orn = p.getLinkState(self.robot_id, self.robot_joint_num - 1)[:2]
     # Pos is the position of end effect, orn is the orientation of the end effect
     rotmatrix = p.getMatrixFromQuaternion(Orn)
     # distance from camera to focus
     distance = 0.2
     # where does camera aim at
     camview = list()
     for i in range(3):
         camview.append(np.dot(rotmatrix[i * 3:i * 3 + 3],
                               (0, 0, distance)))
     tagPos = np.add(camview, Pos)
     #p.removeBody(kukaId)
     viewMatrix = p.computeViewMatrix(Pos, tagPos, (0, 0, 1))
     viewMatrix = [round(elem, 2) for elem in viewMatrix]
      projectMatrix = p.computeProjectionMatrixFOV(
          60, 1, 0.1, 100)  # args: field of view, aspect ratio (width/height), near, far
      projectMatrix = [round(elem, 2) for elem in projectMatrix]
     rgbpix = p.getCameraImage(XSIZE, YSIZE, viewMatrix, projectMatrix)[2]
     return rgbpix[:, :, 0:3]
Example #29
def _get_seg_mask_for_target(target_position, cam_position, world=None):
    """
    Calculates the view and projection Matrix and returns the Segmentation mask
    The segmentation mask indicates for every pixel the visible Object.
    :param cam_position: The position of the Camera as a list of x,y,z and orientation as quaternion
    :param target_position: The position to which the camera should point as a list of x,y,z and orientation as quaternion
    :return: The Segmentation mask from the camera position
    """
    world, world_id = _world_and_id(world)
    fov = 300
    aspect = 256 / 256
    near = 0.2
    far = 10

    view_matrix = p.computeViewMatrix(cam_position[0], target_position[0],
                                      [0, 0, -1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
    return p.getCameraImage(256,
                            256,
                            view_matrix,
                            projection_matrix,
                            physicsClientId=world_id)[4]
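Without the link-index flag, each pixel of the returned mask holds the unique id of the visible body (-1 for background); a hypothetical usage extracting a per-body mask (poses given as [position, orientation] pairs, matching the [0] indexing above):

import numpy as np
seg = _get_seg_mask_for_target([[0, 0, 0], [0, 0, 0, 1]],
                               [[0, 0, 1], [0, 0, 0, 1]])
body_id = 1  # illustrative body unique id
object_pixels = np.asarray(seg) == body_id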
Example #30
def init(useGUI):
    if useGUI:
        p.connect(p.GUI)
    else:
        p.connect(p.DIRECT)
    p.configureDebugVisualizer(p.COV_ENABLE_PLANAR_REFLECTION, 0)
    p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
    p.configureDebugVisualizer(p.COV_ENABLE_GUI, 1)

    p.setAdditionalSearchPath(pdata.getDataPath())
    p.setGravity(0, 0, -9.81)
    p.setRealTimeSimulation(0)

    # Compute the view and image from camera
    view_matrix = p.computeViewMatrix([0.575, 0, 0.5], [0.575, 0, 0], [-1, 0, 0])
    projection_matrix = p.computeProjectionMatrixFOV(
        fov=45.0,
        aspect=1,
        nearVal=0.1,
        farVal=0.55)

    return view_matrix, projection_matrix
Example #31

p.loadURDF('plane.urdf')
p.loadURDF("r2d2.urdf",[0,0,1])
p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.0, 0.025])
cube_trans = p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.1, 0.025])
p.changeVisualShape(cube_trans,-1,rgbaColor=[1,1,1,0.1])
width = 128
height = 128

fov = 60
aspect = width / height
near = 0.02
far = 1

view_matrix = p.computeViewMatrix([0, 0, 0.5], [0, 0, 0], [1, 0, 0])
projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

# Get depth values using the OpenGL renderer
images = p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True,renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_opengl = np.reshape(images[2], (height, width, 4)) * 1. / 255.
depth_buffer_opengl = np.reshape(images[3], [height, width])
depth_opengl = far * near / (far - (far - near) * depth_buffer_opengl)
seg_opengl = np.reshape(images[4], [height, width]) * 1. / 255.
time.sleep(1)

# Get depth values using Tiny renderer
images = p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True, renderer=p.ER_TINY_RENDERER)
depth_buffer_tiny = np.reshape(images[3], [height, width])
depth_tiny = far * near / (far - (far - near) * depth_buffer_tiny)
rgb_tiny = np.reshape(images[2], (height, width, 4)) * 1. / 255.
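To compare the two renderers side by side (a sketch assuming matplotlib, in the spirit of PyBullet's getCameraImageTest.py example):

import matplotlib.pyplot as plt
plt.subplot(1, 2, 1)
plt.imshow(depth_opengl, cmap='gray')
plt.title('OpenGL depth')
plt.subplot(1, 2, 2)
plt.imshow(depth_tiny, cmap='gray')
plt.title('TinyRenderer depth')
plt.show()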