Example 1
    def render(self, mode="human"):
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[0.55, 0.35, 0.2],
            distance=1.2,
            yaw=0,
            pitch=-70,
            roll=0,
            upAxisIndex=2,
        )
        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(960) / 720,
                                                   nearVal=0.1,
                                                   farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=960,
            height=720,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL,
        )

        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (720, 960, 4))

        rgb_array = rgb_array[:, :, :3]

        if is_notebook() and os.path.exists("/tmp/render.txt"):
            display(rgb_array)

        return rgb_array
Example 2
	def camera_feed(self, is_flat=False):
		"""
		Function to get the camera feed of the arena.

		Arguments:

			is_flat: if True, reshape the raw buffer to (512, 512, 4)
			(workaround for users who see a blank image in OpenCV)

		Return Values:

			numpy array of RGB values
		"""
		look = [0, 0, 0.2]
		cameraeyepos = [0, 0, 6.5]
		cameraup = [0, -1, 0]
		self._view_matrix = p.computeViewMatrix(cameraeyepos, look, cameraup)
		fov = 75
		aspect = self._width / self._height
		near = 0.8
		far = 10
		self._proj_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
		img_arr = p.getCameraImage(width=self._width,
								height=self._height,
								viewMatrix=self._view_matrix,
								projectionMatrix=self._proj_matrix,
								renderer=p.ER_BULLET_HARDWARE_OPENGL)
		rgb = img_arr[2]
		if is_flat:
			# Only for those who are getting a blank image in opencv
			rgb = np.array(rgb)
			rgb = np.reshape(rgb, (512, 512, 4))
		rgb = np.uint8(rgb)[:, :, :3]  # drop the alpha channel before color conversion
		rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
		rgb = np.rot90(rgb, 3)
		return rgb
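The is_flat workaround above is typically needed when the installed pybullet was built without numpy support, in which case getCameraImage returns flat tuples instead of shaped arrays. A quick check, as a sketch; isNumpyEnabled reports the build capability:

import pybullet as p

p.connect(p.DIRECT)
# With numpy support, getCameraImage returns arrays already shaped
# (height, width, 4); without it, flat tuples that need reshaping.
print(p.isNumpyEnabled())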
Example 3
def follow_agent(agent: Agent, width=640, height=480) -> np.ndarray:
    position, orientation = pybullet.getBasePositionAndOrientation(
        agent.vehicle_id)
    _, _, yaw = pybullet.getEulerFromQuaternion(orientation)
    orientation = pybullet.getQuaternionFromEuler((0, 0, yaw))
    rot_matrix = pybullet.getMatrixFromQuaternion(orientation)
    rot_matrix = np.array(rot_matrix).reshape(3, 3)
    camera_position = position + rot_matrix.dot([-0.8, 0, 0.3])
    up_vector = rot_matrix.dot([0, 0, 1])
    target = position
    view_matrix = pybullet.computeViewMatrix(camera_position, target,
                                             up_vector)
    proj_matrix = pybullet.computeProjectionMatrixFOV(fov=60,
                                                      aspect=float(width) /
                                                      height,
                                                      nearVal=0.01,
                                                      farVal=10.0)

    _, _, rgb_image, _, _ = pybullet.getCameraImage(
        width=width,
        height=height,
        renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix)

    rgb_array = np.reshape(rgb_image, (height, width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 4
def getExtendedObservation(p, vehicle_id):
    carpos, carorn = p.getBasePositionAndOrientation(vehicle_id)

    # Using the ZED calculations, I needed to add in rotation too.
    rotate = p.getQuaternionFromEuler([0, 0, np.pi])
    carpos, carorn = p.multiplyTransforms(carpos, carorn, [0, 0, .1], rotate)

    # Most of this is cribbed from racecarZEDGymEnv.py
    carmat = p.getMatrixFromQuaternion(carorn)
    dist0 = 0.3
    dist1 = 1.
    eyePos = [carpos[0]+dist0*carmat[0], carpos[1] +
              dist0*carmat[3], carpos[2]+dist0*carmat[6]]
    targetPos = [carpos[0]+dist1*carmat[0], carpos[1] +
                 dist1*carmat[3], carpos[2]+dist1*carmat[6]]
    up = [carmat[2], carmat[5], carmat[8]]

    # These things taken mostly from testrender_np.py
    pixelWidth = 64
    pixelHeight = 64
    nearPlane = 0.01
    farPlane = 100
    fov = 60

    viewMatrix = p.computeViewMatrix(eyePos, targetPos, up)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = p.computeProjectionMatrixFOV(
        fov, aspect, nearPlane, farPlane)
    img_arr = p.getCameraImage(
        pixelWidth, pixelHeight, viewMatrix, projectionMatrix, shadow=1,
        lightDirection=[1, 1, 1], renderer=p.ER_BULLET_HARDWARE_OPENGL)
    return img_arr
Example 5
    def __init__(self, pose_mtx=None, pb_client=0):
        '''Class to allow asynchronous video capture within pybullet simulator

        Parameters
        ----------
        pose_mtx : array_like, optional
            4x4 pose matrix that describes the location of the camera in the
            world frame. If not provided, the default camera pose (from
            nuro_arm.constants) will be used
        pb_client : int, default to 0
            Physics Client ID describing which simulator the camera exists
            within.  If you are only using one simulator, you can leave as
            default.
        '''
        super().__init__()

        # create projection matrix
        self._img_width, self._img_height = constants.NEW_CAM_ROI[2:]
        fov = np.degrees(
            2 * np.arctan2(self._img_height, 2 * constants.NEW_CAM_MTX[1, 1]))
        aspect = self._img_width / self._img_height
        self._proj_mtx = pb.computeProjectionMatrixFOV(fov, aspect, 0.001, 10)

        # create view matrix
        self._view_mtx = self.get_view_matrix_from_pose(pose_mtx)

        self._pb_client = pb_client
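Note: the fov computation above inverts the pinhole model: a camera with focal length f_y pixels and image height h spans a vertical field of view of 2*atan(h / (2*f_y)). A minimal standalone sketch of the same conversion, using a hypothetical intrinsic matrix K (the values below are made up for illustration):

import numpy as np
import pybullet as pb

pb.connect(pb.DIRECT)

# Hypothetical pinhole intrinsics for a 640x480 camera.
K = np.array([[600.0,   0.0, 320.0],
              [  0.0, 600.0, 240.0],
              [  0.0,   0.0,   1.0]])
width, height = 640, 480

# Vertical FOV in degrees from the focal length in pixels (K[1, 1] = f_y).
fov = np.degrees(2 * np.arctan2(height, 2 * K[1, 1]))
proj_mtx = pb.computeProjectionMatrixFOV(fov, width / height, 0.001, 10)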
Example 6
    def render_map(self):
        base_pos = [0, 0, -3]
        if hasattr(self, 'robot') and hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]

        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=35,
            yaw=0,
            pitch=-89,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60,
            aspect=float(self._render_width) / self._render_height,
            nearVal=0.1,
            farVal=100.0)
        (_, _, px, _,
         _) = p.getCameraImage(width=self._render_width,
                               height=self._render_height,
                               viewMatrix=view_matrix,
                               projectionMatrix=proj_matrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
Example 7
    def set_camera(self):
        # TODO: world coordinates of the arm tip (end effector)

        rot_mtrx = np.array(
            p.getMatrixFromQuaternion(self.link_info['ee_link'][1])).reshape(
                3, 3)

        self.view_matrix = p.computeViewMatrix(
            cameraEyePosition=np.array(self.link_info['ee_link'][0]),
            cameraTargetPosition=np.array(self.link_info['ee_link'][0]) +
            rot_mtrx @ np.array([1, 0, 0]),
            cameraUpVector=rot_mtrx @ np.array([0, 1, 0]))

        # self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
        #     cameraTargetPosition=np.array([0.5, -0.5, 1.5]) * 1/1,#np.array([4, -4, 3]) * 1/10, # target focus point
        #     distance=2., # from eye to target
        #     yaw=30, # degree left/right around z axis
        #     pitch=-30, # degree up/down around
        #     roll=90, # degree around forward vector
        #     upAxisIndex=2) # either 1 for Y or 2 for Z axis up

        # compute the projection matrix for display
        self.proj_matrix = p.computeProjectionMatrixFOV(
            fov=60,  # field of view angle, in degrees, in the y direction
            aspect=320 / 240,  # ratio of x (width) to y (height); sets the field of view in the x direction
            nearVal=0.1,  # distance from the viewer to the near clipping plane (always positive)
            farVal=100.0)  # distance from the viewer to the far clipping plane (always positive)
Example 8
    def render(self, mode='human'):
        base_pos = [2, 1.4, 3.5]
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=1.0,
            yaw=0,
            pitch=-80,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(480) / 480,
                                                   nearVal=0.1,
                                                   farVal=100.0)
        (_, _, px, _,
         _) = p.getCameraImage(width=480,
                               height=480,
                               viewMatrix=view_matrix,
                               projectionMatrix=proj_matrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]

        if mode == "rgb_array":
            return rgb_array

        elif mode == 'human':
            if self.viewer is None:
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(rgb_array)
            return self.viewer.isopen
Example 9
def run_example():
    times = []
    dim = 5
    env = Shapes3D(gui=0, env_dim=dim)
    env.reset()

    env.add_sphere(0.2, [1, 1, 1, 1], [1, 1, 0.2])
    env.add_cube([0.2, 0.2, 0.2], [0, 0, 1, 1], [-1, 1, 0.2])
    env.add_capsule(0.2, 1, [1, 0, 0, 1], [-1, -1, 0.7])
    env.add_cylinder(0.4, 1.5, [1, 0, 0, 1], [1, -1, 0.75])

    intrinsic = p.computeProjectionMatrixFOV(fov=45,
                                             aspect=1,
                                             nearVal=.1,
                                             farVal=dim * 2)
    for i in itertools.count(0, 1):
        start = time.time()
        extrinsic = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[1, 1, 1],
            distance=2.5,
            yaw=i * 5,
            pitch=-17,
            roll=0,
            upAxisIndex=2)
        env.compute_render(320, 320, intrinsic, extrinsic)
        end = time.time()
        print("rendering took: %f" % (end - start))
        times.append(end - start)
        if i > 1000:
            break

    env.reset()  # Not needed, testing purposes only
    env.destroy()

    print("avg of %f per render" % (sum(times) / 1000))
    def __init__(self, height, width, window_name, fov=60):
        camPos = [0, 0, 0]
        camUpVector = [0, -1, 0]
        camTargetPos = [0, 0, 1]
        nearPlane = 0.01
        farPlane = 100

        self.window_name = window_name
        self.height = height
        self.width = width

        ##############    VTK stuff   ##############

        # Create a rendering window and renderer
        self.ren = vtk.vtkRenderer()
        self.renWin = vtk.vtkRenderWindow()
        self.renWin.SetOffScreenRendering(1)
        self.renWin.SetAlphaBitPlanes(1)  # Enable usage of alpha channel
        self.renWin.AddRenderer(self.ren)

        self.renWin.SetSize(width, height)

        self.camera = self.ren.GetActiveCamera()
        self.camera.SetPosition(camPos)
        self.camera.SetViewUp(camUpVector)
        self.camera.SetFocalPoint(camTargetPos)
        self.camera.SetViewAngle(fov)

        self.w2if = vtk.vtkWindowToImageFilter()
        self.w2if.SetInput(self.renWin)
        self.w2if.SetInputBufferTypeToRGBA()

        ############## pybullet stuff ##############

        p.connect(p.DIRECT)
        # conid = p.connect(p.SHARED_MEMORY)
        # if (conid<0):
        #     p.connect(p.GUI)
        p.resetSimulation()
        p.setGravity(0, 0, -10)
        # p.setPhysicsEngineParameter(numSolverIterations=5)
        # p.setPhysicsEngineParameter(fixedTimeStep=1./240.)
        # p.setPhysicsEngineParameter(numSubSteps=10)

        self.viewMatrix = p.computeViewMatrix(camPos, camTargetPos,
                                              camUpVector)
        aspect = width / height
        self.projectionMatrix = p.computeProjectionMatrixFOV(
            fov, aspect, nearPlane, farPlane)

        egl = pkgutil.get_loader('eglRenderer')
        if egl:
            p.loadPlugin(egl.get_filename(), "_eglRendererPlugin")
        else:
            p.loadPlugin("eglRendererPlugin")

        self.play = True
        self.physics_thread = Thread(target=self.update_physics, args=())
        self.physics_thread.daemon = True
        print("READY")
Example 11
    def render(self):
        # the camera target sits half a meter from the robot base at [0, 0, 0]
        # a viewing distance of 1.5 m above the table suits a robot whose longest dimension is about 1 m
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[-0.5, 0, 0],
            distance=1.5,
            yaw=90,
            pitch=-90,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(960) / 720,
                                                   nearVal=0.1,
                                                   farVal=100.0)
        (width, height, rgbPixels, depthPixels,
         SegMaskBuffer) = p.getCameraImage(
             width=960,
             height=720,
             viewMatrix=view_matrix,
             projectionMatrix=proj_matrix,
             renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(rgbPixels, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (height, width, 4))

        # these flags default to 1 (enabled); pass 0 to disable them
        p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 1)

        # return a tuple for rgb data and depth values
        return (rgb_array, depthPixels)
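Note: per the comment above, the preview flags default to enabled. When the GUI debug windows are not needed, the same calls with 0 switch them off, which is a common rendering speed-up (a small sketch, same constants as above):

p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, 0)
p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)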
Example 12
    def render(self, mode='rgb_array', close=False):

        if mode != "rgb_array":
            return np.array([])
        base_pos, orn = p.getBasePositionAndOrientation(self.car)
        cam_eye = np.array(base_pos) + [0.1,0,0.2]
        cam_target = np.array(base_pos) + [2,0,0.2]
        cam_upvec = [1,0,1]

        view_matrix = p.computeViewMatrix(
                cameraEyePosition=cam_eye,
                cameraTargetPosition=cam_target,
                cameraUpVector=cam_upvec)

        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self.width)/self.height,
            nearVal=0.1, farVal=100.0)

        (_, _, rgb, _, mask) = p.getCameraImage(
            width=self.width, height=self.height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(rgb)
        rgb_array = rgb_array[:,:,:3]
        mask_array = np.array(mask)

        return rgb_array
Example 13
def getCameraImageEEF(pos, orn):
    #print(eef_pose)
    com_p = pos
    com_o = orn
    rot_matrix = p.getMatrixFromQuaternion(com_o)
    rot_matrix = np.array(rot_matrix).reshape(3, 3)
    # Initial vectors
    init_camera_vector = (0, 0, 1)  # z-axis
    init_up_vector = (0, 1, 0)  # y-axis
    # Rotated vectors
    camera_vector = rot_matrix.dot(init_camera_vector)
    up_vector = rot_matrix.dot(init_up_vector)
    # NOTE: the camera looks at the world origin; use com_p + camera_vector as the
    # target instead to look along the end effector's z-axis.
    view_matrix = p.computeViewMatrix([com_p[0], com_p[1], com_p[2]],
                                      [0, 0, 0], up_vector)
    fov = 60
    aspect = imageWidth / imageHeight
    near = 0.01
    far = 1000

    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
    images = p.getCameraImage(imageWidth,
                              imageHeight,
                              view_matrix,
                              projection_matrix,
                              shadow=True,
                              renderer=p.ER_BULLET_HARDWARE_OPENGL)
    return images, view_matrix
Example 14
    def render(self, mode="rgb_array", close=False):
        if mode != "rgb_array":
            return np.array([])

        cam_dist = 1.3
        cam_yaw = 180
        cam_pitch = -40
        RENDER_HEIGHT = 720
        RENDER_WIDTH = 960

        base_pos, orn = p.getBasePositionAndOrientation(self.robot_id)
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=cam_dist,
            yaw=cam_yaw,
            pitch=cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(RENDER_WIDTH) /
                                                   RENDER_HEIGHT,
                                                   nearVal=0.1,
                                                   farVal=100.0)
        (_, _, px, _,
         _) = p.getCameraImage(width=RENDER_WIDTH,
                               height=RENDER_HEIGHT,
                               viewMatrix=view_matrix,
                               projectionMatrix=proj_matrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)
        #renderer=self._p.ER_TINY_RENDERER)

        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (RENDER_HEIGHT, RENDER_WIDTH, 4))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
Example 15
    def __init__(self):
        self.physicsClient = p.connect(
            p.DIRECT)  # non-graphical; use p.GUI for a visual session
        startPos = [0, 0, -0.181]
        startOrientation = p.getQuaternionFromEuler([0, 0, 0])
        self.robot = p.loadURDF(
            "hdt_michigan_description/urdf/left_gripper_no_housing.urdf",
            startPos,
            startOrientation,
            physicsClientId=self.physicsClient)

        # set jdict, maps from joint name to joint index
        self.jdict = {}
        for i in range(
                p.getNumJoints(self.robot,
                               physicsClientId=self.physicsClient)):
            info = p.getJointInfo(self.robot,
                                  i,
                                  physicsClientId=self.physicsClient)
            jname = info[1].decode("ascii")
            self.jdict[jname] = i
        #print(self.jdict)
        self.joint_ids = [
            self.jdict["leftgripper"], self.jdict["leftgripper2"]
        ]

        # camera stuff
        self.z_near = 0.1
        self.z_far = 3.1
        self.projectionMatrix = p.computeProjectionMatrixFOV(
            fov=65.0, aspect=1.0, nearVal=self.z_near, farVal=self.z_far)
Example 16
    def get_gripper_camera(self, imgSize):
        # camera tilted by pi/12 about the gripper y-axis, mounted slightly
        # behind and below the gripper frame
        camera_angle = [0, np.pi / 12, 0]
        camera_relative_loc = [-0.1, -0.15]

        gripper_location = p.getLinkState(self.__gripper_body_id, 0)
        camera_t_R = p.getMatrixFromQuaternion(p.getQuaternionFromEuler(camera_angle))
        matrix_gri = p.getMatrixFromQuaternion(gripper_location[1])
        # compose the gripper rotation with the camera tilt (a proper matrix
        # product; element-wise multiplication of the flat tuples is wrong)
        matrix_cam = (np.array(matrix_gri).reshape(3, 3)
                      @ np.array(camera_t_R).reshape(3, 3)).flatten()

        camlookat = [matrix_cam[2] + gripper_location[0][0],
                     matrix_cam[5] + gripper_location[0][1],
                     matrix_cam[8] + gripper_location[0][2]]
        camup = [matrix_cam[1], matrix_cam[4], matrix_cam[7]]

        gripper_up = [matrix_gri[1], matrix_gri[4], matrix_gri[7]]
        gripper_fwd = [matrix_gri[2], matrix_gri[5], matrix_gri[8]]
        camera_loc = (np.array(gripper_location[0])
                      + np.array(gripper_up) * camera_relative_loc[0]
                      + np.array(gripper_fwd) * camera_relative_loc[1])

        view_matrix = p.computeViewMatrix(camera_loc.tolist(), camlookat, camup)
        proj_matrix = p.computeProjectionMatrixFOV(fov=60, aspect=1, nearVal=0.01, farVal=10.0)

        img_arr = p.getCameraImage(imgSize, imgSize, view_matrix, proj_matrix,
                                   shadow=1, flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX)
        return img_arr
Example 17
    def update(self):
        # Guidance
        # Only do this if we are not being manually controlled
        if not self.keyboard_control:
            self.ltarget_vel, self.rtarget_vel = next(self.vel_gen)

        # Movement
        p.setJointMotorControlArray(self.robot,
                                    self.motor_links,
                                    p.VELOCITY_CONTROL,
                                    targetVelocities=[
                                        self.velocity_limit * self.rtarget_vel,
                                        self.velocity_limit * self.ltarget_vel
                                    ],
                                    forces=[self.max_force, self.max_force])

        # Camera
        *_, camera_position, camera_orientation = p.getLinkState(
            self.robot, self.camera_link)
        camera_look_position, _ = p.multiplyTransforms(camera_position,
                                                       camera_orientation,
                                                       [0, 0.1, 0],
                                                       [0, 0, 0, 1])
        view_matrix = p.computeViewMatrix(
            cameraEyePosition=camera_position,
            cameraTargetPosition=camera_look_position,
            cameraUpVector=(0, 0, 1))
        projection_matrix = p.computeProjectionMatrixFOV(fov=45.0,
                                                         aspect=1.0,
                                                         nearVal=0.1,
                                                         farVal=3.1)
Example 18
 def get_image(self,
               pos,
               forward,
               up=None,
               fov=90.,
               aspect=4 / 3,
               height=720):
     pos = totensor(pos)
     forward = totensor(forward)
     if up is not None:
         up = totensor(up)
     else:
         left = torch.cross(torch.tensor([0., 0., 1.]), forward)
         up = torch.cross(forward, left)
     # 'forward' is a viewing direction, so the target point is pos + forward
     view_mat = p.computeViewMatrix(cameraEyePosition=pos.tolist(),
                                    cameraTargetPosition=(pos + forward).tolist(),
                                    cameraUpVector=up.tolist(),
                                    physicsClientId=self.sim.id)
     NEAR_PLANE = 0.01
     FAR_PLANE = 1000.0
     proj_mat = p.computeProjectionMatrixFOV(fov=fov,
                                             aspect=aspect,
                                             nearVal=NEAR_PLANE,
                                             farVal=FAR_PLANE,
                                             physicsClientId=self.sim.id)
     _, _, img, depth, mask = p.getCameraImage(width=int(aspect * height),
                                               height=height,
                                               viewMatrix=view_mat,
                                               projectionMatrix=proj_mat,
                                               physicsClientId=self.sim.id)
     img = torch.tensor(img)
     depth = FAR_PLANE * NEAR_PLANE / (
         FAR_PLANE - (FAR_PLANE - NEAR_PLANE) * torch.tensor(depth))
     mask = torch.tensor(mask)
     return {"img": img, "depth": depth, "mask": mask}
Example 19
    def render(self, mode='human'):
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[0.7, 0, 0.05],
            distance=0.7,
            yaw=90,
            pitch=-70,
            roll=0,
            upAxisIndex=2)

        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(960) / 720,
                                                   nearVal=0.1,
                                                   farVal=100.0)

        (_, _, px, _,
         _) = p.getCameraImage(width=960,
                               height=720,
                               viewMatrix=view_matrix,
                               projectionMatrix=proj_matrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (720, 960, 4))
        rgb_array = rgb_array[:, :, :3]

        return rgb_array
Example 20
    def render(size=img_size,
               eye=cam_eye,
               target=cam_target,
               up=cam_up,
               fov=45,
               aspect=1.0,
               nearVal=0.1,
               farVal=30.1):
        viewMatrix = pb.computeViewMatrix(
            cameraEyePosition=eye,
            cameraTargetPosition=target,
            cameraUpVector=up,
        )

        projectionMatrix = pb.computeProjectionMatrixFOV(fov=fov,
                                                         aspect=aspect,
                                                         nearVal=nearVal,
                                                         farVal=farVal)

        w, h, rgba_img, depth_img, seg_img = pb.getCameraImage(
            width=size,
            height=size,
            viewMatrix=viewMatrix,
            projectionMatrix=projectionMatrix)

        rgb_img = rgba_img[:, :, :-1]

        return rgb_img
Example 21
def get_camera_image_from_link(robot, link, pic_width, pic_height, fov,
                               nearval, farval):
    aspect = pic_width / pic_height
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, nearval,
                                                     farval)
    link_info = p.getLinkState(robot, link, 1, 1)  #Get head link info
    link_pos = link_info[0]  #Get link com pos wrt world
    link_ori = link_info[1]  #Get link com ori wrt world
    rot = p.getMatrixFromQuaternion(link_ori)
    rot = np.array(rot).reshape(3, 3)

    global_camera_x_unit = np.array([1, 0, 0])
    global_camera_z_unit = np.array([0, 0, 1])

    camera_eye_pos = link_pos + np.dot(rot, 0.1 * global_camera_x_unit)
    camera_target_pos = link_pos + np.dot(rot, 1.0 * global_camera_x_unit)
    camera_up_vector = np.dot(rot, global_camera_z_unit)
    view_matrix = p.computeViewMatrix(camera_eye_pos, camera_target_pos,
                                      camera_up_vector)  #SE3_camera_to_world
    width, height, rgb_img, depth_img, seg_img = p.getCameraImage(
        pic_width,  #image width
        pic_height,  #image height
        view_matrix,
        projection_matrix)
    return width, height, rgb_img, depth_img, seg_img, view_matrix, projection_matrix, camera_eye_pos
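A minimal usage sketch for the function above, assuming a connected simulator; the r2d2 model and link index 0 are placeholders, pick the link the camera is actually mounted on:

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot = p.loadURDF("r2d2.urdf", [0, 0, 1])

width, height, rgb_img, depth_img, seg_img, *_ = get_camera_image_from_link(
    robot, link=0, pic_width=320, pic_height=240,
    fov=60, nearval=0.1, farval=100.0)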
Example 22
def birds_eye(agent: Agent, width=640, height=480) -> np.ndarray:
    position, _ = pybullet.getBasePositionAndOrientation(agent.vehicle_id)
    position = np.array([position[0], position[1], 3.0])
    view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=position,
        distance=3.0,
        yaw=0,
        pitch=-90,
        roll=0,
        upAxisIndex=2)
    proj_matrix = pybullet.computeProjectionMatrixFOV(fov=90,
                                                      aspect=float(width) /
                                                      height,
                                                      nearVal=0.01,
                                                      farVal=100.0)
    _, _, rgb_image, _, _ = pybullet.getCameraImage(
        width=width,
        height=height,
        renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix)

    rgb_array = np.reshape(rgb_image, (height, width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 23
    def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)

        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60,
            aspect=float(self._render_width) / self._render_height,
            nearVal=0.1,
            farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(width=self._render_width,
                                                height=self._render_height,
                                                viewMatrix=view_matrix,
                                                projectionMatrix=proj_matrix,
                                                renderer=p.ER_TINY_RENDERER)
        rgb_array = np.array(px).reshape(
            (self._render_height, self._render_width, -1))  # rows first: (height, width, channels)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
Example 24
	def _render(self, mode, close):
		if mode == "human":
			self.isRender = True
		if mode != "rgb_array":
			return np.array([])

		base_pos = [0, 0, 0]
		if hasattr(self, 'robot') and hasattr(self.robot, 'body_xyz'):
			base_pos = self.robot.body_xyz

		view_matrix = p.computeViewMatrixFromYawPitchRoll(
			cameraTargetPosition=base_pos,
			distance=self._cam_dist,
			yaw=self._cam_yaw,
			pitch=self._cam_pitch,
			roll=0,
			upAxisIndex=2)
		proj_matrix = p.computeProjectionMatrixFOV(
			fov=60, aspect=float(self._render_width)/self._render_height,
			nearVal=0.1, farVal=100.0)
		(_, _, px, _, _) = p.getCameraImage(
			width=self._render_width, height=self._render_height,
			viewMatrix=view_matrix,
			projectionMatrix=proj_matrix,
			renderer=p.ER_BULLET_HARDWARE_OPENGL)
		rgb_array = np.array(px)
		rgb_array = rgb_array[:, :, :3]
		return rgb_array
Example 25
    def __init__(self):
        try:
            isInit
        except NameError:
            isInit = True
            CLIENT = pybullet.connect(pybullet.GUI)
            print("client", CLIENT)
            pb.setAdditionalSearchPath(
                pybullet_data.getDataPath())  #used by loadURDF
            plane = pybullet.loadURDF("plane.urdf")
            print("plane ID", plane)
            #pb.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, 0, physicsClientId=CLIENT)
            #pb.configureDebugVisualizer(pybullet.COV_ENABLE_TINY_RENDERER, 0, physicsClientId=CLIENT)
            pb.setGravity(0, 0, -10.0)
            self.table = pb.loadURDF("table/table.urdf")
            print("table ID", self.table)
            self.plate = pb.loadURDF("dish/plate.urdf")
            print("plate ID", self.plate)
            self.rgripper = RGripper()
            self.lgripper = LGripper()
            #self.robot_model = skrobot.models.urdf.RobotModelFromURDF(urdf_file=skrobot.data.pr2_urdfpath())
            self.robot_model = PR2()
            interface = PybulletRobotInterface(self.robot_model)
        self.try_num = 100  #Simulator loop times
        self.frames = []  #Movie buffer
        pb.resetDebugVisualizerCamera(2.0, 90, -0.0, (0.0, 0.0, 1.0))

        # For pybullet getCameraImage argument
        self.viewMatrix = pb.computeViewMatrix(cameraEyePosition=[5, 5, 30],
                                               cameraTargetPosition=[3, 3, 3],
                                               cameraUpVector=[3, 1, 3])
        self.projectionMatrix = pb.computeProjectionMatrixFOV(fov=45.0,
                                                              aspect=1.0,
                                                              nearVal=0.1,
                                                              farVal=3.1)
Example 26
    def getImage(self, flag=False):
        position = (0, -3, 2)
        targetPosition = (0, 0, 2)
        viewMatrix = p.computeViewMatrix(position,
                                         targetPosition,
                                         cameraUpVector=[0, 0, 1],
                                         physicsClientId=self.clientId)
        projectionMatrix = p.computeProjectionMatrixFOV(
            60, 1, 0.1, 3.5, physicsClientId=self.clientId)
        img = p.getCameraImage(self.res[0],
                               self.res[1],
                               viewMatrix,
                               projectionMatrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL,
                               physicsClientId=self.clientId)
        img = np.reshape(img[2], (self.res[1], self.res[0], 4))  # (height, width, RGBA)

        img = img[:, :, :3]  # drop the alpha channel
        if not flag:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            _, img = cv2.threshold(img, 180, 255, cv2.THRESH_BINARY_INV)

        return img.astype('uint8')
Example 27
 def render(self, mode='human'):
     if self.rendered_img is None:
         self.rendered_img = plt.imshow(np.zeros((100, 100, 4)))
     # basic info
     car_id, client_id = self.car.get_ids()
     proj_matrix = p.computeProjectionMatrixFOV(fov=80,
                                                aspect=1,
                                                nearVal=0.01,
                                                farVal=100)
     pos, ori = [
         list(l)
         for l in p.getBasePositionAndOrientation(car_id, client_id)
     ]
     pos[2] = 0.2
     # rotate the camera direction
     rot_mat = np.array(p.getMatrixFromQuaternion(ori)).reshape(3, 3)
     camera_vec = np.matmul(rot_mat, [1, 0, 0])
     up_vec = np.matmul(rot_mat, np.array([0, 0, 1]))
     view_matrix = p.computeViewMatrix(pos, pos + camera_vec, up_vec)
     # display the image
     frame = p.getCameraImage(100, 100, view_matrix, proj_matrix)[2]
     frame = np.reshape(frame, (100, 100, 4))
     self.rendered_img.set_data(frame)
     plt.draw()
     plt.pause(0.00001)
Example 28
    def __init__(self,
                 root_dir='bull/test_cabinets/solo',
                 masked=False,
                 debug_flag=False):
        '''
        Class for generating a simulated articulated-object dataset.
        params:
            - root_dir: save generated data in this directory
            - masked: should the background of depth images be 0s or 1s?
            - debug_flag: enable debug output
        '''
        self.scenes = []
        self.savedir = root_dir
        self.masked = masked
        self.img_idx = 0
        self.depth_data = []
        self.debugging = debug_flag

        # Camera external settings
        self.viewMatrix = pb.computeViewMatrix(cameraEyePosition=[4, 0, 1],
                                               cameraTargetPosition=[0, 0, 1],
                                               cameraUpVector=[0, 0, 1])

        # Camera internal settings
        self.projectionMatrix = pb.computeProjectionMatrixFOV(fov=45.,
                                                              aspect=1.0,
                                                              nearVal=0.1,
                                                              farVal=8.1)

        print(root_dir)
Example 29
    def camera_feed(self):
        """
		Function to get camera feed of the arena.

		Arguments:

			None
		
		Return Values:

			numpy array of RGB values
		"""
        look = [0, 0, 0.2]
        cameraeyepos = [0, 0, 6.5]
        cameraup = [0, -1, 0]
        self._view_matrix = p.computeViewMatrix(cameraeyepos, look, cameraup)
        fov = 75
        aspect = self._width / self._height
        near = 0.8
        far = 10
        self._proj_matrix = p.computeProjectionMatrixFOV(
            fov, aspect, near, far)
        img_arr = p.getCameraImage(width=self._width,
                                   height=self._height,
                                   viewMatrix=self._view_matrix,
                                   projectionMatrix=self._proj_matrix,
                                   renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb = np.uint8(img_arr[2])[:, :, :3]  # drop the alpha channel before color conversion
        rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
        return rgb
Example 31
    def renderPicture(self, height=320, width=320):
        '''Fetch a camera image from the bullet side'''
        base_pos, orn = p.getBasePositionAndOrientation(self.hand)
        yaw = p.getEulerFromQuaternion(orn)[2]  # body rotation angle about the z-axis
        rot_matrix = np.array(
            [[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])  # rotation matrix
        target_relative_vec2D = np.array([2, 0])  # target position relative to the body
        target_abs_vec2D = np.dot(
            rot_matrix, target_relative_vec2D)  # absolute position of the target

        cam_eye = np.array(base_pos) + \
            np.array([-0.01, -0.020, 0.020])  # camera position
        # focal point of the camera (the target); the z component sets the view height
        cam_target = np.array(base_pos) + np.append(target_abs_vec2D, 0.20)
        cam_upvec = [0, 0, 1]

        view_matrix = p.computeViewMatrix(
            cameraEyePosition=cam_eye,
            cameraTargetPosition=cam_target,
            cameraUpVector=cam_upvec)

        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(width)/height,
            nearVal=0.1, farVal=100.0)

        (_, _, rgb, _, mask) = p.getCameraImage(
            width=width, height=height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(rgb)
        rgb_array = rgb_array[:, :, :3]
        # mask_array = np.array(mask)

        return rgb_array
Example 32
 def step(self,action):
     p.stepSimulation()
     start = time.time()
     yaw = next(self.iter)
     viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
     aspect = pixelWidth / pixelHeight
     projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
     img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
         projectionMatrix, shadow=1,lightDirection=[1,1,1],
         renderer=p.ER_BULLET_HARDWARE_OPENGL)
         #renderer=pybullet.ER_TINY_RENDERER)
     self._observation = img_arr[2]
     return np.array(self._observation), 0, 0, {}
Example 33
def test(num_runs=300, shadow=1, log=True, plot=False):
    if log:
        logId = pybullet.startStateLogging(pybullet.STATE_LOGGING_PROFILE_TIMINGS, "renderTimings")

    if plot:
        plt.ion()

        img = np.random.rand(200, 320)
        image = plt.imshow(img, interpolation='none', animated=True, label="blah")
        ax = plt.gca()


    times = np.zeros(num_runs)
    yaw_gen = itertools.cycle(range(0,360,10))
    for i, yaw in zip(range(num_runs),yaw_gen):
        pybullet.stepSimulation()
        start = time.time()
        viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
        img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
            projectionMatrix, shadow=shadow,lightDirection=[1,1,1],
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
            #renderer=pybullet.ER_TINY_RENDERER)
        stop = time.time()
        duration = stop - start
        fps = 1. / duration if duration else 0
        times[i] = fps

        if plot:
            rgb = img_arr[2]
            image.set_data(rgb)  # much faster than calling imshow() again
            ax.plot([0])
            plt.pause(0.01)

    mean_time = float(np.mean(times))
    print("mean: {0} for {1} runs".format(mean_time, num_runs))
    print("")
    if log:
        pybullet.stopStateLogging(logId)
    return mean_time
Example 34
  def _reset(self):
    """Environment reset called at the beginning of an episode.
    """
    # Set the camera settings.
    look = [0.23, 0.2, 0.54]
    distance = 1.
    pitch = -56 + self._cameraRandom*np.random.uniform(-3, 3)
    yaw = 245 + self._cameraRandom*np.random.uniform(-3, 3)
    roll = 0
    self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 20. + self._cameraRandom*np.random.uniform(-2, 2)
    aspect = self._width / self._height
    near = 0.01
    far = 10
    self._proj_matrix = p.computeProjectionMatrixFOV(
        fov, aspect, near, far)
    
    self._attempted_grasp = False
    self._env_step = 0
    self.terminated = 0

    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot,"plane.urdf"),[0,0,-1])
    
    p.loadURDF(os.path.join(self._urdfRoot,"table/table.urdf"), 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)
            
    p.setGravity(0,0,-10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()

    # Choose the objects in the bin.
    urdfList = self._get_random_object(
      self._numObjects, self._isTest)
    self._objectUids = self._randomly_place_objects(urdfList)
    self._observation = self._get_observation()
    return np.array(self._observation)
Example 35
camDistance = 4
pixelWidth = 320
pixelHeight = 200
nearPlane = 0.01
farPlane = 100

fov = 60

main_start = time.time()
while 1:
  for yaw in range(0, 360, 10):
    pybullet.stepSimulation()
    start = time.time()
    viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
    img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,projectionMatrix, shadow=1,lightDirection=[1,1,1],renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
    stop = time.time()
    print ("renderImage %f" % (stop - start))

    w=img_arr[0] #width of the image, in pixels
    h=img_arr[1] #height of the image, in pixels
    rgb=img_arr[2] #color data RGB
    dep=img_arr[3] #depth data
    #print(rgb)
    print ('width = %d height = %d' % (w,h))

    #note that sending the data using imshow to matplotlib is really slow, so we use set_data

    #plt.imshow(rgb,interpolation='none')
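Note: the set_data pattern the comment above refers to, as a minimal sketch: create the axes and image handle once, then only swap the pixel data each frame instead of calling imshow repeatedly.

import matplotlib.pyplot as plt
import numpy as np

plt.ion()
image = plt.imshow(np.zeros((200, 320, 4), dtype=np.uint8))
for _ in range(10):
    frame = (np.random.rand(200, 320, 4) * 255).astype(np.uint8)
    image.set_data(frame)  # fast path: no new artists are created
    plt.pause(0.01)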
Example 36
p.loadURDF('plane.urdf')
p.loadURDF("r2d2.urdf",[0,0,1])
p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.0, 0.025])
cube_trans = p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.1, 0.025])
p.changeVisualShape(cube_trans,-1,rgbaColor=[1,1,1,0.1])
width = 128
height = 128

fov = 60
aspect = width / height
near = 0.02
far = 1

view_matrix = p.computeViewMatrix([0, 0, 0.5], [0, 0, 0], [1, 0, 0])
projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

# Get depth values using the OpenGL renderer
images = p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True,renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_opengl = np.reshape(images[2], (height, width, 4)) * 1. / 255.
depth_buffer_opengl = np.reshape(images[3], [height, width])  # rows first: (height, width)
depth_opengl = far * near / (far - (far - near) * depth_buffer_opengl)
seg_opengl = np.reshape(images[4], [height, width]) * 1. / 255.
time.sleep(1)

# Get depth values using Tiny renderer
images = p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True, renderer=p.ER_TINY_RENDERER)
depth_buffer_tiny = np.reshape(images[3], [height, width])
depth_tiny = far * near / (far - (far - near) * depth_buffer_tiny)
rgb_tiny = np.reshape(images[2], (height, width, 4)) * 1. / 255.
seg_tiny = np.reshape(images[4], [height, width]) * 1. / 255.
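A short follow-up sketch to eyeball the two depth images side by side, reusing the depth_opengl, depth_tiny, near, and far values computed above:

import matplotlib.pyplot as plt

fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(depth_opengl, cmap='gray', vmin=near, vmax=far)
ax0.set_title('OpenGL depth')
ax1.imshow(depth_tiny, cmap='gray', vmin=near, vmax=far)
ax1.set_title('Tiny renderer depth')
plt.show()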