Code example #1
0
File: active_perception.py  Project: ehu-ai/domrand
class SimManager(object):
    """Drive a MuJoCo scene through randomized configurations and emit
    sequences of camera images with object/robot poses and camera
    transforms (domain randomization for pose estimation).

    The start state of the model is snapshotted at construction so the
    _rand_* methods jitter around the original values instead of
    drifting cumulatively.
    """

    def __init__(self,
                 filepath,
                 random_params=None,
                 gpu_render=False,
                 gui=False,
                 display_data=False):
        """
        Args:
            filepath: path of the MuJoCo XML model to load.
            random_params: optional dict of overrides merged into the
                default randomization parameters (copied, never mutated).
            gpu_render: if True, create an MjViewer.
            gui: if True (and a viewer exists), draw debug markers.
            display_data: if True, display every captured camera frame.
        """
        self.model = load_model_from_path(filepath)
        self.sim = MjSim(self.model)
        self.filepath = filepath
        self.gui = gui
        self.display_data = display_data
        # Take the default random params and update anything we need.
        # (default changed from a mutable `{}` to None to avoid the
        # shared-mutable-default pitfall; behavior is unchanged.)
        self.RANDOM_PARAMS = {}
        self.RANDOM_PARAMS.update(random_params or {})

        self.viewer = MjViewer(self.sim) if gpu_render else None

        # Snapshot the start state of the model params so the _rand_*
        # methods can jitter around these later.
        self.START_GEOM_POS = self.model.geom_pos.copy()
        self.START_GEOM_SIZE = self.model.geom_size.copy()
        self.START_GEOM_QUAT = self.model.geom_quat.copy()
        self.START_BODY_POS = self.model.body_pos.copy()
        self.START_BODY_QUAT = self.model.body_quat.copy()
        self.START_MATID = self.model.geom_matid.copy()

        self.tex_modder = TextureModder(self.sim)
        self.tex_modder.whiten_materials(
        )  # ensures materials won't impact colors
        self.cam_modder = CameraModder(self.sim)
        self.light_modder = LightModder(self.sim)
        self.start_obj_pose = self.sim.data.get_joint_qpos(
            'object:joint').copy()

    def get_data(self, num_images=10):
        """
        Returns camera intrinsics, and a sequence of images, pose transforms,
        and camera transforms.

        Returns:
            (context, sequence): `context` holds "obj_world_pose",
            "robot_world_pose", and the 3x3 "cam_matrix"; `sequence` maps
            "img" -> list of frames and "cam_pose" -> list of 7-dim
            (position + quaternion) camera poses.
        """
        # Randomize everything except the camera; the camera is stepped
        # deterministically by _next_camera below.
        self._rand_textures()
        self._rand_lights()
        self._rand_object()
        self._rand_walls()
        self._rand_distract()
        sequence = defaultdict(list)
        context = {}
        # object pose
        obj_pose, robot_pose = self._get_ground_truth()
        context["obj_world_pose"] = obj_pose
        context["robot_world_pose"] = robot_pose
        # Waypoints the camera interpolates between (see _next_camera).
        self._cam_step = 0
        self._cam_choices = np.array([[-1.75, 0, 2], [-1.75, 0, 1.62],
                                      [-1.3, 1.7, 1.62]])
        self._curr_cam_pos = self._cam_choices[0]
        for _ in range(num_images):
            self._next_camera()
            self._forward()
            img = self._get_cam_frame()
            sequence["img"].append(img)
            cam_pos = self.cam_modder.get_pos("camera1")
            cam_quat = self.cam_modder.get_quat("camera1")
            cam_pose = np.concatenate([cam_pos, cam_quat]).astype(np.float32)
            sequence["cam_pose"].append(cam_pose)

        # Pinhole intrinsics from the vertical field of view; fovy is in
        # degrees, hence the pi/360 (= deg->rad of fovy/2) factor.
        cam_id = self.cam_modder.get_camid("camera1")
        fovy = self.sim.model.cam_fovy[cam_id]
        width, height = 640, 480
        f = 0.5 * height / math.tan(fovy * math.pi / 360)
        camera_intrinsics = np.array(
            ((f, 0, width / 2), (0, f, height / 2), (0, 0, 1)))
        context['cam_matrix'] = camera_intrinsics
        return context, sequence

    def _forward(self):
        """Advances simulator a step (NECESSARY TO MAKE CAMERA AND LIGHT
        MODDING WORK) and add some visualization."""
        self.sim.forward()
        if self.viewer and self.gui:
            # Show the camera's fovy and position as a debug marker.
            cam_pos = self.model.cam_pos[0]
            cam_fovy = self.model.cam_fovy[0]
            self.viewer.add_marker(pos=cam_pos,
                                   label="FOVY: {}, CAM: {}".format(
                                       cam_fovy, cam_pos))
            self.viewer.render()

    def _get_ground_truth(self):
        """Return (object pose, robot pose), each 7-dim (xyz position +
        quaternion), in world coordinates."""
        robot_gid = self.sim.model.geom_name2id('base_link')
        # Object pose comes straight from its free joint; robot pose from
        # the geom's world position plus the model's (static) quaternion.
        obj_pose = self.sim.data.get_joint_qpos('object:joint').copy()
        robot_world_pos = self.sim.data.geom_xpos[robot_gid]
        robot_world_quat = self.model.geom_quat[robot_gid].copy()
        robot_pose = np.concatenate([robot_world_pos,
                                     robot_world_quat]).astype(np.float32)
        return obj_pose, robot_pose

    def _get_cam_frame(self, ground_truth=None):
        """Grab an image from the camera to feed into a CNN.

        Args:
            ground_truth: optional label printed when display_data is set.
        """
        # Rendered images are upside-down, so flip vertically.
        cam_img = self.sim.render(
            640, 480, camera_name='camera1')[::-1, :, :]

        if self.display_data:
            print(ground_truth)
            display_image(cam_img, mode='preproc')

        return cam_img

    def _randomize(self):
        """Randomize every aspect of the scene, including the camera."""
        self._rand_textures()
        self._rand_camera()
        self._rand_lights()
        self._rand_object()
        self._rand_walls()
        self._rand_distract()

    def _rand_textures(self):
        """Randomize all the textures in the scene, including the skybox."""
        # With p=0.5, additionally brighten every texture a random amount.
        bright = np.random.binomial(1, 0.5)
        for name in self.sim.model.geom_names + ('skybox', ):
            self.tex_modder.rand_all(name)
            if bright:
                self.tex_modder.brighten(name, np.random.randint(0, 150))

    def _rand_camera(self):
        """Randomize pos and orientation of the camera.

        real camera pos is -1.75, 0, 1.62
        FOVY: Kinect2 is 53.8, ASUS is 45
        https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE/specifications/
        http://smeenk.com/kinect-field-of-view-comparison/
        """
        # Positional jitter range and angular jitter range (degrees).
        C_R3D = Range3D([-0.07, 0.07], [-0.07, 0.07], [-0.07, 0.07])
        ANG3 = Range3D([-3, 3], [-3, 3], [-3, 3])

        # Pick one of the preset viewpoints, look approximately at the
        # robot, then randomize the orientation around that.
        cam_choices = np.array([[-1.75, 0, 1.62], [-1.3, 1.7, 1.62],
                                [-1.75, 0, 2]])
        cam_pos = cam_choices[np.random.choice(len(cam_choices))]
        target_id = self.model.body_name2id(FLAGS.look_at)

        # Look-at offsets are currently disabled (kept at 0).
        cam_off = 0
        target_off = 0
        quat = look_at(cam_pos + cam_off,
                       self.sim.data.body_xpos[target_id] + target_off)
        quat = jitter_angle(quat, ANG3)

        cam_pos += sample_xyz(C_R3D)

        self.cam_modder.set_quat('camera1', quat)
        self.cam_modder.set_pos('camera1', cam_pos)
        self.cam_modder.set_fovy('camera1', 60)  # hard code to wide fovy

    def _next_camera(self):
        """Step the camera toward the next preset viewpoint in
        self._cam_choices, then jitter its pose.

        NOTE(review): `offset` is accumulated into _curr_cam_pos on every
        call rather than measured from the segment start, and
        `(self._cam_step // K) + 1` raises IndexError once _cam_step
        reaches (len(self._cam_choices) - 1) * K — fine for the default
        num_images=10, but confirm for longer sequences.  Also,
        `cam_pos` below aliases _curr_cam_pos (a view into _cam_choices),
        so the final jitter mutates the waypoint array — confirm intended.
        """
        # Positional jitter range and angular jitter range (degrees).
        C_R3D = Range3D([-0.07, 0.07], [-0.07, 0.07], [-0.07, 0.07])
        ANG3 = Range3D([-3, 3], [-3, 3], [-3, 3])

        # linearly interpolate to the next camera every K steps
        K = 5
        goal_cam_pos = self._cam_choices[(self._cam_step // K) + 1]
        offset = goal_cam_pos - self._curr_cam_pos
        offset *= (self._cam_step % K) / K
        self._curr_cam_pos += offset
        cam_pos = self._curr_cam_pos
        self._cam_step += 1

        # Look approximately at the robot, then randomize the orientation
        # around that.  Look-at offsets are currently disabled (kept at 0).
        target_id = self.model.body_name2id(FLAGS.look_at)
        cam_off = 0
        target_off = 0
        quat = look_at(cam_pos + cam_off,
                       self.sim.data.body_xpos[target_id] + target_off)
        quat = jitter_angle(quat, ANG3)

        cam_pos += sample_xyz(C_R3D)

        self.cam_modder.set_quat('camera1', quat)
        self.cam_modder.set_pos('camera1', cam_pos)
        self.cam_modder.set_fovy('camera1', 60)  # hard code to wide fovy

    def _rand_lights(self):
        """Randomize pos, direction, activity, and colors of the lights."""
        X = Range(-1.5, -0.5)
        Y = Range(-0.6, 0.6)
        Z = Range(1.0, 1.5)
        LIGHT_R3D = Range3D(X, Y, Z)

        # TODO: also try not altering the light dirs and just keeping them
        # at like -1, or [0, -0.15, -1.0]
        for name in self.model.light_names:
            lid = self.model.light_name2id(name)
            # Light 0 always stays on; the others are active with p=0.8.
            if lid != 0:
                self.light_modder.set_active(name, sample([0, 1]) < 0.8)
                self.light_modder.set_dir(name, sample_light_dir())

            self.light_modder.set_pos(name, sample_xyz(LIGHT_R3D))

            # Grayscale specular/diffuse/ambient intensities in [0.5, 1).
            spec = np.array([sample(Range(0.5, 1))] * 3)
            diffuse = np.array([sample(Range(0.5, 1))] * 3)
            ambient = np.array([sample(Range(0.5, 1))] * 3)

            self.light_modder.set_specular(name, spec)
            self.light_modder.set_diffuse(name, diffuse)
            self.light_modder.set_ambient(name, ambient)
            # Shadows are cast by each light with p=0.5.
            self.model.light_castshadow[lid] = sample([0, 1]) < 0.5

    def _rand_robot(self):
        """Randomize joint angles and jitter orientation."""
        jnt_shape = self.sim.data.qpos.shape
        self.sim.data.qpos[:] = sample_joints(self.model.jnt_range, jnt_shape)

        robot_gid = self.model.geom_name2id('robot_table_link')
        self.model.geom_quat[robot_gid] = jitter_quat(
            self.START_GEOM_QUAT[robot_gid], 0.01)

    def _rand_object(self):
        """Move the free object to a random pose on the object table."""
        table_gid = self.model.geom_name2id('object_table')

        obj_pose = self.start_obj_pose.copy()
        # Uniform XY over the table's half-extents; the object's own size
        # is not subtracted, so it may overhang the table edge.
        xval = self.model.geom_size[table_gid][0]
        yval = self.model.geom_size[table_gid][1]

        O_X = Range(-xval, xval)
        O_Y = Range(-yval, yval)
        O_Z = Range(0, 0)
        O_R3D = Range3D(O_X, O_Y, O_Z)

        obj_pose[:3] = obj_pose[:3] + sample_xyz(O_R3D)
        obj_pose[3:] = jitter_quat(obj_pose[3:], 0.1)
        self.sim.data.set_joint_qpos('object:joint', obj_pose)

    def _rand_walls(self):
        """Jitter the four walls and the west window around their start
        poses."""
        wall_bids = {
            name: self.model.body_name2id(name)
            for name in ['wall_' + d for d in 'nesw']
        }
        window_gid = self.model.geom_name2id('west_window')

        # Wall position jitter.
        WA_R3D = Range3D(Range(-0.2, 0.2), Range(-0.2, 0.2),
                         Range(-0.1, 0.1))
        # Window position jitter (no Y movement).
        WI_R3D = Range3D(Range(-0.1, 0.1), Range(0, 0), Range(-0.5, 0.5))
        # Window orientation: pitch only, up to +/-10 degrees.
        RPY_R = Range3D(Range(0, 0), Range(-10, 10), Range(0, 0))

        self.model.geom_quat[window_gid] = sample_quat(RPY_R)
        self.model.geom_pos[window_gid] = (
            self.START_GEOM_POS[window_gid] + sample_xyz(WI_R3D))

        for name in wall_bids:
            bid = wall_bids[name]
            self.model.body_quat[bid] = jitter_quat(
                self.START_BODY_QUAT[bid], 0.01)
            self.model.body_pos[bid] = (
                self.START_BODY_POS[bid] + sample_xyz(WA_R3D))

    def _rand_distract(self):
        """Scatter the 'distract*' geoms with random pose, size, type,
        and visibility."""
        PREFIX = 'distract'
        geom_names = [
            name for name in self.model.geom_names if name.startswith(PREFIX)
        ]

        # Size range
        S3D = Range3D(Range(0.01, 0.5), Range(0.01, 0.9), Range(0.01, 0.5))
        # Back range
        B_P3D = Range3D(Range(-0.5, 2), Range(-1.5, 2), Range(0, 3))
        # Front range
        F_P3D = Range3D(Range(-2, -0.5), Range(-2, 1), Range(0, 0.5))

        for name in geom_names:
            gid = self.model.geom_name2id(name)
            # Behind or in front of the scene with equal probability.
            # (renamed from `range`, which shadowed the builtin)
            pos_range = B_P3D if np.random.binomial(1, 0.5) else F_P3D

            self.model.geom_pos[gid] = sample_xyz(pos_range)
            self.model.geom_quat[gid] = random_quat()
            self.model.geom_size[gid] = sample_xyz(S3D, mode='logspace')
            self.model.geom_type[gid] = sample_geom_type()
            # Hide each distractor entirely with p=0.5 (alpha 0 or 1).
            self.model.geom_rgba[gid][-1] = np.random.binomial(1, 0.5)

    def _set_visible(self, prefix, range_top, visible):
        """Helper function to set visibility of several objects.

        With range_top == 0 the single geom named `prefix` is affected;
        otherwise the geoms `prefix0` .. `prefix{range_top-1}`.
        """
        alpha = 1.0 if visible else 0.0
        if range_top == 0:
            names = [prefix]
        else:
            names = ["{}{}".format(prefix, i) for i in range(range_top)]
        for name in names:
            gid = self.model.geom_name2id(name)
            self.model.geom_rgba[gid][-1] = alpha
Code example #2
0
<?xml version="1.0" ?>
<mujoco>
    <worldbody>
        <body name="box" pos="0 0 0.2">
            <geom size="0.15 0.15 0.15" type="sphere"/>
            <joint axis="1 0 0" name="box:x" type="slide"/>
            <joint axis="0 1 0" name="box:y" type="slide"/>
        </body>
        <body name="floor" pos="0 0 0.025">
            <geom size="1.0 1.0 0.02" rgba="0 1 0 1" type="box"/>
        </body>
    </worldbody>
</mujoco>
"""

# Build the simulation and interactive viewer from the inline XML model.
model = load_model_from_xml(MODEL_XML)
sim = MjSim(model)
viewer = MjViewer(sim)

frame_idx = 0
while True:
    now = time.time()
    # Move a sphere marker around the unit circle, labelled with the time.
    circle_x, circle_y = math.cos(now), math.sin(now)
    viewer.add_marker(pos=np.array([circle_x, circle_y, 1]),
                      label=str(now),
                      type=const.GEOM_SPHERE)
    viewer.render()

    frame_idx += 1
    # Under CI (TESTING set), stop after ~100 frames instead of spinning forever.
    if frame_idx > 100 and os.getenv('TESTING') is not None:
        break
Code example #3
0
class GatherEnv:
    """Base environment for a MuJoCo 'gather' task: an agent collects
    apples (reward) and avoids bombs (cost) inside a walled arena,
    observing them through binned range sensors.

    Subclasses implement step/_do_simulation/_get_self_obs/
    _get_orientation/_is_done/_unhealthy_cost for a concrete robot.
    """

    # NOTE(review): exclude=('model_path') is a plain string, not a
    # 1-tuple — confirm autoassign treats it as intended.
    @autoassign(exclude=('model_path'))
    def __init__(self,
                 model_path,
                 n_apples=8,
                 n_bombs=8,
                 apple_reward=10,
                 bomb_cost=1,
                 activity_range=6.0,
                 catch_range=10,
                 n_bins=10,
                 robot_object_spacing=2.0,
                 sensor_range=6.0,
                 sensor_span=pi):
        # autoassign stores every parameter except model_path on self.
        self.viewer = None

        self.apples = []
        self.bombs = []

        self.model = self.build_model(model_path)
        self.sim = MjSim(self.model)

        self._max_episode_steps = None
        self._step_num = 0

    def build_model(self, agent_xml_path):
        """Parse the agent XML, add a floor and four boundary walls sized
        to the activity range, and return the compiled MuJoCo model."""
        sim_size = self.activity_range + 1

        model_tree = ET.parse(agent_xml_path)
        worldbody = model_tree.find('.//worldbody')

        floor_attrs = dict(name='floor',
                           type='plane',
                           material='MatPlane',
                           pos='0 0 0',
                           size=f'{sim_size} {sim_size} {sim_size}',
                           conaffinity='1',
                           rgba='0.8 0.9 0.8 1',
                           condim='3')
        wall_attrs = dict(type='box',
                          conaffinity='1',
                          rgba='0.8 0.9 0.8 1',
                          condim='3')

        # North/south walls span X; east/west walls span Y.
        wall_poses = [f'0 {-sim_size} 0', f'0 {sim_size} 0',
                      f'{-sim_size} 0 0', f'{sim_size} 0 0']
        wall_sizes = [f'{sim_size + 0.1} 0.1 1', f'{sim_size + 0.1} 0.1 1',
                      f'0.1 {sim_size + 0.1} 1', f'0.1 {sim_size + 0.1} 1']

        # Add a floor to the model
        ET.SubElement(worldbody, 'geom', **floor_attrs)

        # Add walls to the model
        for i, pos, size in zip(range(4), wall_poses, wall_sizes):
            ET.SubElement(
                worldbody, 'geom',
                dict(**wall_attrs, name=f'wall{i}', pos=pos, size=size))

        model_xml = ET.tostring(model_tree.getroot(),
                                encoding='unicode',
                                method='xml')
        return load_model_from_xml(model_xml)

    def reset(self):
        """Reset the simulator and re-scatter apples and bombs on distinct
        integer coordinates away from the agent; return the first obs."""
        self.sim.reset()

        self.apples = []
        self.bombs = []
        obj_coords = []

        self._step_num = 0

        agent_x_pos, agent_y_pos = self.sim.data.qpos[0], self.sim.data.qpos[1]

        rand_coord = lambda: np.random.randint(-self.activity_range, self.
                                               activity_range)

        while len(self.apples) < self.n_apples:
            x, y = rand_coord(), rand_coord()

            # Change this later to make (0, 0) the position of the agent
            if in_range(x, y, agent_x_pos, agent_y_pos,
                        self.robot_object_spacing):
                continue

            if (x, y) in obj_coords:
                continue

            self.apples.append(Object(APPLE, x, y))
            obj_coords.append((x, y))

        while len(self.bombs) < self.n_bombs:
            x, y = rand_coord(), rand_coord()

            # NOTE(review): bombs keep distance from (0, 0) while apples
            # use the agent's actual position — confirm intentional.
            if in_range(x, y, 0, 0, self.robot_object_spacing):
                continue

            if (x, y) in obj_coords:
                continue

            self.bombs.append(Object(BOMB, x, y))
            obj_coords.append((x, y))

        return self._get_obs()

    def step(self, action):
        """Advance the environment one step; implemented by subclasses."""
        raise NotImplementedError

    def _do_simulation(self, action):
        """Apply `action` to the simulator; implemented by subclasses."""
        raise NotImplementedError

    def _update_objects(self):
        """Remove objects within catch_range of the agent and return the
        counts of (apples, bombs) caught this step."""
        agent_x_pos, agent_y_pos = self.sim.data.qpos[0], self.sim.data.qpos[1]

        def collect(objects):
            # Rebuild the list instead of removing during iteration; the
            # original `remove` inside the for-loop skipped the element
            # following each removal.
            caught, kept = 0, []
            for obj in objects:
                if in_range(obj.x, obj.y, agent_x_pos, agent_y_pos,
                            self.catch_range):
                    caught += 1
                else:
                    kept.append(obj)
            return caught, kept

        n_apples, self.apples = collect(self.apples)
        n_bombs, self.bombs = collect(self.bombs)

        return n_apples, n_bombs

    def _get_self_obs(self):
        """Robot-specific observation; implemented by subclasses."""
        raise NotImplementedError

    def _get_sensor_obs(self):
        """Return 2*n_bins readings: apple intensities followed by bomb
        intensities, binned over the sensor span; intensity grows as the
        object gets closer."""
        apple_readings = np.zeros(self.n_bins)
        bomb_readings = np.zeros(self.n_bins)

        idx = self.model.body_names.index('torso')
        com = self.sim.data.subtree_com[idx].flat
        agent_x, agent_y = com[:2]

        # Process nearest objects last so they overwrite farther ones that
        # fall into the same bin.
        sort_key = lambda obj: -euclidian_dist(obj.x, obj.y, agent_x, agent_y)
        sorted_objs = sorted(self.apples + self.bombs, key=sort_key)

        bin_res = self.sensor_span / self.n_bins
        half_span = self.sensor_span / 2
        orientation = std_radians(self._get_orientation())

        for obj_type, obj_x, obj_y in sorted_objs:
            dist = euclidian_dist(obj_x, obj_y, agent_x, agent_y)

            if dist > self.sensor_range:
                continue

            # Get the components of the vector from the agent to the object
            x_delta = obj_x - agent_x
            y_delta = obj_y - agent_y

            # Angle of that vector, shifted so 0 is the span's lower edge.
            angle = np.arctan2(y_delta, x_delta)
            angle = std_radians(angle - orientation + half_span)

            if angle > self.sensor_span:
                continue

            bin_number = int(angle / bin_res)

            # An object exactly on the span's upper bound belongs in the
            # last bin.  (Bug fix: the original compared bin_number with
            # int(angle / bin_res) — always true — which decremented
            # every reading by one bin.)
            if bin_number == self.n_bins:
                bin_number -= 1

            intensity = 1.0 - dist / self.sensor_range

            if obj_type == APPLE:
                apple_readings[bin_number] = intensity
            else:
                bomb_readings[bin_number] = intensity

        return np.concatenate([apple_readings, bomb_readings])

    def _get_obs(self):
        """Full observation: robot state followed by sensor readings."""
        return np.concatenate([self._get_self_obs(), self._get_sensor_obs()])

    def _get_orientation(self):
        """Agent heading in radians; implemented by subclasses."""
        raise NotImplementedError

    def _is_done(self):
        """Episode-termination test; implemented by subclasses."""
        raise NotImplementedError

    def _unhealthy_cost(self):
        """Penalty for unhealthy states; implemented by subclasses."""
        raise NotImplementedError

    def render(self):
        """Render the scene, drawing a sphere marker for every remaining
        apple and bomb."""
        if not self.viewer:
            self.viewer = MjViewer(self.sim)

        for obj in self.apples + self.bombs:
            rgba = APPLE_RGBA if obj.type is APPLE else BOMB_RGBA
            self.viewer.add_marker(type=GEOM_SPHERE,
                                   pos=np.asarray([obj.x, obj.y, 0.5]),
                                   rgba=rgba,
                                   size=np.asarray([0.5] * 3))

        self.viewer.render()
Code example #4
0
File: markers_demo.py  Project: xinsongyan/mujoco-py
# Minimal mujoco-py marker demo: a sliding box and a floor defined inline.
MODEL_XML = """
<?xml version="1.0" ?>
<mujoco>
    <worldbody>
        <body name="box" pos="0 0 0.2">
            <geom size="0.15 0.15 0.15" type="box"/>
            <joint axis="1 0 0" name="box:x" type="slide"/>
            <joint axis="0 1 0" name="box:y" type="slide"/>
        </body>
        <body name="floor" pos="0 0 0.025">
            <geom size="1.0 1.0 0.02" rgba="0 1 0 1" type="box"/>
        </body>
    </worldbody>
</mujoco>
"""

model = load_model_from_xml(MODEL_XML)
sim = MjSim(model)
viewer = MjViewer(sim)

frame_count = 0
while True:
    now = time.time()
    # Sweep a labelled marker around the unit circle at height 1.
    viewer.add_marker(pos=np.array([math.cos(now), math.sin(now), 1]),
                      label="t: " + str(now))
    viewer.render()

    frame_count += 1
    # Under CI (TESTING set), stop after ~100 frames instead of looping forever.
    if frame_count > 100 and os.getenv('TESTING') is not None:
        break
Code example #5
0
class PointEnvMDP(MDP):
    """Point-mass MDP built on the dm_control point_mass MuJoCo model:
    a 2-D agent must reach the origin within a small tolerance."""

    def __init__(self,
                 init_mean=(-0.2, -0.2),
                 control_cost=False,
                 dense_reward=False,
                 render=False):
        """
        Args:
            init_mean: mean (x, y) start position; uniform noise in
                [0, init_noise) is added at every reset.
            control_cost: if True, subtract 0.1 * ||action|| from rewards.
            dense_reward: if True, reward is -distance to target;
                otherwise -1 per step and 0 at the goal.
            render: if True, open an MjViewer and render each step.
        """
        xml = os.path.join(
            os.path.expanduser("~"),
            "git-repos/dm_control/dm_control/suite/point_mass.xml")
        model = load_model_from_path(xml)
        self.sim = MjSim(self.model if False else model)
        self.render = render
        self.init_mean = init_mean
        self.control_cost = control_cost
        self.dense_reward = dense_reward

        if self.render: self.viewer = MjViewer(self.sim)

        # Config
        self.env_name = "Point-Mass-Environment"
        self.target_position = np.array([0., 0.])
        self.target_tolerance = 0.02
        self.init_noise = 0.05

        self._initialize_mujoco_state()
        self.init_state = self.get_state()

        print("Loaded {} with dense_reward={}".format(self.env_name,
                                                      self.dense_reward))

        MDP.__init__(self, [0, 1], self._transition_func, self._reward_func,
                     self.init_state)

    def _reward_func(self, state, action):
        """Step the simulator with `action` and return the reward of the
        resulting state (cached for _transition_func)."""
        self.next_state = self._step(action)
        if self.render: self.viewer.render()
        if self.dense_reward:
            reward = -np.linalg.norm(self.next_state.position -
                                     self.target_position)
        else:
            reward = +0. if self.next_state.is_terminal() else -1.
        if self.control_cost:
            reward = reward - 0.1 * np.linalg.norm(action)
        return reward

    def _transition_func(self, state, action):
        """Return the state cached by the immediately preceding
        _reward_func call (MDP evaluates reward before transition)."""
        return self.next_state

    def execute_agent_action(self, action, option_idx=None):
        """Execute `action`; when rendering, annotate the frame with the
        executing option's index."""
        if self.render and option_idx is not None:
            self.viewer.add_marker(pos=np.array([0, 0, 0.1]),
                                   size=0.001 * np.ones(3),
                                   label="Option {}".format(option_idx))
        reward, next_state = super(PointEnvMDP,
                                   self).execute_agent_action(action)
        return reward, next_state

    def is_goal_state(self, state):
        """True when `state` (PointEnvState or raw array) is at the goal."""
        position = state.features()[:2] if isinstance(
            state, PointEnvState) else state[:2]
        return self.is_goal_position(position)

    def is_goal_position(self, position):
        """True when `position` is within target_tolerance of the target."""
        distance = np.linalg.norm(position - self.target_position)
        return distance <= self.target_tolerance

    def get_state(self):
        """Build a PointEnvState from the current simulator state."""
        # Individually indexing to prevent Mujoco from altering state history
        x, y = self.sim.data.qpos[0], self.sim.data.qpos[1]
        x_dot, y_dot = self.sim.data.qvel[0], self.sim.data.qvel[1]

        # Create State object from simulator state
        position = np.array([x, y])
        velocity = np.array([x_dot, y_dot])

        # State is terminal when it is the goal state
        done = self.is_goal_position(position)
        return PointEnvState(position, velocity, done)

    def _step(self, action):
        """Apply `action` as control, advance one sim step, return state."""
        self.sim.data.ctrl[:] = action
        self.sim.step()
        return self.get_state()

    def _initialize_mujoco_state(self):
        """Place the point mass at init_mean plus uniform noise, at rest."""
        init_position = np.array(self.init_mean) + np.random.uniform(
            0., self.init_noise, 2)
        init_state = MjSimState(time=0.,
                                qpos=init_position,
                                qvel=np.array([0., 0.]),
                                act=None,
                                udd_state={})
        self.sim.set_state(init_state)

    @staticmethod
    def state_space_size():
        """Dimension of the state: (x, y, x_dot, y_dot)."""
        return 4

    @staticmethod
    def action_space_size():
        """Dimension of the action: 2-D force."""
        return 2

    @staticmethod
    def is_primitive_action(action):
        """Return True iff every component of `action` lies in [-1, 1].

        Bug fix: the original `-1. <= action.all() <= 1.` compared the
        boolean result of all() against the bounds, which is always True.
        """
        action = np.asarray(action)
        return bool(((action >= -1.) & (action <= 1.)).all())

    def reset(self):
        """Re-randomize the start state and reset the base MDP."""
        self._initialize_mujoco_state()
        self.init_state = self.get_state()

        super(PointEnvMDP, self).reset()

    def __str__(self):
        return self.env_name
Code example #6
0
class SimManager():
    """Produce randomized camera frames and ground-truth labels from one
    MuJoCo arena simulation (or a render pool of several)."""

    def __init__(self,
                 filepath,
                 blender_path=None,
                 visualize=False,
                 num_sims=1):
        self.filepath = filepath
        self.blender_path = blender_path
        self.visualize = visualize
        self.num_sims = num_sims

        self.base_model = load_model_from_path(filepath)

        if self.num_sims > 1:
            # Multi-sim: render in parallel with one ArenaModder per worker.
            self.pool = MjRenderPool(self.base_model,
                                     n_workers=num_sims,
                                     modder=ArenaModder)
        else:
            # Single-sim: one simulator, one modder, optional viewer.
            self.sim = MjSim(self.base_model)
            self.arena_modder = ArenaModder(self.sim)
            if self.visualize:
                self.arena_modder.visualize = True
                self.viewer = MjViewer(self.sim)
                self.arena_modder.viewer = self.viewer
            else:
                self.viewer = None

    def forward(self):
        """
        Advances simulator a step (NECESSARY TO MAKE CAMERA AND LIGHT MODDING WORK)
        """
        if self.num_sims > 1:
            return
        self.sim.forward()
        if not self.visualize:
            return
        # Show the camera's Euler angles next to its position marker.
        cam_quat = np.quaternion(*self.base_model.cam_quat[0])
        euler_deg = quaternion.as_euler_angles(cam_quat) * 180 / np.pi
        marker_pos = self.base_model.cam_pos[0]
        self.viewer.add_marker(pos=marker_pos,
                               label="CAM: {}".format(euler_deg))
        self.viewer.render()

    def _get_pool_data(self):
        """Render a randomized batch from the pool; return (images, labels)."""
        noise_range = Range(0.0, 0.0001)

        frames, ground_truths = self.pool.render(640,
                                                 360,
                                                 camera_name='camera1',
                                                 randomize=True)
        ground_truths = list(ground_truths)
        # Rendered images are upside-down.
        frames = list(frames[:, ::-1, :, :])

        for idx in range(len(frames)):
            noisy = skimage.util.random_noise(frames[idx],
                                              mode='gaussian',
                                              var=sample(noise_range))
            frames[idx] = preproc_image((noisy * 255).astype(np.uint8))

        return frames, ground_truths

    def _get_cam_frame(self, display=False, ground_truth=None):
        """Grab an image from the camera (224, 244, 3) to feed into CNN"""
        noise_range = Range(0.0, 0.0001)
        # Rendered images are upside-down, so flip vertically.
        frame = self.sim.render(640, 360, camera_name='camera1')[::-1, :, :]
        noisy = skimage.util.random_noise(frame,
                                          mode='gaussian',
                                          var=sample(noise_range))
        frame = preproc_image((noisy * 255).astype(np.uint8))
        if display:
            display_image(frame, str(ground_truth[3:6]))
        return frame

    def _get_ground_truth(self):
        """Delegate ground-truth extraction to the arena modder."""
        return self.arena_modder.get_ground_truth()

    def get_data(self):
        """Return one (images, labels) batch, via the pool when multi-sim."""
        if self.num_sims > 1:
            return self._get_pool_data()
        self.arena_modder.randomize()
        gt = self._get_ground_truth()
        self.sim.forward()
        return self._get_cam_frame(), gt

    def randrocks(self):
        """Generate a new set of 3 random rock meshes using a Blender script"""
        if self.blender_path is None:
            raise Exception(
                'You must install Blender and include the path to its exectubale in the constructor to use this method'
            )

        import subprocess
        subprocess.call(
            [self.blender_path, "--background", "--python", "randrock.py"])
Code example #7
0
File: markers_demo.py  Project: jeffkaufman/mujoco-py
# Minimal mujoco-py marker demo: a sliding box and a floor defined inline.
MODEL_XML = """
<?xml version="1.0" ?>
<mujoco>
    <worldbody>
        <body name="box" pos="0 0 0.2">
            <geom size="0.15 0.15 0.15" type="box"/>
            <joint axis="1 0 0" name="box:x" type="slide"/>
            <joint axis="0 1 0" name="box:y" type="slide"/>
        </body>
        <body name="floor" pos="0 0 0.025">
            <geom size="1.0 1.0 0.02" rgba="0 1 0 1" type="box"/>
        </body>
    </worldbody>
</mujoco>
"""

model = load_model_from_xml(MODEL_XML)
sim = MjSim(model)
viewer = MjViewer(sim)

num_frames = 0
while True:
    now = time.time()
    # Circle a marker (labelled with the wall-clock time) above the scene.
    px, py = math.cos(now), math.sin(now)
    viewer.add_marker(pos=np.array([px, py, 1]),
                      label=str(now))
    viewer.render()

    num_frames += 1
    # Under CI (TESTING set), stop after ~100 frames instead of looping forever.
    if num_frames > 100 and os.getenv('TESTING') is not None:
        break