Code example #1
0
    def step(self, a):
        """Apply action `a`, simulate `frame_skip` frames, and return
        (observation, reward, done, info). `done` is always False; the
        reward combines stand-up height, forward CoM velocity, control
        and impact penalties, and an alive bonus."""
        com_before = mass_center(self.model, self.sim)
        self.do_simulation(a, self.frame_skip)
        com_after = mass_center(self.model, self.sim)

        data = self.sim.data
        torso_height = data.qpos[2]

        # The agent counts as upright while the torso height stays in [1, 2];
        # upright earns the larger alive bonus.
        is_up = 1.0 <= torso_height <= 2.0
        alive_bonus = 5.0 if is_up else 1.0

        dt = self.model.opt.timestep
        uph_cost = torso_height / dt
        lin_vel_cost = 0.25 * (com_after - com_before) / dt

        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        # Impact penalty is capped at 10 so contact spikes cannot dominate.
        quad_impact_cost = min(.5e-6 * np.square(data.cfrc_ext).sum(), 10)

        # NOTE(review): lin_vel_cost is added despite its "cost" name,
        # matching the original implementation.
        reward = lin_vel_cost + uph_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus

        info = dict(
            reward_linup=uph_cost,
            reward_quadctrl=-quad_ctrl_cost,
            reward_impact=-quad_impact_cost,
            reward_alive=alive_bonus)
        return self._get_obs(), reward, False, info
Code example #2
0
    def _step(self, a):
        """Apply action `a` and return (observation, reward, done, info)
        for the stand-up task (old mujoco-py API: `self.model.data`).

        `done` is always False. The reward combines stand-up height,
        CoM velocity, control and impact penalties, and an alive bonus.
        """
        pos_before = mass_center(self.model)
        self.do_simulation(a, self.frame_skip)
        pos_after = mass_center(self.model)

        data = self.model.data

        # Torso height; qpos entries are (1,)-arrays in old mujoco-py,
        # so [0] extracts the scalar.
        pos_after_standup = data.qpos[2][0]

        # Reuse the scalar height instead of re-indexing data.qpos[2];
        # the agent counts as "down" outside the [1.0, 2.0] band.
        down = bool((pos_after_standup < 1.0) or (pos_after_standup > 2.0))
        alive_bonus = 5.0 if not down else 1.0

        dt = self.model.opt.timestep
        uph_cost = pos_after_standup / dt
        lin_vel_cost = 0.25 * (pos_after - pos_before) / dt

        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
        # Cap the impact penalty so contact-force spikes cannot dominate.
        quad_impact_cost = min(quad_impact_cost, 10)

        # NOTE(review): lin_vel_cost is added despite its "cost" name,
        # matching the original implementation.
        reward = lin_vel_cost + uph_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus

        done = False
        return self._get_obs(), reward, done, dict(
            reward_linup=uph_cost,
            reward_quadctrl=-quad_ctrl_cost,
            reward_impact=-quad_impact_cost,
            reward_alive=alive_bonus)
Code example #3
0
    def step(self, a):
        """Wrap the parent env's step, replacing its dense reward with a
        sparse one: +1 each time the center of mass crosses the next
        1-unit milestone tracked in `self._base_pos`. The dense reward is
        preserved in info['original_rew']."""
        # a = self._add_noise(a)
        obs, dense_reward, done, info = super().step(a)

        com = mass_center(self.model, self.sim)
        sparse_rew = 0
        if com > self._base_pos:
            # Advance the milestone by one unit and pay out the sparse reward.
            self._base_pos += 1.
            sparse_rew = 1

        info['original_rew'] = dense_reward
        return obs, sparse_rew, done, info
Code example #4
0
File: humanoidstandup.py  Project: zyyjjj/gym
    def step(self, a):
        """Stand-up step returning (observation, reward, done, info).

        `done` is always False. The info dict additionally carries two
        auxiliary subtask rewards: the center-of-mass position and the
        torso's upward velocity."""
        height_before = self.sim.data.qpos[2]
        self.do_simulation(a, self.frame_skip)
        data = self.sim.data
        height_after = data.qpos[2]

        dt = self.model.opt.timestep
        uph_cost = height_after / dt
        up_vel = (height_after - height_before) / dt

        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        # Impact penalty is capped at 10 so contact spikes cannot dominate.
        quad_impact_cost = min(.5e-6 * np.square(data.cfrc_ext).sum(), 10)
        reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1

        info = dict(
            reward_linup=uph_cost,
            reward_quadctrl=-quad_ctrl_cost,
            reward_impact=-quad_impact_cost,
            subtask_1=mass_center(self.model, self.sim),
            subtask_2=up_vel)
        return self._get_obs(), reward, False, info
Code example #5
0
File: humanoid.py  Project: roosephu/boots
 def _get_obs(self):
     """Return the parent observation extended by one scalar: the model's
     center-of-mass position as computed by `mass_center`."""
     # The unused `data = self.sim.data` local from the original was removed.
     obs = super()._get_obs()
     center = mass_center(self.model, self.sim)
     return np.concatenate([obs, [center]])