def _init():
        # Build a Solo 8 v2 vanilla environment rewarded for holding a
        # standing "home" pose. `length` (episode length) and
        # `quad_standing_height` (target torso height) are captured from the
        # enclosing scope.
        env_config = solo8v2vanilla.Solo8VanillaConfig()
        env = gym.make('solo8vanilla-v0',
                       config=env_config,
                       normalize_actions=False)

        env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
        env.obs_factory.register_observation(obs.MotorEncoder(env.robot))
        env.termination_factory.register_termination(
            terms.TimeBasedTermination(length))

        # Standing reward: equal weight on a flat torso and the target height.
        stand_reward = rewards.AdditiveReward()
        stand_reward.client = env.client

        stand_reward.add_term(0.5, rewards.FlatTorsoReward(env.robot))
        stand_reward.add_term(
            0.5, rewards.TorsoHeightReward(env.robot, quad_standing_height))

        # Home-position reward: the standing reward gated by small control
        # effort and near-zero horizontal movement.
        home_pos_reward = rewards.MultiplicitiveReward(
            1, stand_reward, rewards.SmallControlReward(env.robot),
            rewards.HorizontalMoveSpeedReward(env.robot, 0))
        home_pos_reward.client = env.client

        env.reward_factory.register_reward(1, home_pos_reward)

        return env
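A minimal usage sketch for the factory above, assuming hypothetical values for the closure variables `length` and `quad_standing_height` and the classic four-tuple Gym step API:

length = 1000                 # hypothetical episode length, in simulation steps
quad_standing_height = 0.3    # hypothetical target torso height, in meters

env = _init()
observation = env.reset()
done = False
while not done:
  # Hold the zero action; in this configuration actions are not normalized.
  observation, reward, done, info = env.step([0.0] * env.action_space.shape[0])
env.close()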
Example No. 2
    def test_computation(self, name, speed, target, hard, soft, low, high):
        # Fake pybullet client whose reported base velocity has a planar
        # (x, y) magnitude equal to `speed`.
        mock_client = mock.MagicMock()
        mock_client.getBaseVelocity.return_value = ((speed / math.sqrt(2),
                                                     speed / math.sqrt(2),
                                                     None), None)
        r = rewards.HorizontalMoveSpeedReward(1, target, hard, soft)
        r.client = mock_client

        val = r.compute()

        self.assertGreaterEqual(val, low)
        self.assertLessEqual(val, high)
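Why the mock splits `speed` evenly across the x and y axes: the reward presumably uses the planar magnitude of the base velocity, and an even split keeps that magnitude equal to `speed`. A quick standalone check:

import math

speed = 1.5
vx = vy = speed / math.sqrt(2)
assert math.isclose(math.hypot(vx, vy), speed)  # planar magnitude is preserved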
Example No. 3
    def test_init(self):
        # The constructor should store each argument unchanged.
        robot_id = 5
        target_speed = 0
        hard_margin = 20
        soft_margin = 12

        r = rewards.HorizontalMoveSpeedReward(robot_id, target_speed,
                                              hard_margin, soft_margin)

        self.assertEqual(robot_id, r._robot_id)
        self.assertEqual(target_speed, r._target_speed)
        self.assertEqual(hard_margin, r._hard_margin)
        self.assertEqual(soft_margin, r._soft_margin)
Example No. 4
    def _init():
        # Same standing task, but each reward term is registered directly with
        # its own weight (0.2 + 0.2 + 0.3 + 0.3 = 1.0) instead of being
        # composed into a single home-position reward. `length` and
        # `quad_standing_height` are captured from the enclosing scope.
        env_config = solo8v2vanilla.Solo8VanillaConfig()
        env = gym.make('solo8vanilla-v0',
                       config=env_config,
                       normalize_actions=False)

        env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
        env.obs_factory.register_observation(obs.MotorEncoder(env.robot))
        env.termination_factory.register_termination(
            terms.TimeBasedTermination(length))

        env.reward_factory.register_reward(
            .2, rewards.SmallControlReward(env.robot))
        env.reward_factory.register_reward(
            .2, rewards.HorizontalMoveSpeedReward(env.robot, 0))
        env.reward_factory.register_reward(.3,
                                           rewards.FlatTorsoReward(env.robot))
        env.reward_factory.register_reward(
            .3, rewards.TorsoHeightReward(env.robot, quad_standing_height))

        return env
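With the terms registered individually, the reward factory presumably combines them as a weighted sum at each step; a rough sketch of that combination (`registered` is a hypothetical stand-in for the (weight, reward) pairs registered above, not the library's actual API):

def weighted_reward(registered):
  # registered: list of (weight, reward) pairs; the weights above sum to
  # 0.2 + 0.2 + 0.3 + 0.3 = 1.0, so the composite stays on the same scale as
  # the individual terms (assuming each term is itself normalized).
  return sum(weight * term.compute() for weight, term in registered)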
Example No. 5
    def _init():
        # Normalized-action/observation variant of the standing environment.
        # `max_motor_rot`, `length`, `stand_height`, and the reward margins
        # (`fhm`, `fsm`, `thm`, `tsm`, `scm`, `hmhm`, `hmsm`) are captured
        # from the enclosing scope.
        env_config = solo8v2vanilla.Solo8VanillaConfig()
        env_config.max_motor_rotation = max_motor_rot
        env = gym.make('solo8vanilla-v0',
                       config=env_config,
                       normalize_actions=True,
                       normalize_observations=True)

        env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
        env.obs_factory.register_observation(
            obs.MotorEncoder(env.robot, max_rotation=max_motor_rot))
        env.termination_factory.register_termination(
            terms.TimeBasedTermination(length))

        stand_reward = rewards.AdditiveReward()
        stand_reward.client = env.client

        stand_reward.add_term(
            0.5,
            rewards.FlatTorsoReward(env.robot,
                                    hard_margin=fhm,
                                    soft_margin=fsm))
        stand_reward.add_term(
            0.5,
            rewards.TorsoHeightReward(env.robot,
                                      stand_height,
                                      hard_margin=thm,
                                      soft_margin=tsm))

        home_pos_reward = rewards.MultiplicitiveReward(
            1, stand_reward, rewards.SmallControlReward(env.robot, margin=scm),
            rewards.HorizontalMoveSpeedReward(env.robot,
                                              0,
                                              hard_margin=hmhm,
                                              soft_margin=hmsm))

        env.reward_factory.register_reward(1, home_pos_reward)
        return env
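Zero-argument factories like `_init` are the shape expected by vectorized-environment wrappers; a hedged sketch of wrapping this one with Stable Baselines 3's DummyVecEnv (whether this project uses Stable Baselines at all is an assumption):

from stable_baselines3.common.vec_env import DummyVecEnv

n_envs = 4  # hypothetical number of parallel environments
vec_env = DummyVecEnv([_init for _ in range(n_envs)])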
Example No. 6
# Imports assumed from the gym_solo package layout.
import gym
import numpy as np

from gym_solo.core import obs
from gym_solo.core import rewards
from gym_solo.core import termination as terms

from gym_solo.envs import solo8v2vanilla


if __name__ == '__main__':
  config = solo8v2vanilla.Solo8VanillaConfig()
  env: solo8v2vanilla.Solo8VanillaEnv = gym.make('solo8vanilla-v0', use_gui=True, 
                                                 realtime=True, config=config)

  env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
  env.termination_factory.register_termination(terms.PerpetualTermination())

  flat = rewards.FlatTorsoReward(env.robot, hard_margin=.1, soft_margin=np.pi)
  height = rewards.TorsoHeightReward(env.robot, 0.33698, 0.025, 0.15)
  
  small_control = rewards.SmallControlReward(env.robot, margin=10)
  no_move = rewards.HorizontalMoveSpeedReward(env.robot, 0, hard_margin=.5, 
                                              soft_margin=3)
  
  stand = rewards.AdditiveReward()
  stand.client = env.client
  stand.add_term(0.5, flat)
  stand.add_term(0.5, height)

  home_pos = rewards.MultiplicitiveReward(1, stand, small_control, no_move)
  env.reward_factory.register_reward(1, home_pos)

  # One pybullet GUI slider per joint, for manual joint control.
  joint_params = []
  num_joints = env.client.getNumJoints(env.robot)

  for joint in range(num_joints):
    joint_params.append(env.client.addUserDebugParameter(
      'Joint {}'.format(