def _init():
  # Build an unnormalized vanilla Solo8 environment. `length` and
  # `quad_standing_height` come from the enclosing scope.
  env_config = solo8v2vanilla.Solo8VanillaConfig()
  env = gym.make('solo8vanilla-v0', config=env_config,
                 normalize_actions=False)

  env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
  env.obs_factory.register_observation(obs.MotorEncoder(env.robot))
  env.termination_factory.register_termination(
    terms.TimeBasedTermination(length))

  # "Standing" is an even mix of keeping the torso flat and holding it at
  # the nominal standing height.
  stand_reward = rewards.AdditiveReward()
  stand_reward.client = env.client
  stand_reward.add_term(0.5, rewards.FlatTorsoReward(env.robot))
  stand_reward.add_term(
    0.5, rewards.TorsoHeightReward(env.robot, quad_standing_height))

  # Gate the standing reward with small-control and no-horizontal-motion
  # terms so the agent is only rewarded for standing *still*.
  home_pos_reward = rewards.MultiplicitiveReward(
    1, stand_reward,
    rewards.SmallControlReward(env.robot),
    rewards.HorizontalMoveSpeedReward(env.robot, 0))
  home_pos_reward.client = env.client

  env.reward_factory.register_reward(1, home_pos_reward)
  return env
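# A quick numeric sketch of how the composite above bottoms out, assuming
# each component reward is normalized to [0, 1] (the component values are
# invented for illustration):
#
#   stand    = 0.5 * flat + 0.5 * height            # AdditiveReward
#   home_pos = 1 * stand * small_control * no_move  # MultiplicitiveReward
#
# e.g. flat=0.9, height=0.8, small_control=0.7, no_move=1.0 gives
# stand = 0.85 and home_pos = 0.595: any gating term near zero collapses
# the whole reward, while a weak additive term only dilutes it.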
def test_init(self):
  robot_id = 5
  hard_margin = 20
  soft_margin = 12

  r = rewards.FlatTorsoReward(robot_id, hard_margin, soft_margin)

  self.assertEqual(robot_id, r._robot_id)
  self.assertEqual(hard_margin, r._hard_margin)
  self.assertEqual(soft_margin, r._soft_margin)
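# The test above pins down the constructor's contract: the three positional
# arguments are stored verbatim on private attributes. A minimal hypothetical
# __init__ consistent with those assertions (a sketch, not the library's
# actual implementation, which also wires in a pybullet client elsewhere):
class _FlatTorsoRewardSketch:
  def __init__(self, robot_id, hard_margin, soft_margin):
    self._robot_id = robot_id
    self._hard_margin = hard_margin
    self._soft_margin = soft_margin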
def test_computation(self, name, theta, hard, soft, low, high):
  mock_client = mock.MagicMock()
  mock_client.getBasePositionAndOrientation.return_value = None, None
  # Split theta evenly across roll and pitch; the Euclidean norm of the two
  # components, sqrt(roll**2 + pitch**2), is then exactly theta.
  mock_client.getEulerFromQuaternion.return_value = (theta / math.sqrt(2),
                                                     theta / math.sqrt(2),
                                                     None)

  r = rewards.FlatTorsoReward(1, hard, soft)
  r.client = mock_client

  val = r.compute()
  self.assertGreaterEqual(val, low)
  self.assertLessEqual(val, high)
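# The hard/soft margin pair suggests tolerance-style shaping: full reward
# while the tilt stays inside the hard margin, decaying toward zero across
# the soft margin. A hypothetical shaping function in that spirit (this is
# an assumption about the implementation, not the library's actual formula):
def _tolerance_sketch(x, hard_margin, soft_margin):
  """Return 1.0 for |x| <= hard_margin, decaying linearly to 0.0 at
  |x| >= hard_margin + soft_margin."""
  overshoot = max(abs(x) - hard_margin, 0.0)
  return max(1.0 - overshoot / soft_margin, 0.0)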
def _init():
  env_config = solo8v2vanilla.Solo8VanillaConfig()
  env = gym.make('solo8vanilla-v0', config=env_config,
                 normalize_actions=False)

  env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
  env.obs_factory.register_observation(obs.MotorEncoder(env.robot))
  env.termination_factory.register_termination(
    terms.TimeBasedTermination(length))

  # Flat variant: each term is registered directly with the reward factory
  # instead of being composed multiplicatively.
  env.reward_factory.register_reward(
    .2, rewards.SmallControlReward(env.robot))
  env.reward_factory.register_reward(
    .2, rewards.HorizontalMoveSpeedReward(env.robot, 0))
  env.reward_factory.register_reward(.3, rewards.FlatTorsoReward(env.robot))
  env.reward_factory.register_reward(
    .3, rewards.TorsoHeightReward(env.robot, quad_standing_height))

  return env
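# Assuming the reward factory combines registered terms as a weighted sum,
# this variant's per-step reward works out to
#
#   r = 0.2 * small_control + 0.2 * no_move + 0.3 * flat + 0.3 * height
#
# a linear blend, in contrast to the multiplicative gating above: here the
# agent can still collect partial reward from one term even when another
# term is zero.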
def _init():
  env_config = solo8v2vanilla.Solo8VanillaConfig()
  env_config.max_motor_rotation = max_motor_rot
  env = gym.make('solo8vanilla-v0', config=env_config,
                 normalize_actions=True, normalize_observations=True)

  env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
  env.obs_factory.register_observation(
    obs.MotorEncoder(env.robot, max_rotation=max_motor_rot))
  env.termination_factory.register_termination(
    terms.TimeBasedTermination(length))

  # Same composite "stand still" reward as before, but with every margin
  # exposed as a tunable hyperparameter from the enclosing scope.
  stand_reward = rewards.AdditiveReward()
  stand_reward.client = env.client
  stand_reward.add_term(
    0.5, rewards.FlatTorsoReward(env.robot, hard_margin=fhm,
                                 soft_margin=fsm))
  stand_reward.add_term(
    0.5, rewards.TorsoHeightReward(env.robot, stand_height,
                                   hard_margin=thm, soft_margin=tsm))

  home_pos_reward = rewards.MultiplicitiveReward(
    1, stand_reward,
    rewards.SmallControlReward(env.robot, margin=scm),
    rewards.HorizontalMoveSpeedReward(env.robot, 0, hard_margin=hmhm,
                                      soft_margin=hmsm))

  env.reward_factory.register_reward(1, home_pos_reward)
  return env
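# `length`, `max_motor_rot`, `stand_height`, and the margin parameters
# (fhm, fsm, thm, tsm, scm, hmhm, hmsm) are free variables here, so _init is
# presumably returned from an enclosing factory. A hypothetical wrapper in
# that spirit (the name and signature are illustrative, not the project's):
def make_env_factory(length, max_motor_rot, stand_height,
                     fhm, fsm, thm, tsm, scm, hmhm, hmsm):
  def _init():
    ...  # body as above, closing over the arguments
  return _init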
import gym
import numpy as np

from gym_solo.envs import solo8v2vanilla
from gym_solo.core import obs
from gym_solo.core import rewards
from gym_solo.core import termination as terms


if __name__ == '__main__':
  # Interactive demo: run the simulation in realtime with the GUI and never
  # terminate the episode.
  config = solo8v2vanilla.Solo8VanillaConfig()
  env: solo8v2vanilla.Solo8VanillaEnv = gym.make(
    'solo8vanilla-v0', use_gui=True, realtime=True, config=config)

  env.obs_factory.register_observation(obs.TorsoIMU(env.robot))
  env.termination_factory.register_termination(terms.PerpetualTermination())

  flat = rewards.FlatTorsoReward(env.robot, hard_margin=.1,
                                 soft_margin=np.pi)
  height = rewards.TorsoHeightReward(env.robot, 0.33698, 0.025, 0.15)
  small_control = rewards.SmallControlReward(env.robot, margin=10)
  no_move = rewards.HorizontalMoveSpeedReward(env.robot, 0, hard_margin=.5,
                                              soft_margin=3)

  stand = rewards.AdditiveReward()
  stand.client = env.client
  stand.add_term(0.5, flat)
  stand.add_term(0.5, height)

  home_pos = rewards.MultiplicitiveReward(1, stand, small_control, no_move)
  env.reward_factory.register_reward(1, home_pos)

  joint_params = []
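# The script is truncated after `joint_params = []`. For reference, a
# minimal way to keep a realtime GUI session like this alive, assuming the
# standard gym step API with a zero action (an illustrative sketch, not the
# remainder of the original script):
#
#   env.reset()
#   while True:
#     env.step(np.zeros(env.action_space.shape))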