コード例 #1
0
    def __init__(self, visualize, integrator_accuracy, full=False, action_repeat=5, fail_reward=-0.2,
                 exclude_centering_frame=False):
        """Wrap an L2RunEnv with action repetition and a selectable observation vector.

        Parameters
        ----------
        visualize : whether to render the underlying OpenSim model.
        integrator_accuracy : accuracy handed to the OpenSim integrator.
        full : if True use the full observation vector, otherwise the basic one.
        action_repeat : number of simulator frames to skip (repeat) per action.
        fail_reward : reward emitted when the episode fails early.
        exclude_centering_frame : if True, omit the pelvis x from the
            observation vector (observations are centered w.r.t. pelvis x).
        """
        wrapped = L2RunEnv(visualize=visualize)
        wrapped.osim_model.set_integrator_accuracy(integrator_accuracy)
        gym.Wrapper.__init__(self, wrapped)
        wrapped.reset()

        # Keep the construction arguments around for later reset/step logic.
        self.integrator_accuracy = integrator_accuracy
        self.visualize = visualize
        self.full = full
        self.env = wrapped
        self.action_repeat = action_repeat
        self.fail_reward = fail_reward
        self.exclude_centering_frame = exclude_centering_frame
        self.env_step = 0

        # Bind the matching observation builder once, up front.
        self.get_observation = (self.get_observation_full if self.full
                                else self.get_observation_basic)

        # NOTE(review): low == high == 0 is a placeholder bound, mirroring the
        # convention used elsewhere in this project.
        obs_size = self.get_observation_space_size()
        self.observation_space = convert_to_gym(([0] * obs_size, [0] * obs_size))
コード例 #2
0
ファイル: opensim_envs.py プロジェクト: jimimvp/torch_rl
    def __init__(self, visualize=True, integrator_accuracy=5e-5):
        """Build the underlying OsimModel and set up gym-compatible spaces.

        Parameters
        ----------
        visualize : whether to open the OpenSim visualizer.
        integrator_accuracy : accuracy handed to the OpenSim integrator.
        """
        self.osim_model = OsimModel(self.model_path,
                                    visualize,
                                    integrator_accuracy=integrator_accuracy)

        # Mock spec object so OpenAI-gym style code can read timestep_limit.
        self.spec = Spec()
        self.spec.timestep_limit = self.time_limit

        # Only fill in default bounds when a subclass has not already
        # provided its own action/observation spaces.
        if not self.action_space:
            n_act = self.osim_model.get_action_space_size()
            # Muscle activations live in [0, 1].
            self.action_space = ([0.0] * n_act, [1.0] * n_act)
        if not self.observation_space:
            # NOTE(review): low == high == 0 is a degenerate placeholder bound.
            n_obs = self.get_observation_space_size()
            self.observation_space = ([0] * n_obs, [0] * n_obs)

        self.action_space = convert_to_gym(self.action_space)
        self.observation_space = convert_to_gym(self.observation_space)
コード例 #3
0
ファイル: train.ddpg.py プロジェクト: csy888000/osim-rl-csy
def gymify_osim_env(env):
    """Patch *env* in place so it advertises a symmetric [-1, 1] action box.

    The original ``step`` stays reachable as ``env._step``; the installed
    replacement rescales the incoming action before delegating.
    """
    size = env.osim_model.get_action_space_size()
    env.action_space = convert_to_gym(([-1.0] * size, [1.0] * size))

    env._step = env.step

    def _rescaled_step(self, action):
        # NOTE(review): with the advertised [-1, 1] space this maps actions
        # into [-3, 1] — confirm the range the wrapped env actually expects.
        return self._step(action * 2 - 1)

    env.step = types.MethodType(_rescaled_step, env)
    return env
コード例 #4
0
def make_osim_env(env):
    """Return *env* patched with a symmetric [-1, 1] action space.

    Keeps the original ``step`` as ``env._step`` and installs a wrapper that
    applies ``action * 2 - 1`` before forwarding the call.
    """
    low = [-1.0] * env.osim_model.get_action_space_size()
    high = [1.0] * env.osim_model.get_action_space_size()
    env.action_space = convert_to_gym((low, high))

    env._step = env.step

    def step(self, action):
        # Rescale, then forward to the saved original step.
        return self._step(action * 2 - 1)

    env.step = types.MethodType(step, env)
    return env
コード例 #5
0
ファイル: run.py プロジェクト: toanngosy/robustprosthetics
# Examine the action space ##
action_size = env.action_space.shape[0]
print('Size of each action:', action_size)
action_low = env.action_space.low
print('Action low:', action_low)
action_high = env.action_space.high
print('Action high: ', action_high)

# Examine the state space ##
state_size = env.observation_space.shape[0]
print('Size of state:', state_size)

# Redefine action_space to -1/1 (sac implementation needs a symmetric action space) #
env.action_space = ([-1.0] * env.get_action_space_size(),
                    [1.0] * env.get_action_space_size())
env.action_space = convert_to_gym(env.action_space)


# Set observation space to a length of 43 (43 values are returned, but the
# initial environment reports an observation space size of 41)
def new_get_observation_space_size():
    # Hard-coded override: the env actually emits 43 observation values.
    return 43


env.get_observation_space_size = new_get_observation_space_size

# NOTE(review): low == high == 0 gives a zero-width Box; these bounds appear
# to be placeholders, mirroring the project's convention elsewhere.
env.observation_space = ([0] * env.get_observation_space_size(),
                         [0] * env.get_observation_space_size())
env.observation_space = convert_to_gym(env.observation_space)

# Create log dir for callback model saving
コード例 #6
0
# Fill in bounds for the muscle-related columns of the body observation.
# The columns are interleaved triples (force, length, velocity) over two
# index runs — presumably right leg (offsets 0..32) then left leg (44..76);
# TODO confirm against the observation layout.
# NOTE(review): obs_body_space and obs_vtgt_space are defined above this chunk.
obs_body_space[:, [
    20 + x for x in list(range(0, 33, 3)) + list(range(44, 77, 3))
]] = np.array([[
    0, 3
]]).transpose()  # (r, l) muscle forces, normalized to maximum isometric force
obs_body_space[:, [
    21 + x for x in list(range(0, 33, 3)) + list(range(44, 77, 3))
]] = np.array(
    [[0,
      3]]).transpose()  # (r, l) muscle lengths, normalized to optimal length
obs_body_space[:, [
    22 + x for x in list(range(0, 33, 3)) + list(range(44, 77, 3))
]] = np.array([[-50, 50]]).transpose(
)  # (r, l) muscle velocities, normalized to optimal length per second
# Full observation space: target-velocity-field bounds followed by body bounds.
observation_space = np.concatenate((obs_vtgt_space, obs_body_space), axis=1)
observation_space = convert_to_gym(observation_space)


def vtgt_field_to_single_vec(field):
    """Return the target-velocity vector sampled at the field's center cell (5, 5)."""
    grid = np.array(field)
    return grid[:, 5, 5]


def obs_dict_to_list(obs_dict):
    LENGTH0 = 1.

    # Augmented environment from the L2R challenge
    res = []

    # target velocity field (in body frame)
    v_tgt = np.ndarray.flatten(obs_dict['v_tgt_field'])
    res += v_tgt.tolist()