# Exemplo n.º 1
# 0
    def reset(self):
        """Reset the wrapped DeepMimic env and return the initial observation.

        Lazily builds the internal PyBulletDeepMimicEnv on the first call,
        using the START init strategy in test mode and RANDOM otherwise.
        Caches the per-agent state/action normalization parameters and
        records the post-reset state before returning it as an ndarray.
        """
        if self._internal_env is None:
            # Pose-initialization strategy depends on test mode.
            strategy = (InitializationStrategy.START
                        if self.test_mode
                        else InitializationStrategy.RANDOM)
            self._internal_env = PyBulletDeepMimicEnv(
                self._arg_parser,
                self._renders,
                time_step=self._time_step,
                init_strategy=strategy)

        self._internal_env.reset()
        self._p = self._internal_env._pybullet_client

        # Refresh the per-agent normalization offsets/scales.
        agent_id = self.agent_id
        self._state_offset = self._internal_env.build_state_offset(agent_id)
        self._state_scale = self._internal_env.build_state_scale(agent_id)
        self._action_offset = self._internal_env.build_action_offset(agent_id)
        self._action_scale = self._internal_env.build_action_scale(agent_id)
        self._numSteps = 0

        # Record the freshly reset state on the instance.
        self.state = self._internal_env.record_state(agent_id)

        observation = np.array(self.state)
        if self._rescale_observations:
            # Standardize using the env-provided offset/scale; the epsilon
            # guards against division by zero.
            mean = -self._state_offset
            std = 1. / self._state_scale
            observation = (observation - mean) / (std + 1e-8)
        return observation
# Exemplo n.º 2
# 0
def build_world(args, enable_draw):
    """Construct the DeepMimic RL world and attach a PPO agent to it.

    Args:
        args: command-line argument list forwarded to the arg parser and env.
        enable_draw: whether the PyBullet environment should render.

    Returns:
        The initialized RLWorld, reset and with training enabled on its agent.

    Raises:
        AssertionError: if the agent JSON file lacks the "AgentType" key.
    """
    arg_parser = build_arg_parser(args)
    print("enable_draw=", enable_draw)
    env = PyBulletDeepMimicEnv(args, enable_draw)
    world = RLWorld(env, arg_parser)

    motion_file = arg_parser.parse_string("motion_file")
    print("motion_file=", motion_file)
    bodies = arg_parser.parse_ints("fall_contact_bodies")
    print("bodies=", bodies)
    int_output_path = arg_parser.parse_string("int_output_path")
    print("int_output_path=", int_output_path)
    # Agent spec path is resolved relative to the pybullet data directory.
    agent_files = pybullet_data.getDataPath() + "/" + arg_parser.parse_string(
        "agent_files")

    AGENT_TYPE_KEY = "AgentType"

    print("agent_file=", agent_files)
    with open(agent_files) as data_file:
        json_data = json.load(data_file)
        print("json_data=", json_data)
        assert AGENT_TYPE_KEY in json_data
        agent_type = json_data[AGENT_TYPE_KEY]
        print("agent_type=", agent_type)
        # BUG FIX: the original passed the *builtin* `id` function as the
        # agent id; use an explicit integer id for the single agent instead.
        agent = PPOAgent(world, 0, json_data)

        agent.set_enable_training(True)
        world.reset()
    return world
# Exemplo n.º 3
# 0
 def reset(self):
     """Reset the simulation and return the recorded state.

     Lazily constructs the internal PyBulletDeepMimicEnv on first use,
     then resets it and records the state for the (unused) agent id -1.
     """
     # BUG FIX: compare to None with `is`, not `==` (PEP 8 identity check).
     if self._internal_env is None:
         self._internal_env = PyBulletDeepMimicEnv(self._arg_parser,
                                                   self._renders)
     self._internal_env.reset()
     self._p = self._internal_env._pybullet_client
     agent_id = -1  # unused here
     state = self._internal_env.record_state(agent_id)
     return state
def build_world(enable_draw, arg_file):
    """Build an RLWorld from an argument file and attach a PPO agent.

    Args:
        enable_draw: whether the PyBullet environment should render.
        arg_file: path to the DeepMimic argument file to load.

    Returns:
        The constructed RLWorld.
    """
    arg_parser = ArgParser()

    arg_parser.load_file(arg_file)
    arg_parser.parse_string("motion_file")

    env = PyBulletDeepMimicEnv(arg_parser=arg_parser, enable_draw=enable_draw)

    world = RLWorld(env, arg_parser)

    # BUG FIX: `agent_file` was never defined (guaranteed NameError at
    # runtime); read it from the parsed arg file like the other settings.
    agent_file = arg_parser.parse_string("agent_files")
    agent_data = load_agent_data(agent_file)

    # BUG FIX: the original passed the builtin `id` function as the agent id;
    # use an explicit integer id. NOTE(review): the agent is presumably
    # registered with `world` by PPOAgent's constructor — confirm, since the
    # return value is discarded.
    PPOAgent(world=world, id=0, json_data=agent_data)

    return world