Example #1
class ActionGiver:
    """Thin wrapper that exposes the RL world's policy as an action query."""

    def __init__(self, args):
        # Parse the arguments and build the RL world that holds the policy.
        arg_parser = build_arg_parser(args)
        self.world = RLWorld(arg_parser)

    def get_ac(self, s, g):
        # Ask the world's agent for an action given state s and goal g.
        return self.world.get_action(s, g)
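A quick sketch of how this wrapper might be driven (assumed usage only: build_arg_parser, RLWorld, and the state/goal vectors s and g come from the surrounding project):

import sys

# Hypothetical driver: construct the wrapper from the command-line arguments,
# then call get_ac(s, g) each control step with the environment's state s and goal g.
giver = ActionGiver(sys.argv[1:])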
Example #2
def build_world(args, enable_draw, playback_speed=1):
    arg_parser = build_arg_parser(args)
    # build env: core, seed
    env = DeepMimicEnv(args, enable_draw)
    # build world for agent
    world = RLWorld(env, arg_parser)
    world.env.set_playback_speed(playback_speed)
    return world
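A call to this helper might look as follows (a sketch only: the argument list and playback speed are placeholders, and the surrounding project must provide build_arg_parser, DeepMimicEnv, and RLWorld):

import sys

# Hypothetical invocation: build a drawable world from the CLI arguments
# and play the reference motion back at half speed.
world = build_world(sys.argv[1:], enable_draw=True, playback_speed=0.5)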
Example #3
def build_world(args, enable_draw, playback_speed=1):
    arg_parser = build_arg_parser(args)
    env = DeepMimicEnv(args, enable_draw)
    world = RLWorld(env, arg_parser)
    world.env.set_playback_speed(playback_speed)
    return world
Example #4
    def __init__(self, args):
        arg_parser = build_arg_parser(args)
        self.world = RLWorld(arg_parser)
Example #5
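The snippet below is a fragment of an evaluation script built on pybullet's deep_mimic example; the names it uses would typically come from imports along these lines (module paths assumed from the pybullet_envs.deep_mimic package and may differ across versions):

import sys

import pybullet_data
from pybullet_utils.arg_parser import ArgParser
from pybullet_envs.deep_mimic.env.pybullet_deep_mimic_env import PyBulletDeepMimicEnv
from pybullet_envs.deep_mimic.learning.rl_world import RLWorld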
    # Simulation settings: render the GUI and step the physics at 240 Hz.
    enable_draw = True
    timestep = 1. / 240.

    args = sys.argv[1:]

    # Load the command-line arguments, then the arg file that configures the task.
    arg_parser = ArgParser()
    arg_parser.load_args(args)
    arg_file = arg_parser.parse_string('arg_file',
                                       "run_humanoid3d_spinkick_args.txt")
    arg_parser.load_file(pybullet_data.getDataPath() + "/args/" + arg_file)

    # Read the reference motion and the bodies whose ground contact counts as a fall.
    motion_file = arg_parser.parse_strings('motion_file')
    fall_contact_bodies = arg_parser.parse_ints("fall_contact_bodies")

    # Build the PyBullet DeepMimic environment and wrap it in an RL world.
    env = PyBulletDeepMimicEnv(motion_file, enable_draw, fall_contact_bodies)
    world = RLWorld(env, arg_parser)

    world.reset()

    total_reward = 0
    steps = 0

    # Run the policy: step the world, accumulate reward, and reset at episode end.
    while True:

        world.update(timestep)
        total_reward += world.env.calc_reward(agent_id=0)

        steps += 1

        if world.env.is_episode_end() or steps >= 1000:
            print("episode reward:", total_reward)
            total_reward = 0
            steps = 0
            world.reset()