Example 1
def main():

    core_dir = "./environment"
    envs_dir = './environment/envs'
    xmls_dir = './environment/assets/xmls'

    examine_env("./environment/envs/icra.py", {},
                core_dir=core_dir, envs_dir=envs_dir, xmls_dir=xmls_dir,
                env_viewer=EnvViewer)
Example 2
def main():

    core_dir = "./"
    envs_dir = './envs'
    xmls_dir = './assets/xmls'

    examine_env("./envs/base.py", {},
                core_dir=core_dir,
                envs_dir=envs_dir,
                xmls_dir=xmls_dir,
                env_viewer=EnvViewer)
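
The second positional argument to examine_env is a dict of environment kwargs; both examples above pass an empty dict. A minimal sketch of overriding an environment parameter through that dict (the kwarg name n_agents is only an illustration; accepted names depend on the environment file being examined):

# Same call as Example 2, but with an environment kwarg override.
# 'n_agents' is a hypothetical parameter; valid names depend on ./envs/base.py.
examine_env("./envs/base.py", {"n_agents": 2},
            core_dir="./",
            envs_dir="./envs",
            xmls_dir="./assets/xmls",
            env_viewer=EnvViewer)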
Example 3
def main(argv):
    '''
    examine.py is used to display environments

    Example uses:
        bin/examine.py simple_particle
        bin/examine.py examples/particle_gather.py
        bin/examine.py particle_gather n_food=5 floorsize=5
        bin/examine.py example_env_examine.jsonnet
    '''
    env_names, env_kwargs = parse_arguments(argv)
    assert len(env_names) == 1, 'You must provide exactly 1 environment to examine.'
    env_name = env_names[0]
    examine_env(env_name,
                env_kwargs,
                core_dir=worldgen_path(),
                envs_dir='examples',
                xmls_dir='xmls',
                env_viewer=EnvViewer)

    print(main.__doc__)
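
parse_arguments comes from mujoco-worldgen and is what turns tokens such as n_food=5 into keyword arguments. Below is a minimal sketch of that key=value splitting, written only as an illustration of the behavior implied by the docstring, not the library's actual implementation:

from ast import literal_eval

def split_cli_args(argv):
    # Hypothetical helper: separate bare names from key=value overrides.
    names, kwargs = [], {}
    for token in argv:
        if '=' in token:
            key, raw = token.split('=', 1)
            try:
                kwargs[key] = literal_eval(raw)   # "5" -> 5, "0.5" -> 0.5
            except (ValueError, SyntaxError):
                kwargs[key] = raw                 # keep unparseable values as strings
        else:
            names.append(token)                   # environment (or policy) names
    return names, kwargs

# split_cli_args(['particle_gather', 'n_food=5', 'floorsize=5'])
# -> (['particle_gather'], {'n_food': 5, 'floorsize': 5})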
Example 4
def main(argv):
    '''
    examine.py is used to display environments and run policies.

    For an example environment jsonnet, see
        mujoco-worldgen/examples/example_env_examine.jsonnet
    You can find saved policies in the 'examples' folder together with the environment they were
    trained in and the hyperparameters used. The naming used is 'examples/<env_name>.jsonnet' for
    the environment jsonnet file and 'examples/<env_name>.npz' for the policy weights file.
    Example uses:
        bin/examine.py hide_and_seek
        bin/examine.py mae_envs/envs/base.py
        bin/examine.py base n_boxes=6 n_ramps=2 n_agents=3
        bin/examine.py my_env_jsonnet.jsonnet
        bin/examine.py my_env_jsonnet.jsonnet my_policy.npz
        bin/examine.py hide_and_seek my_policy.npz n_hiders=3 n_seekers=2 n_boxes=8 n_ramps=1
    '''
    names, kwargs = parse_arguments(argv)

    env_name = names[0]
    core_dir = abspath(join(dirname(__file__), '..'))
    envs_dir = 'mae_envs/envs'
    xmls_dir = 'xmls'

    if len(names) == 1:  # examine the environment
        examine_env(env_name,
                    kwargs,
                    core_dir=core_dir,
                    envs_dir=envs_dir,
                    xmls_dir=xmls_dir,
                    env_viewer=EnvViewer)

    if len(names) >= 2:  # run policies on the environment
        # importing PolicyViewer and load_policy here because they depend on several
        # packages which are only needed for playing policies, not for any of the
        # environments code.
        from mae_envs.viewer.policy_viewer import PolicyViewer
        from ma_policy.load_policy import load_policy
        policy_names = names[1:]
        env, args_remaining_env = load_env(env_name,
                                           core_dir=core_dir,
                                           envs_dir=envs_dir,
                                           xmls_dir=xmls_dir,
                                           return_args_remaining=True,
                                           **kwargs)

        if env is None:
            raise Exception(
                f'Could not find environment based on pattern {env_name}')
        if isinstance(env.action_space, Tuple):
            env = JoinMultiAgentActions(env)

        env.reset()  # generate action and observation spaces
        assert np.all([name.endswith('.npz') for name in policy_names])
        policies = [
            load_policy(name, env=env, scope=f'policy_{i}')
            for i, name in enumerate(policy_names)
        ]

        args_remaining_policy = args_remaining_env

        if env is not None and policies is not None:
            args_to_pass, args_remaining_viewer = extract_matching_arguments(
                PolicyViewer, kwargs)
            args_remaining = set(args_remaining_env)
            args_remaining = args_remaining.intersection(
                set(args_remaining_policy))
            args_remaining = args_remaining.intersection(
                set(args_remaining_viewer))
            assert len(args_remaining) == 0, (
                f"There are unused arguments left: {args_remaining}. There shouldn't be any."
            )
            viewer = PolicyViewer(env, policies, **args_to_pass)
            viewer.run()

    print(main.__doc__)
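
extract_matching_arguments is used above to decide which of the remaining CLI kwargs PolicyViewer can accept. A rough sketch of that idea using inspect.signature follows; the real mujoco-worldgen helper may differ, so treat the names and return types here as assumptions:

import inspect

def split_kwargs_for(callable_obj, kwargs):
    # Keep the kwargs named in the callable's signature; report the rest as leftovers.
    params = inspect.signature(callable_obj).parameters
    matching = {k: v for k, v in kwargs.items() if k in params}
    remaining = [k for k in kwargs if k not in params]
    return matching, remaining

# split_kwargs_for(PolicyViewer, {'fps': 30, 'n_boxes': 8})
# -> ({'fps': 30}, ['n_boxes'])   # assuming PolicyViewer accepts an 'fps' kwarg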
Example 5
def main(args):
    '''
    examine.py is used to display environments and run policies.

    For an example environment jsonnet, see
        mujoco-worldgen/examples/example_env_examine.jsonnet
    You can find saved policies in the 'examples' folder together with the environment they were
    trained in and the hyperparameters used. The naming used is 'examples/<env_name>.jsonnet' for
    the environment jsonnet file and 'examples/<env_name>.npz' for the policy weights file.
    Example uses:
        bin/examine.py hide_and_seek
        bin/examine.py mae_envs/envs/base.py
        bin/examine.py base n_boxes=6 n_ramps=2 n_agents=3
        bin/examine.py my_env_jsonnet.jsonnet
        bin/examine.py my_env_jsonnet.jsonnet my_policy.npz
        bin/examine.py hide_and_seek my_policy.npz n_hiders=3 n_seekers=2 n_boxes=8 n_ramps=1
        bin/examine.py examples/hide_and_seek_quadrant.jsonnet examples/hide_and_seek_quadrant.npz
    '''
    parser = get_config()
    args = parse_args(args, parser)
    kwargs = {'args': args}

    env_name = args.env_name
    num_hiders = args.num_hiders
    num_seekers = args.num_seekers
    num_agents = num_hiders + num_seekers
    core_dir = abspath(join(dirname(__file__)))
    envs_dir = 'envs/hns/envs'  # where hide_and_seek.py is.
    xmls_dir = 'xmls'

    if args.use_render:  # run policies on the environment
        # importing PolicyViewer and load_policy here because they depend on several
        # packages which are only needed for playing policies, not for any of the
        # environments code.
        from onpolicy.envs.hns.viewer.policy_viewer import PolicyViewer_hs      
        from onpolicy.envs.hns.ma_policy.load_policy import load_policy
        env, args_remaining_env = load_env(env_name, core_dir=core_dir,
                                           envs_dir=envs_dir, xmls_dir=xmls_dir,
                                           return_args_remaining=True, **kwargs)
        
        if env is None:
            raise Exception(f'Could not find environment based on pattern {env_name}')
        if isinstance(env.action_space, Tuple):
            env = JoinMultiAgentActions(env)
        
        env.reset()  # generate action and observation spaces
        
        policies = []
        for agent_id in range(num_agents):
            if args.share_policy:
                actor_critic = torch.load(str(args.model_dir) + "/agent_model.pt")['model']
            else:
                actor_critic = torch.load(str(args.model_dir) + "/agent" + str(agent_id) + "_model.pt")['model']
            policies.append(actor_critic)

        args_remaining_policy = args_remaining_env
        
        if env is not None and policies is not None:
            args_to_pass, args_remaining_viewer = extract_matching_arguments(PolicyViewer_hs, kwargs)
            args_remaining = set(args_remaining_env)
            args_remaining = args_remaining.intersection(set(args_remaining_policy))
            args_remaining = args_remaining.intersection(set(args_remaining_viewer))
            assert len(args_remaining) == 0, (
                f"There are unused arguments left: {args_remaining}. There shouldn't be any.")
            viewer = PolicyViewer_hs(env, policies, **args_to_pass)
            viewer.run()
    else:
        # examine the environment
        examine_env(env_name, kwargs,
                    core_dir=core_dir, envs_dir=envs_dir, xmls_dir=xmls_dir,
                    env_viewer=EnvViewer)
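
For reference, the per-agent checkpoint loading above can be wrapped in a small helper. The 'model' key and the agent_model.pt / agent<N>_model.pt file names are taken from the example; map_location='cpu' and the eval() call are added assumptions so the checkpoints also open on a machine without CUDA:

import torch

def load_agent_policies(model_dir, num_agents, share_policy):
    # Mirrors the loading loop in Example 5: one shared checkpoint or one per agent.
    policies = []
    for agent_id in range(num_agents):
        name = "agent_model.pt" if share_policy else f"agent{agent_id}_model.pt"
        checkpoint = torch.load(f"{model_dir}/{name}", map_location='cpu')  # assumption: CPU is enough for viewing
        actor_critic = checkpoint['model']
        actor_critic.eval()  # inference only while rendering
        policies.append(actor_critic)
    return policies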