Example #1
import memory_profiler

from metaworld.envs.mujoco.env_dict import ALL_ENVIRONMENTS


def mt50_usage():
    # Record the peak memory usage (in MiB) of building and stepping each
    # environment class once.
    profile = {}
    for env_cls in ALL_ENVIRONMENTS.values():
        target = (build_and_step, [env_cls], {})
        memory_usage = memory_profiler.memory_usage(target)
        profile[env_cls] = max(memory_usage)

    return profile
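
build_and_step is defined elsewhere in the source file and does not appear in this snippet. A minimal sketch of what such a helper plausibly does (construct the environment, reset it, and take one random step so all simulation memory gets allocated); this is a hypothetical reconstruction, not the repository's actual helper:

def build_and_step(env_cls):
    # Hypothetical sketch: instantiate, reset, and take a single random
    # action so that model and simulation state are fully allocated.
    env = env_cls()
    env.reset()
    env.step(env.action_space.sample())
    return env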
Example #2
from gym.spaces import Discrete
import pytest
import numpy as np

from tests.helpers import step_env

from metaworld.envs.mujoco.env_dict import ALL_ENVIRONMENTS, _hard_mode_args_kwargs


@pytest.fixture(scope='module', params=list(ALL_ENVIRONMENTS.keys()))
def env(request):
    env_cls = ALL_ENVIRONMENTS[request.param]
    env_args_kwargs = _hard_mode_args_kwargs(env_cls, request.param)
    env_args = env_args_kwargs['args']
    env_kwargs = env_args_kwargs['kwargs']
    del env_kwargs['task_id']
    env = env_cls(*env_args, **env_kwargs)

    yield env

    # clean-up
    env.close()


def test_all_envs_step(env):
    step_env(env, max_path_length=10)


def test_obs_type(env):
    o = env.reset()
    o_g = env._get_obs()
    # The snippet is truncated here; presumably it asserts that reset()
    # returns the same observation _get_obs() produces (assumed, not from
    # the source):
    assert np.array_equal(o, o_g)
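
The step_env helper imported from tests.helpers above is not shown in this snippet. A plausible sketch, assuming it simply drives the environment with random actions for up to max_path_length steps (hypothetical, not the repository's actual helper):

def step_env(env, max_path_length=100):
    # Hypothetical sketch: reset, then take random actions until the
    # episode ends or max_path_length steps have elapsed (4-tuple gym API).
    env.reset()
    for _ in range(max_path_length):
        _, _, done, _ = env.step(env.action_space.sample())
        if done:
            break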
Example #3
import memory_profiler

from metaworld.envs.mujoco.env_dict import ALL_ENVIRONMENTS


def test_avg_memory_usage():
    # average usage no greater than 60MB/env
    # (build_and_step_all is defined elsewhere in the file; see Example #4)
    target = (build_and_step_all, [ALL_ENVIRONMENTS.values()], {})
    usage = memory_profiler.memory_usage(target)
    average = max(usage) / len(ALL_ENVIRONMENTS)
    assert average < 60
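
memory_profiler.memory_usage accepts a (func, args, kwargs) tuple, runs the function, and returns a list of memory samples in MiB, so max(usage) is the process's peak resident memory during the call. A minimal self-contained demonstration (the allocate helper is hypothetical):

import memory_profiler


def allocate(n):
    # Hypothetical helper: hold a large list so the allocation shows up
    # in the sampled memory profile.
    data = [0] * n
    return len(data)


samples = memory_profiler.memory_usage((allocate, [10_000_000], {}))
print(max(samples))  # peak usage in MiB while allocate() ran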
Example #4
import memory_profiler
import pytest

from metaworld.envs.mujoco.env_dict import ALL_ENVIRONMENTS


def build_and_step_all(classes):
    # (Reconstructed from context; the snippet originally began mid-function.)
    # Build and step every environment, keeping a reference to each so their
    # memory usage accumulates in a single process. build_and_step is defined
    # earlier in the original file (see Example #1).
    envs = []
    for env_cls in classes:
        env = build_and_step(env_cls)
        envs += [env]


@pytest.fixture(scope='module')
def mt50_usage():
    profile = {}
    for env_cls in ALL_ENVIRONMENTS.values():
        target = (build_and_step, [env_cls], {})
        memory_usage = memory_profiler.memory_usage(target)
        profile[env_cls] = max(memory_usage)

    return profile


@pytest.mark.skip
@pytest.mark.parametrize('env_cls', ALL_ENVIRONMENTS.values())
def test_max_memory_usage(env_cls, mt50_usage):
    # No env should use more than 250MB
    #
    # Note: this is quite a bit higher than the average usage cap, because
    # loading a single environment incurs a fixed memory overhead which can't
    # be shared among environments in the same process
    assert mt50_usage[env_cls] < 250


@pytest.mark.skip
def test_avg_memory_usage():
    # average usage no greater than 60MB/env
    target = (build_and_step_all, [ALL_ENVIRONMENTS.values()], {})
    usage = memory_profiler.memory_usage(target)
    average = max(usage) / len(ALL_ENVIRONMENTS)
    assert average < 60
Example #5
import pytest

from metaworld.benchmarks import ML45
from metaworld.envs.mujoco.env_dict import ALL_ENVIRONMENTS


@pytest.mark.parametrize('task_name', list(ALL_ENVIRONMENTS.keys())[:10])
def test_static_task_ids(task_name):
    env = ML45.from_task(task_name)
    assert env.active_task == list(ALL_ENVIRONMENTS.keys()).index(task_name)
Example #6
import pytest
import numpy as np

from metaworld.benchmarks import ML10, ML45
from metaworld.envs.mujoco.env_dict import HARD_MODE_CLS_DICT, MEDIUM_MODE_CLS_DICT, MEDIUM_MODE_ARGS_KWARGS, ALL_ENVIRONMENTS
from metaworld.envs.mujoco.multitask_env import MultiClassMultiTaskEnv
from metaworld.envs.mujoco.sawyer_xyz import SawyerReachPushPickPlaceEnv
from metaworld.envs.mujoco.sawyer_xyz import SawyerReachPushPickPlaceWallEnv


HARD_MODE_LIST = (list(HARD_MODE_CLS_DICT['train'].values()) +
                  list(HARD_MODE_CLS_DICT['test'].values()))


@pytest.mark.parametrize('env_cls', ALL_ENVIRONMENTS.values())
def test_single_env_multi_goals_discrete(env_cls):
    env_cls_dict = {'wrapped': env_cls}
    env_args_kwargs = {'wrapped': dict(args=[], kwargs={'task_id': 1})}
    multi_task_env = MultiClassMultiTaskEnv(
        task_env_cls_dict=env_cls_dict,
        task_args_kwargs=env_args_kwargs,
        sample_goals=True,
        obs_type='with_goal_id'
    )
    goals = multi_task_env.active_env.sample_goals_(2)
    assert len(goals) == 2
    goals_dict = {'wrapped': goals}
    multi_task_env.discretize_goal_space(goals_dict)

    assert multi_task_env._fully_discretized
    tasks_with_goals = multi_task_env.sample_tasks(2)
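
The snippet is cut off after sampling. Typically each sampled task would then be set on the wrapper and the environment stepped; a hedged sketch of that follow-up, assuming the wrapper exposes the usual set_task/reset/step interface:

    for task in tasks_with_goals:
        multi_task_env.set_task(task)  # assumed API for selecting the task
        multi_task_env.reset()
        multi_task_env.step(multi_task_env.action_space.sample())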
Example #7
    # (Snippet begins mid-function; this opening branch is reconstructed.)
    if observation_type == 'plain':
        assert multi_task_env.observation_space.shape == (9, )
    elif observation_type == 'with_goal_id':
        assert multi_task_env.observation_space.shape == (59, )
    elif observation_type == 'with_goal_and_id':
        assert multi_task_env.observation_space.shape == (62, )
    elif observation_type == 'with_goal':
        assert multi_task_env.observation_space.shape == (12, )


def test_action_space():
    env_cls_dict = {
        'pick-place-v1': MEDIUM_MODE_CLS_DICT['train']['pick-place-v1']
    }
    env_args_kwargs = {
        key: MEDIUM_MODE_ARGS_KWARGS['train'][key]
        for key in env_cls_dict.keys()
    }
    multi_task_env = MultiClassMultiTaskEnv(
        task_env_cls_dict=env_cls_dict,
        task_args_kwargs=env_args_kwargs,
        sample_goals=True,
        obs_type="plain",
    )
    assert multi_task_env.action_space.shape == (4, )


@pytest.mark.parametrize('task_name', list(ALL_ENVIRONMENTS.keys())[:10])
def test_static_task_ids(task_name):
    env = ML45.from_task(task_name)
    assert env.active_task == list(ALL_ENVIRONMENTS.keys()).index(task_name)
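
ML45.from_task builds a benchmark environment pinned to the named task, and active_task is that task's fixed index in ALL_ENVIRONMENTS, which is what the assertion checks. A hedged usage sketch (the 'reach-v1' task name is an assumption):

from metaworld.benchmarks import ML45

env = ML45.from_task('reach-v1')  # assumed task name from ALL_ENVIRONMENTS
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())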