def test_commandline_args(mock_file):
    """parse_command_line: defaults with only a trainer path, and full CLI override."""
    # An empty argument list is rejected — the trainer-config path is mandatory.
    with pytest.raises(SystemExit):
        parse_command_line([])

    # Only the trainer path given: every other option falls back to its default.
    options = parse_command_line(["mytrainerpath"])
    assert options.trainer_config == {}
    assert options.env_path is None
    assert options.curriculum_config is None
    assert options.sampler_config is None
    assert options.keep_checkpoints == 5
    assert options.lesson == 0
    assert options.resume is False
    assert options.inference is False
    assert options.run_id == "ppo"
    assert options.save_freq == 50000
    assert options.seed == -1
    assert options.base_port == 5005
    assert options.num_envs == 1
    assert options.no_graphics is False
    assert options.debug is False
    assert options.env_args is None

    # Every flag set explicitly: the parsed options must echo the CLI values.
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--curriculum=./mycurriculum",
        "--sampler=./mysample",
        "--keep-checkpoints=42",
        "--lesson=3",
        "--resume",
        "--inference",
        "--run-id=myawesomerun",
        "--save-freq=123456",
        "--seed=7890",
        "--train",
        "--base-port=4004",
        "--num-envs=2",
        "--no-graphics",
        "--debug",
    ]
    options = parse_command_line(cli_args)
    assert options.trainer_config == {}
    assert options.env_path == "./myenvfile"
    assert options.curriculum_config == {}
    assert options.sampler_config == {}
    assert options.keep_checkpoints == 42
    assert options.lesson == 3
    assert options.run_id == "myawesomerun"
    assert options.save_freq == 123456
    assert options.seed == 7890
    assert options.base_port == 4004
    assert options.num_envs == 2
    assert options.no_graphics is True
    assert options.debug is True
    assert options.inference is True
    assert options.resume is True
def test_commandline_args(mock_file):
    """parse_command_line: defaults, CLI overrides, and the --resume/--initialize-from interplay."""
    # NOTE(review): the old "no args raises SystemExit" check was disabled upstream
    # and is intentionally omitted here.

    # Only the trainer path given: every other option falls back to its default.
    options = parse_command_line(["mytrainerpath"])
    assert options.behaviors == {}
    assert options.env_settings.env_path is None
    assert options.checkpoint_settings.resume is False
    assert options.checkpoint_settings.inference is False
    assert options.checkpoint_settings.run_id == "ppo"
    assert options.checkpoint_settings.initialize_from is None
    assert options.env_settings.seed == -1
    assert options.env_settings.base_port == 5005
    assert options.env_settings.num_envs == 1
    assert options.env_settings.num_areas == 1
    assert options.engine_settings.no_graphics is False
    assert options.debug is False
    assert options.env_settings.env_args is None

    # Every flag set explicitly: the parsed options must echo the CLI values.
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--inference",
        "--run-id=myawesomerun",
        "--seed=7890",
        "--train",
        "--base-port=4004",
        "--initialize-from=testdir",
        "--num-envs=2",
        "--num-areas=2",
        "--no-graphics",
        "--debug",
    ]
    options = parse_command_line(cli_args)
    assert options.behaviors == {}
    assert options.env_settings.env_path == "./myenvfile"
    assert options.checkpoint_settings.run_id == "myawesomerun"
    assert options.checkpoint_settings.initialize_from == "testdir"
    assert options.env_settings.seed == 7890
    assert options.env_settings.base_port == 4004
    assert options.env_settings.num_envs == 2
    assert options.env_settings.num_areas == 2
    assert options.engine_settings.no_graphics is True
    assert options.debug is True
    assert options.checkpoint_settings.inference is True
    assert options.checkpoint_settings.resume is False

    # --resume takes priority: initialize_from is dropped when both are given.
    cli_args.append("--resume")
    options = parse_command_line(cli_args)
    assert options.checkpoint_settings.initialize_from is None
    assert options.checkpoint_settings.resume is True
def test_commandline_args(mock_file):
    """parse_command_line: defaults with only a trainer path, then full CLI override."""
    # NOTE(review): the old "no args raises SystemExit" check was disabled upstream
    # and is intentionally omitted here.

    # Only the trainer path given: every other option falls back to its default.
    options = parse_command_line(["mytrainerpath"])
    assert options.behaviors == {}
    assert options.env_settings.env_path is None
    assert options.parameter_randomization is None
    assert options.checkpoint_settings.lesson == 0
    assert options.checkpoint_settings.resume is False
    assert options.checkpoint_settings.inference is False
    assert options.checkpoint_settings.run_id == "ppo"
    assert options.checkpoint_settings.save_freq == 50000
    assert options.env_settings.seed == -1
    assert options.env_settings.base_port == 5005
    assert options.env_settings.num_envs == 1
    assert options.engine_settings.no_graphics is False
    assert options.debug is False
    assert options.env_settings.env_args is None

    # Every flag set explicitly: the parsed options must echo the CLI values.
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--lesson=3",
        "--resume",
        "--inference",
        "--run-id=myawesomerun",
        "--save-freq=123456",
        "--seed=7890",
        "--train",
        "--base-port=4004",
        "--num-envs=2",
        "--no-graphics",
        "--debug",
    ]
    options = parse_command_line(cli_args)
    assert options.behaviors == {}
    assert options.env_settings.env_path == "./myenvfile"
    assert options.parameter_randomization is None
    assert options.checkpoint_settings.lesson == 3
    assert options.checkpoint_settings.run_id == "myawesomerun"
    assert options.checkpoint_settings.save_freq == 123456
    assert options.env_settings.seed == 7890
    assert options.env_settings.base_port == 4004
    assert options.env_settings.num_envs == 2
    assert options.engine_settings.no_graphics is True
    assert options.debug is True
    assert options.checkpoint_settings.inference is True
    assert options.checkpoint_settings.resume is True
def test_yaml_args(mock_file):
    """Options come from the mocked YAML file, and explicit CLI flags override them."""
    # Reset the record of which args were explicitly passed on the CLI.
    DetectDefault.non_default_args.clear()

    # With only the trainer path, values fall through to the YAML contents.
    options = parse_command_line(["mytrainerpath"])
    assert options.behaviors == {}
    assert options.env_path == "./oldenvfile"
    assert options.parameter_randomization is None
    assert options.keep_checkpoints == 34
    assert options.lesson == 2
    assert options.run_id == "uselessrun"
    assert options.save_freq == 654321
    assert options.seed == 9870
    assert options.base_port == 4001
    assert options.num_envs == 4
    assert options.no_graphics is False
    assert options.debug is False
    assert options.env_args is None

    # Explicit CLI flags must win over the YAML values.
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--keep-checkpoints=42",
        "--lesson=3",
        "--resume",
        "--inference",
        "--run-id=myawesomerun",
        "--save-freq=123456",
        "--seed=7890",
        "--train",
        "--base-port=4004",
        "--num-envs=2",
        "--no-graphics",
        "--debug",
    ]
    options = parse_command_line(cli_args)
    assert options.behaviors == {}
    assert options.env_path == "./myenvfile"
    assert options.parameter_randomization is None
    assert options.keep_checkpoints == 42
    assert options.lesson == 3
    assert options.run_id == "myawesomerun"
    assert options.save_freq == 123456
    assert options.seed == 7890
    assert options.base_port == 4004
    assert options.num_envs == 2
    assert options.no_graphics is True
    assert options.debug is True
    assert options.inference is True
    assert options.resume is True
def test_yaml_args(mock_file):
    """Options come from the mocked YAML file, and explicit CLI flags override them."""
    # Reset the record of which args were explicitly passed on the CLI.
    DetectDefault.non_default_args.clear()

    # With only the trainer path, values fall through to the YAML contents.
    options = parse_command_line(["mytrainerpath"])
    assert options.behaviors == {}
    assert options.env_settings.env_path == "./oldenvfile"
    assert options.checkpoint_settings.run_id == "uselessrun"
    assert options.checkpoint_settings.initialize_from == "notuselessrun"
    assert options.env_settings.seed == 9870
    assert options.env_settings.base_port == 4001
    assert options.env_settings.num_envs == 4
    assert options.env_settings.num_areas == 4
    assert options.engine_settings.no_graphics is False
    assert options.debug is False
    assert options.env_settings.env_args is None

    # Explicit CLI flags must win over the YAML values.
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--resume",
        "--inference",
        "--run-id=myawesomerun",
        "--seed=7890",
        "--train",
        "--base-port=4004",
        "--num-envs=2",
        "--num-areas=2",
        "--no-graphics",
        "--debug",
        "--results-dir=myresults",
    ]
    options = parse_command_line(cli_args)
    assert options.behaviors == {}
    assert options.env_settings.env_path == "./myenvfile"
    assert options.checkpoint_settings.run_id == "myawesomerun"
    assert options.env_settings.seed == 7890
    assert options.env_settings.base_port == 4004
    assert options.env_settings.num_envs == 2
    assert options.env_settings.num_areas == 2
    assert options.engine_settings.no_graphics is True
    assert options.debug is True
    assert options.checkpoint_settings.inference is True
    assert options.checkpoint_settings.resume is True
    assert options.checkpoint_settings.results_dir == "myresults"
def test_env_args(mock_file):
    """Everything after --env-args is collected verbatim into a list."""
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--env-args",  # the remaining tokens below all belong to the environment
        "--foo=bar",
        "--blah",
        "baz",
        "100",
    ]
    options = parse_command_line(cli_args)
    assert options.env_args == ["--foo=bar", "--blah", "baz", "100"]
def test_sampler_configs(mock_file):
    """The sampler section of the mocked config surfaces as parameter_randomization."""
    options = parse_command_line(["mytrainerpath"])
    assert options.parameter_randomization == "sampler1"
def basic_options(extra_args=None):
    """Parse options for "basic_path", appending optional extra CLI args.

    extra_args maps flag name to value; each pair is rendered as "name=value".
    """
    args = ["basic_path"]
    # Normalizing with `or {}` makes a separate emptiness guard unnecessary.
    for key, value in (extra_args or {}).items():
        args.append(f"{key}={value}")
    return parse_command_line(args)
def test_commandline_args():
    """parse_command_line: defaults with only a trainer path, and full CLI override."""
    # An empty argument list is rejected — the trainer-config path is mandatory.
    with pytest.raises(SystemExit):
        parse_command_line([])

    # Only the trainer path given: every other option falls back to its default.
    options = parse_command_line(["mytrainerpath"])
    assert options.trainer_config_path == "mytrainerpath"
    assert options.env_path is None
    assert options.curriculum_folder is None
    assert options.sampler_file_path is None
    assert options.keep_checkpoints == 5
    assert options.lesson == 0
    assert options.load_model is False
    assert options.run_id == "ppo"
    assert options.save_freq == 50000
    assert options.seed == -1
    assert options.train_model is False
    assert options.base_port == 5005
    assert options.num_envs == 1
    assert options.docker_target_name is None
    assert options.no_graphics is False
    assert options.debug is False
    assert options.multi_gpu is False
    assert options.env_args is None

    # Every flag set explicitly: the parsed options must echo the CLI values.
    cli_args = [
        "mytrainerpath",
        "--env=./myenvfile",
        "--curriculum=./mycurriculum",
        "--sampler=./mysample",
        "--keep-checkpoints=42",
        "--lesson=3",
        "--load",
        "--run-id=myawesomerun",
        "--num-runs=3",
        "--save-freq=123456",
        "--seed=7890",
        "--train",
        "--base-port=4004",
        "--num-envs=2",
        "--docker-target-name=mydockertarget",
        "--no-graphics",
        "--debug",
        "--multi-gpu",
    ]
    options = parse_command_line(cli_args)
    assert options.trainer_config_path == "mytrainerpath"
    assert options.env_path == "./myenvfile"
    assert options.curriculum_folder == "./mycurriculum"
    assert options.sampler_file_path == "./mysample"
    assert options.keep_checkpoints == 42
    assert options.lesson == 3
    assert options.load_model is True
    assert options.run_id == "myawesomerun"
    assert options.save_freq == 123456
    assert options.seed == 7890
    assert options.train_model is True
    assert options.base_port == 4004
    assert options.num_envs == 2
    assert options.docker_target_name == "mydockertarget"
    assert options.no_graphics is True
    assert options.debug is True
    assert options.multi_gpu is True
def test_sampler_configs(mock_file):
    """Sampler and curriculum sections of the mocked config are parsed through."""
    options = parse_command_line(["mytrainerpath"])
    assert options.parameter_randomization == {"sampler1": "foo"}
    # Two curriculum entries are expected from the mocked file.
    assert len(options.curriculum.keys()) == 2
from mlagents_envs.environment import UnityEnvironment
from mlagents.trainers import learn

# This is a non-blocking call that only loads the environment.
# env = UnityEnvironment(file_name=None, seed=1, side_channels=[])
# Start interacting with the environment.
# env.reset()
# behavior_names = env.behavior_spec.keys()

if __name__ == "__main__":
    # Build training options from a hard-coded config path and run one training worker.
    training_options = learn.parse_command_line(
        [
            "../../ml-agents/config/trainer_config.yaml",
            "--run-id=testing_3",
            "--force",
        ]
    )
    print(training_options)
    learn.run_training(1, training_options)
def test_sampler_configs(mock_file):
    """Sampler entries deserialize into settings objects; curricula are parsed through."""
    options = parse_command_line(["mytrainerpath"])
    assert isinstance(options.parameter_randomization["sampler1"], UniformSettings)
    # Two curriculum entries are expected from the mocked file.
    assert len(options.curriculum.keys()) == 2