# Shared imports assumed by the snippets on this page (taken from the
# ml-agents test suite; exact module paths can vary between releases):
import os
from logging import WARNING

import numpy as np

from mlagents.torch_utils import torch
from mlagents_envs.base_env import BehaviorSpec
from mlagents.trainers.settings import (
    TrainerSettings,
    RewardSignalType,
    CuriositySettings,
    GAILSettings,
    RNDSettings,
)
from mlagents.trainers.model_saver.torch_model_saver import TorchModelSaver
from mlagents.trainers.torch.components.reward_providers import (
    CuriosityRewardProvider,
    create_reward_provider,
)
from mlagents.trainers.torch.utils import ModelUtils
# Test helpers (create_policy_mock, create_agent_buffer, DEMO_PATH) are
# defined elsewhere in the ml-agents test suite.


def test_reward_provider_save(tmp_path, optimizer):
    OptimizerClass, HyperparametersClass = optimizer

    trainer_settings = TrainerSettings()
    trainer_settings.hyperparameters = HyperparametersClass()
    trainer_settings.reward_signals = {
        RewardSignalType.CURIOSITY: CuriositySettings(),
        RewardSignalType.GAIL: GAILSettings(demo_path=DEMO_PATH),
        RewardSignalType.RND: RNDSettings(),
    }
    policy = create_policy_mock(trainer_settings, use_discrete=False)
    optimizer = OptimizerClass(policy, trainer_settings)

    # save at path 1
    path1 = os.path.join(tmp_path, "runid1")
    model_saver = TorchModelSaver(trainer_settings, path1)
    model_saver.register(policy)
    model_saver.register(optimizer)
    model_saver.initialize_or_load()
    policy.set_step(2000)
    model_saver.save_checkpoint("MockBrain", 2000)

    # create a new optimizer and policy
    optimizer2 = OptimizerClass(policy, trainer_settings)
    policy2 = create_policy_mock(trainer_settings, use_discrete=False)

    # load weights
    model_saver2 = TorchModelSaver(trainer_settings, path1, load=True)
    model_saver2.register(policy2)
    model_saver2.register(optimizer2)
    model_saver2.initialize_or_load()  # this also restores the optimizer state

    # assert the models have the same weights
    module_dict_1 = optimizer.get_modules()
    module_dict_2 = optimizer2.get_modules()
    assert "Module:GAIL" in module_dict_1
    assert "Module:GAIL" in module_dict_2
    assert "Module:Curiosity" in module_dict_1
    assert "Module:Curiosity" in module_dict_2
    assert "Module:RND-pred" in module_dict_1
    assert "Module:RND-pred" in module_dict_2
    assert "Module:RND-target" in module_dict_1
    assert "Module:RND-target" in module_dict_2
    for name, module1 in module_dict_1.items():
        assert name in module_dict_2
        module2 = module_dict_2[name]
        if hasattr(module1, "parameters"):
            for param1, param2 in zip(module1.parameters(),
                                      module2.parameters()):
                assert param1.data.ne(param2.data).sum() == 0

    # evaluate each reward provider and check both optimizers produce identical rewards
    data = create_agent_buffer(policy.behavior_spec, 1)
    for reward_name in optimizer.reward_signals.keys():
        rp_1 = optimizer.reward_signals[reward_name]
        rp_2 = optimizer2.reward_signals[reward_name]
        assert np.array_equal(rp_1.evaluate(data), rp_2.evaluate(data))
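The `optimizer` fixture unpacked at the top of this test is defined elsewhere in the test suite. Below is a minimal sketch of the parametrization these tests appear to rely on; the optimizer/settings class names and import paths are assumptions based on the ml-agents package layout, not shown on this page:

import pytest

# Assumed imports; paths may differ between ml-agents releases.
from mlagents.trainers.ppo.optimizer_torch import TorchPPOOptimizer
from mlagents.trainers.sac.optimizer_torch import TorchSACOptimizer
from mlagents.trainers.settings import PPOSettings, SACSettings


@pytest.fixture(
    params=[(TorchPPOOptimizer, PPOSettings), (TorchSACOptimizer, SACSettings)],
    ids=["ppo", "sac"],
)
def optimizer(request):
    # Each test receives an (OptimizerClass, HyperparametersClass) pair.
    return request.param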
Example #2
def test_continuous_action_prediction(behavior_spec: BehaviorSpec, seed: int) -> None:
    np.random.seed(seed)
    torch.manual_seed(seed)
    curiosity_settings = CuriositySettings(32, 0.1)
    curiosity_rp = CuriosityRewardProvider(behavior_spec, curiosity_settings)
    buffer = create_agent_buffer(behavior_spec, 5)
    for _ in range(200):
        curiosity_rp.update(buffer)
    prediction = curiosity_rp._network.predict_action(buffer)[0]
    target = torch.tensor(buffer["actions"][0])
    error = torch.mean((prediction - target) ** 2).item()
    assert error < 0.001
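For context, the curiosity module pairs an inverse model (predict the action from consecutive observations) with a forward model (predict the next-state embedding); the test above checks that the inverse model's action prediction converges. Here is a self-contained toy sketch of the inverse-model idea in plain PyTorch; it is illustrative only, with made-up names and toy dynamics, not the ml-agents implementation:

import torch
from torch import nn

class ToyInverseModel(nn.Module):
    # Predicts the continuous action that moved the agent from obs to next_obs.
    def __init__(self, obs_size: int, act_size: int, hidden: int = 32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(2 * obs_size, hidden),
            nn.ReLU(),
            nn.Linear(hidden, act_size),
        )

    def forward(self, obs: torch.Tensor, next_obs: torch.Tensor) -> torch.Tensor:
        return self.net(torch.cat([obs, next_obs], dim=-1))

torch.manual_seed(0)
obs = torch.randn(5, 4)
act = torch.randn(5, 2)
next_obs = obs + 0.1 * torch.cat([act, act], dim=-1)  # toy linear dynamics
model = ToyInverseModel(obs_size=4, act_size=2)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
for _ in range(200):
    loss = torch.mean((model(obs, next_obs) - act) ** 2)
    optim.zero_grad()
    loss.backward()
    optim.step()
# After fitting, the action prediction error should be small, mirroring
# the assertion in the test above.
print(torch.mean((model(obs, next_obs) - act) ** 2).item())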
Example #3
def test_next_state_prediction(behavior_spec: BehaviorSpec, seed: int) -> None:
    np.random.seed(seed)
    torch.manual_seed(seed)
    curiosity_settings = CuriositySettings(32, 0.1)
    curiosity_rp = CuriosityRewardProvider(behavior_spec, curiosity_settings)
    buffer = create_agent_buffer(behavior_spec, 5)
    for _ in range(100):
        curiosity_rp.update(buffer)
    prediction = curiosity_rp._network.predict_next_state(buffer)[0]
    target = curiosity_rp._network.get_next_state(buffer)[0]
    error = float(ModelUtils.to_numpy(torch.mean((prediction - target) ** 2)))
    assert error < 0.001
Example #4
def test_reward_decreases(behavior_spec: BehaviorSpec, seed: int) -> None:
    np.random.seed(seed)
    torch.manual_seed(seed)
    curiosity_settings = CuriositySettings(32, 0.01)
    curiosity_rp = CuriosityRewardProvider(behavior_spec, curiosity_settings)
    buffer = create_agent_buffer(behavior_spec, 5)
    curiosity_rp.update(buffer)
    reward_old = curiosity_rp.evaluate(buffer)[0]
    for _ in range(20):
        curiosity_rp.update(buffer)
        reward_new = curiosity_rp.evaluate(buffer)[0]
    assert reward_new < reward_old
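Intuition for this assertion: the curiosity reward is the forward model's prediction error on the buffered transitions, so repeatedly updating on the same fixed buffer drives that error, and hence the reward, down. Note that only the value from the final loop iteration ends up in reward_new.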
Example #5
def test_load_different_reward_provider(caplog, tmp_path, optimizer):
    OptimizerClass, HyperparametersClass = optimizer

    trainer_settings = TrainerSettings()
    trainer_settings.hyperparameters = HyperparametersClass()
    trainer_settings.reward_signals = {
        RewardSignalType.CURIOSITY: CuriositySettings(),
        RewardSignalType.RND: RNDSettings(),
    }

    policy = create_policy_mock(trainer_settings, use_discrete=False)
    optimizer = OptimizerClass(policy, trainer_settings)

    # save at path 1
    path1 = os.path.join(tmp_path, "runid1")
    model_saver = TorchModelSaver(trainer_settings, path1)
    model_saver.register(policy)
    model_saver.register(optimizer)
    model_saver.initialize_or_load()
    assert len(optimizer.critic.value_heads.stream_names) == 2
    policy.set_step(2000)
    model_saver.save_checkpoint("MockBrain", 2000)

    trainer_settings2 = TrainerSettings()
    trainer_settings2.hyperparameters = HyperparametersClass()
    trainer_settings2.reward_signals = {
        RewardSignalType.GAIL: GAILSettings(demo_path=DEMO_PATH)
    }

    # create a new optimizer and policy
    policy2 = create_policy_mock(trainer_settings2, use_discrete=False)
    optimizer2 = OptimizerClass(policy2, trainer_settings2)

    # load weights
    model_saver2 = TorchModelSaver(trainer_settings2, path1, load=True)
    model_saver2.register(policy2)
    model_saver2.register(optimizer2)
    assert len(optimizer2.critic.value_heads.stream_names) == 1
    model_saver2.initialize_or_load()  # this also restores the optimizer state
    messages = [
        rec.message for rec in caplog.records if rec.levelno == WARNING
    ]
    assert len(messages) > 0
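Here the checkpoint was written with Curiosity and RND reward modules, but the second configuration requests only GAIL, so TorchModelSaver cannot match the saved modules to the new ones; the test asserts that at least one warning is logged during loading.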
Example #6
def curiosity_dummy_config():
    return {RewardSignalType.CURIOSITY: CuriositySettings()}
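This helper returns a reward_signals mapping that can be assigned directly to trainer settings, mirroring the assignments in the earlier examples:

trainer_settings = TrainerSettings()
trainer_settings.reward_signals = curiosity_dummy_config()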
Example #7
def test_factory(behavior_spec: BehaviorSpec) -> None:
    curiosity_settings = CuriositySettings(32, 0.01)
    curiosity_rp = create_reward_provider(
        RewardSignalType.CURIOSITY, behavior_spec, curiosity_settings
    )
    assert curiosity_rp.name == "Curiosity"
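The same factory dispatches on the other RewardSignalType values as well. A minimal sketch for RND; the provider name string "RND" is an assumption that it follows the same naming pattern as "Curiosity":

rnd_rp = create_reward_provider(RewardSignalType.RND, behavior_spec, RNDSettings())
assert rnd_rp.name == "RND"  # assumed name, by analogy with "Curiosity"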
Example #8
def test_construction(behavior_spec: BehaviorSpec) -> None:
    curiosity_settings = CuriositySettings(32, 0.01)
    curiosity_settings.strength = 0.1
    curiosity_rp = CuriosityRewardProvider(behavior_spec, curiosity_settings)
    assert curiosity_rp.strength == 0.1
    assert curiosity_rp.name == "Curiosity"