# Example no. 1
def test_save_load():
    """Round-trip a FeedForward policy through save/load.

    Builds a small 2-hidden-layer policy, prints its output on an all-ones
    input, saves it, loads it back into the same object, and prints the
    output again so the two can be compared by eye.
    """
    cfg = {"rng": np.random}
    input_shape = 8
    output_shape = 8
    instructions = {
        "init_std": 0.05,
        "layers": [64, 64],
        "layer_functions": ['relu', 'relu'],
        "layer_extras": ['bn', 'bn'],
        "output_function": 'linear',
        "output_extras": 'bn',
    }
    policy = FeedForward(input_shape, output_shape, None, cfg)
    policy.build_model(instructions)

    print("BUILT POLICY LAYERS:")
    for built_layer in policy.model:
        print(built_layer)

    ones_input = np.ones(input_shape)
    before = policy.activate(ones_input)
    print("\nOUTPUT ON ONES BEFORE SAVING:", before)

    checkpoint_path = "data/experiments/exp_name/epochs/epoch_0/policy"
    policy.save(checkpoint_path)
    policy.load(checkpoint_path)

    after = policy.activate(ones_input)
    print("OUTPUT ON ONES AFTER SAVING:", after)
# Example no. 2
def run_test():
    """Exercise a FeedForward policy built from a config file.

    Loads ``test_config.json``, builds the policy for the configured
    environment's input/output shapes, applies virtual batch normalization
    (VBN), then checks by printing that:
      * saving does not perturb the policy's output, and
      * rebuilding + loading restores the saved behavior.
    """
    cfg = ConfigLoader.load_config(file_name="test_config.json")
    env = EnvironmentFactory.get_from_config(cfg)
    input_shape = env.get_policy_input_shape()
    output_shape = env.get_policy_output_shape()
    instructions = cfg["policy"]
    # Seeded RNG so the run is reproducible across invocations.
    cfg["rng"] = np.random.RandomState(cfg["seed"])

    policy = FeedForward(input_shape, output_shape, None, cfg)
    policy.build_model(instructions)

    print("BUILT POLICY LAYERS:")
    for layer in policy.model:
        print(layer)

    num = np.prod(input_shape)
    # Reference batch used to compute the virtual-normalization statistics.
    vbn = [np.random.randn(num) for _ in range(1000)]
    inp = np.ones(num)
    out = policy.activate(inp)

    print("\nOUTPUT ON ONES BEFORE VBN:", out)
    policy.compute_virtual_normalization(vbn)
    out = policy.activate(inp)
    print("OUTPUT ON ONES AFTER VBN:", out)

    # Saving must be a pure snapshot: output should be unchanged afterwards.
    policy.save("data/test")
    out = policy.activate(inp)
    print("OUTPUT ON ONES AFTER SAVE:", out)
    del policy

    # Rebuild from scratch and load; output should match the pre-save output
    # once VBN is recomputed on the same reference batch.
    policy = FeedForward(input_shape, output_shape, None, cfg)
    policy.build_model(instructions)
    policy.load("data/test")
    out = policy.activate(inp)
    print("OUTPUT ON ONES AFTER LOAD:", out)
    policy.compute_virtual_normalization(vbn)
    out = policy.activate(inp)
    print("OUTPUT ON ONES AFTER LOAD AND VBN:", out)
# Example no. 3
def test_save_load_vbn():
    """Verify save/load restores trainable parameters with VBN active.

    After saving, the trainable parameters are perturbed with random noise
    ("jiggled"); loading should undo the perturbation, so the printed output
    after loading should match the output printed before saving.
    """
    cfg = {"rng": np.random}
    input_shape = 8
    output_shape = 8
    instructions = {
        "init_std": 0.05,
        "layers": [64, 64],
        "layer_functions": ['relu', 'relu'],
        "layer_extras": ['bn', 'bn'],
        "output_function": 'linear',
        "output_extras": 'bn',
    }
    policy = FeedForward(input_shape, output_shape, None, cfg)
    policy.build_model(instructions)

    print("BUILT POLICY LAYERS:")
    for model_layer in policy.model:
        print(model_layer)

    reference_batch = [np.random.randn(input_shape) for _ in range(1000)]
    policy.compute_virtual_normalization(reference_batch)

    ones = np.ones(input_shape)

    print("\nOUTPUT ON ONES WITH VBN BEFORE SAVING:", policy.activate(ones))
    checkpoint = "data/experiments/exp_name/epochs/epoch_0/policy"
    policy.save(checkpoint)

    print("\nOUTPUT ON ONES WITH VBN AFTER SAVING:", policy.activate(ones))

    # Perturb the weights so a successful load is observable.
    noise = np.random.randn(policy.num_params)
    policy.set_trainable_flat(policy.get_trainable_flat() + noise)
    print("\nJIGGLED OUTPUT ON ONES WITH VBN BEFORE LOADING",
          policy.activate(ones))

    policy.load(checkpoint)
    print("OUTPUT ON ONES WITH VBN AFTER LOADING:", policy.activate(ones))