Example #1
0
    def test_subclassing_network_with_primitive_int_output_space(self):
        """Keras-subclassed model wrapped in Network, primitive Int output space."""
        in_space = Float(-1.0, 1.0, shape=(5,), main_axes="B")
        out_space = Int(3, main_axes="B")

        # Using keras subclassing.
        nn = Network(network=self.MyModel(), output_space=out_space)

        # Simple function call -> Expect output for all int-inputs.
        obs = in_space.sample(6)
        out = nn(obs)
        w = nn.get_weights()
        # Reproduce the forward pass with numpy: two parallel dense layers,
        # concatenated, then a final dense layer.
        hidden = np.concatenate(
            [dense(obs, w[0], w[1]), dense(obs, w[2], w[3])],
            axis=-1
        )
        check(out, dense(hidden, w[4], w[5]))

        # Function call with value -> Expect output for only that int-value.
        obs = in_space.sample(6)
        picks = out_space.sample(6)
        out = nn(obs, picks)
        w = nn.get_weights()
        hidden = np.concatenate(
            [dense(obs, w[0], w[1]), dense(obs, w[2], w[3])],
            axis=-1
        )
        logits = dense(hidden, w[4], w[5])
        # Select only the logit of each sampled int value via one-hot masking.
        check(out, np.sum(logits * one_hot(picks, depth=out_space.num_categories), axis=-1))
Example #2
0
    def test_func_api_network_with_automatically_handling_container_input_space(self):
        """Dict input space handled automatically via per-key `pre_concat_networks`."""
        # Simple vectors plus image as inputs (see e.g. SAC).
        in_space = Dict(A=Float(-1.0, 1.0, shape=(2,)), B=Int(5), C=Float(-1.0, 1.0, shape=(2, 2, 3)), main_axes="B")
        out_space = Float(shape=(3,), main_axes="B")  # simple output

        # Only define a base-core network and let the automation handle the complex
        # input structure via `pre-concat` nets.
        core = tf.keras.models.Sequential([
            tf.keras.layers.Dense(3, activation="relu"),
            tf.keras.layers.Dense(3)
        ])

        # Use no distributions.
        nn = Network(
            network=core,
            input_space=in_space,
            pre_concat_networks=dict(
                # leave "A" out -> "A" input will go unaltered into concat step.
                B=lambda i: tf.one_hot(i, depth=in_space["B"].num_categories, axis=-1),
                C=tf.keras.layers.Flatten()
            ),
            output_space=out_space,
            distributions=False
        )

        # Simple function call; reproduce the forward pass with numpy:
        # concat(A, one_hot(B), flatten(C)) -> relu(dense) -> dense -> dense.
        obs = in_space.sample(6)
        out = nn(obs)
        w = nn.get_weights()
        flat_in = np.concatenate([
            obs["A"],
            one_hot(obs["B"], depth=in_space["B"].num_categories),
            np.reshape(obs["C"], newshape=(6, -1))
        ], axis=-1)
        expected = dense(dense(relu(dense(flat_in, w[0], w[1])), w[2], w[3]), w[4], w[5])

        check(out, expected)
Example #3
0
    def test_copying_a_network(self):
        """A copied Network must produce the exact same outputs as the original."""
        # Using keras layer as network spec.
        nn = Network(
            network=tf.keras.layers.Dense(4),
            output_space=Dict({"a": Float(shape=(2,)), "b": Int(2)})
        )
        # Simple call -> Should return dict with "a"->float(2,) and "b"->float(2,).
        # Also forces variable creation so weights can be read.
        obs = Float(-1.0, 1.0, shape=(5,), main_axes="B").sample(5)
        _ = nn(obs)
        w = nn.get_weights()
        core = dense(obs, w[0], w[1])
        want_a = dense(core, w[2], w[3])
        want_b = dense(core, w[4], w[5])

        # Do the copy; it must behave identically on the same input.
        clone = nn.copy()
        check(clone(obs), dict(a=want_a, b=want_b))
Example #4
0
    def test_layer_network_with_container_output_space(self):
        """Single keras layer as network with a Dict output space."""
        # Using keras layer as network spec.
        nn = Network(
            network=tf.keras.layers.Dense(10),
            output_space=Dict({"a": Float(shape=(2, 3)), "b": Int(3)})
        )
        # Simple call -> Should return dict with "a"->float(2,3) and "b"->float(3,).
        obs = Float(-1.0, 1.0, shape=(5,), main_axes="B").sample(5)
        out = nn(obs)
        w = nn.get_weights()
        core = dense(obs, w[0], w[1])
        # "a" adapter output is reshaped from (batch, 6) into (batch, 2, 3).
        want_a = np.reshape(dense(core, w[2], w[3]), newshape=(-1, 2, 3))
        want_b = dense(core, w[4], w[5])

        check(out, dict(a=want_a, b=want_b))
Example #5
0
    def test_func_api_network_with_primitive_int_output_space_and_distribution(self):
        """Functional-API network, Int output space, default (categorical) distribution."""
        in_space = Float(-1.0, 1.0, shape=(3,), main_axes="B")
        out_space = Int(5, main_axes="B")

        # Using keras functional API to create network.
        inp = tf.keras.layers.Input(shape=(3,))
        left = tf.keras.layers.Dense(10)(inp)
        right = tf.keras.layers.Dense(5)(inp)
        model = tf.keras.Model(inputs=inp, outputs=tf.concat([left, right], axis=-1))

        # Use default distributions (i.e. categorical for Int).
        nn = Network(
            network=model,
            output_space=out_space,
            distributions="default"
        )
        # Sampling a large batch should give roughly the mean category (2 of 0..4).
        out = nn(in_space.sample(1000))
        check(np.mean(out), 2, decimals=0)

        # Function call with value -> Expect probabilities for given int-values.
        obs = in_space.sample(6)
        picks = out_space.sample(6)
        out = nn(obs, picks)
        w = nn.get_weights()
        logits = dense(
            np.concatenate([dense(obs, w[0], w[1]), dense(obs, w[2], w[3])], axis=-1),
            w[4], w[5]
        )
        probs = softmax(logits)
        # Pick each sampled value's probability via one-hot masking.
        check(out, np.sum(probs * one_hot(picks, depth=out_space.num_categories), axis=-1))

        # Function call with "likelihood" option set -> Expect sample plus
        # probabilities for sampled int-values.
        sample, likelihoods = nn(in_space.sample(1000), likelihood=True)
        check(np.mean(sample), 2, decimals=0)
        check(np.mean(likelihoods), 1.0 / out_space.num_categories, decimals=1)
Example #6
0
    def test_func_api_network_with_manually_handling_container_input_space(self):
        """Functional-API net that manually flattens/concats a Dict input space.

        Also verifies that calling the network with explicit values raises a
        SurrealError: a pure-float output without distributions cannot produce
        likelihoods for given values.
        """
        # Simple vector plus image as inputs (see e.g. SAC).
        input_space = Dict(A=Float(-1.0, 1.0, shape=(2,)), B=Float(-1.0, 1.0, shape=(2, 2, 3)), main_axes="B")
        output_space = Float(shape=(3,), main_axes="B")  # simple output

        # Using keras functional API to create network.
        keras_input = input_space.create_keras_input()
        # Simply flatten and concat everything, then output.
        o = tf.keras.layers.Flatten()(keras_input["B"])
        o = tf.concat([keras_input["A"], o], axis=-1)
        network = tf.keras.Model(inputs=keras_input, outputs=o)

        # Use no distributions.
        nn = Network(
            network=network,
            output_space=output_space,
            distributions=False
        )

        # Simple function call -> output = dense(concat(A, flatten(B))).
        input_ = input_space.sample(6)
        result = nn(input_)
        weights = nn.get_weights()
        expected = dense(np.concatenate([input_["A"], np.reshape(input_["B"], newshape=(6, -1))], axis=-1), weights[0], weights[1])

        check(result, expected)

        # Function call with value -> Expect error as we only have float outputs (w/o distributions).
        # Use assertRaises instead of the manual error-flag try/except pattern.
        input_ = input_space.sample(6)
        values = output_space.sample(6)
        with self.assertRaises(SurrealError):
            nn(input_, values)
Example #7
0
    def test_dueling_network(self):
        """Dueling-style net: one shared core, separate pre-networks for A and V."""
        in_space = Float(-1.0, 1.0, shape=(2,), main_axes="B")
        out_space = Dict({"A": Float(shape=(4,)), "V": Float()}, main_axes="B")  # V=single node

        nn = Network(
            # Using keras layer as main (shared core) network spec.
            network=tf.keras.layers.Dense(5),
            output_space=out_space,
            # Each output stream gets its own pre-network before its final adapter.
            adapters=dict(A=dict(pre_network=tf.keras.layers.Dense(2)), V=dict(pre_network=tf.keras.layers.Dense(3)))
        )
        # Simple call -> Should return dict with "A" and "V" outputs.
        obs = in_space.sample(10)
        out = nn(obs)

        w = nn.get_weights()
        core = dense(obs, w[0], w[1])
        want_a = dense(dense(core, w[2], w[3]), w[4], w[5])
        # V is a single node -> squeeze the trailing axis to shape (batch,).
        want_v = np.reshape(dense(dense(core, w[6], w[7]), w[8], w[9]), newshape=(10,))
        check(out["A"], want_a, decimals=5)
        check(out["V"], want_v, decimals=5)