Example #1
    def test_activation_functions(self):
        # Test single activation functions (no other custom computations in layer).
        space = FloatBox(shape=(3, ), add_batch_rank=True)

        # ReLU.
        relu_layer = NNLayer(activation="relu")
        test = ComponentTest(component=relu_layer,
                             input_spaces=dict(inputs=space))

        input_ = space.sample(size=5)
        expected = relu(input_)
        test.test(("apply", input_), expected_outputs=expected)

        # Check again manually in case the numpy relu util itself is broken.
        input_ = np.array([[1.0, 2.0, -5.0], [-10.0, -100.1, 4.5]])
        expected = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, 4.5]])
        test.test(("apply", input_), expected_outputs=expected)

        # Sigmoid.
        sigmoid_layer = NNLayer(activation="sigmoid")
        test = ComponentTest(component=sigmoid_layer,
                             input_spaces=dict(inputs=space))

        input_ = space.sample(size=10)
        expected = sigmoid(input_)
        test.test(("apply", input_), expected_outputs=expected)
Example #2
    def test_keras_style_simple_nn(self):
        # Input Space of the network.
        input_space = FloatBox(shape=(3,), add_batch_rank=True)

        # Create a DenseLayer whose `call` arg `inputs` is fixed to the given input space.
        output1 = DenseLayer(units=5, activation="linear", scope="a")(input_space)
        # Create a second DenseLayer whose `inputs` arg is the DataOpRec produced by the first layer's `call` (output1).
        output2 = DenseLayer(units=7, activation="relu", scope="b")(output1)

        # This will automatically trace back through the given output DataOpRec(s) and add all components
        # encountered on the way back to the input space to this network.
        neural_net = NeuralNetwork(outputs=output2)

        test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_space))

        # Batch of size=5.
        input_ = input_space.sample(5)
        # Calculate output manually.
        var_dict = neural_net.get_variables(
            "a/dense/kernel", "a/dense/bias", "b/dense/kernel", "b/dense/bias", global_scope=False
        )
        w1_value = test.read_variable_values(var_dict["a/dense/kernel"])
        b1_value = test.read_variable_values(var_dict["a/dense/bias"])
        w2_value = test.read_variable_values(var_dict["b/dense/kernel"])
        b2_value = test.read_variable_values(var_dict["b/dense/bias"])

        expected = relu(dense_layer(dense_layer(input_, w1_value, b1_value), w2_value, b2_value))

        test.test(("call", input_), expected_outputs=expected, decimals=5)

        test.terminate()
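
The manually computed expectation above treats dense_layer as a plain matmul-plus-bias NumPy reference. A minimal sketch under that assumption (illustrative only, not necessarily the library's exact helper):

import numpy as np

def dense_layer(x, kernel, bias=None):
    # Reference forward pass of a dense layer: x @ W (+ b), no activation applied.
    out = np.matmul(x, kernel)
    return out if bias is None else out + bias

# With layer "a" linear and layer "b" using ReLU, the expected network output is:
# relu(dense_layer(dense_layer(input_, w1_value, b1_value), w2_value, b2_value))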
Example #3
    def test_dueling_action_adapter(self):
        # Last NN layer.
        last_nn_layer_space = FloatBox(shape=(7, ), add_batch_rank=True)
        # Action Space.
        action_space = IntBox(4, shape=(2, ))

        action_adapter = DuelingActionAdapter(
            action_space=action_space,
            units_state_value_stream=5,
            units_advantage_stream=4,
            weights_spec_state_value_stream=1.0,
            weights_spec_advantage_stream=0.5,
            activation_advantage_stream="linear",
            scope="aa")
        test = ComponentTest(component=action_adapter,
                             input_spaces=dict(nn_output=last_nn_layer_space),
                             action_space=action_space)

        # Batch of 2 samples.
        batch_size = 2
        inputs = last_nn_layer_space.sample(size=batch_size)

        dueling_action_adapter_vars = test.read_variable_values(
            action_adapter.variables)

        # The expected action-layer outputs are the raw advantage values.
        expected_raw_advantages = np.matmul(
            np.matmul(
                inputs, dueling_action_adapter_vars[
                    "aa/dense-layer-advantage-stream/dense/kernel"]),
            dueling_action_adapter_vars["aa/action-layer/dense/kernel"])
        expected_state_values = np.matmul(
            relu(
                np.matmul(
                    inputs, dueling_action_adapter_vars[
                        "aa/dense-layer-state-value-stream/dense/kernel"])),
            dueling_action_adapter_vars["aa/state-value-node/dense/kernel"])

        test.test(("get_action_layer_output", inputs),
                  expected_outputs=dict(state_value_node=expected_state_values,
                                        output=expected_raw_advantages),
                  decimals=5)

        expected_advantages = np.reshape(expected_raw_advantages,
                                         newshape=(batch_size, 2, 4))

        # Expected q-values/logits, probabilities (softmaxed q) and log(p).
        expanded_state_values = np.expand_dims(expected_state_values, axis=1)
        expected_q_values = expanded_state_values + expected_advantages - \
            np.mean(expected_advantages, axis=-1, keepdims=True)
        expected_probs = softmax(expected_q_values)

        test.test(("get_logits_probabilities_log_probs", inputs),
                  expected_outputs=dict(state_values=expected_state_values,
                                        logits=expected_q_values,
                                        probabilities=expected_probs,
                                        log_probs=np.log(expected_probs)),
                  decimals=3)
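
The expected Q-values above follow the standard dueling aggregation Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), with a softmax over the action axis for the probabilities. A small NumPy sketch of that computation (illustrative reference, mirroring the arithmetic in the test):

import numpy as np

def dueling_q_values(state_values, advantages):
    # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); V is broadcast over the action axes.
    v = np.expand_dims(state_values, axis=1)
    return v + advantages - np.mean(advantages, axis=-1, keepdims=True)

def softmax(x, axis=-1):
    # Numerically stable softmax turning Q-values into action probabilities.
    x = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(x)
    return e / np.sum(e, axis=axis, keepdims=True)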
Example #4
    def test_bernoulli_action_adapter(self):
        # Last NN layer.
        previous_nn_layer_space = FloatBox(shape=(16, ), add_batch_rank=True)
        adapter_outputs_space = FloatBox(shape=(2, ), add_batch_rank=True)
        # Action Space.
        action_space = BoolBox(shape=(2, ))

        action_adapter = BernoulliDistributionAdapter(
            action_space=action_space, activation="relu")
        test = ComponentTest(component=action_adapter,
                             input_spaces=dict(
                                 inputs=previous_nn_layer_space,
                                 adapter_outputs=adapter_outputs_space,
                             ),
                             action_space=action_space)
        action_adapter_params = test.read_variable_values(
            action_adapter.variable_registry)

        # Batch of 32 samples.
        inputs = previous_nn_layer_space.sample(32)

        expected_logits = relu(
            np.matmul(
                inputs, action_adapter_params[
                    "action-adapter/action-network/action-layer/dense/kernel"])
        )
        test.test(("call", inputs),
                  expected_outputs=expected_logits,
                  decimals=5)

        expected_probs = sigmoid(expected_logits)
        expected_log_probs = np.log(expected_probs)
        test.test(("get_parameters", inputs),
                  expected_outputs=dict(adapter_outputs=expected_logits,
                                        parameters=expected_probs,
                                        probabilities=expected_probs,
                                        log_probs=expected_log_probs),
                  decimals=5)
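
The Bernoulli adapter's parameters are simply the element-wise sigmoid of the adapter outputs (logits), and the log-probs are their natural log, as the expected values above show. A tiny NumPy reference sketch (illustrative only):

import numpy as np

def bernoulli_parameters(logits):
    # Probability of True per action dimension, plus its log.
    probs = 1.0 / (1.0 + np.exp(-logits))
    return probs, np.log(probs)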