def test_simple_nn_using_layers(self):
    # Space must contain a batch dimension (otherwise, NNLayer will complain).
    space = FloatBox(shape=(4,), add_batch_rank=True)

    # Create a simple neural net from json.
    nn_layers = config_from_path("configs/test_simple_nn.json")
    neural_net = NeuralNetwork(*nn_layers["layers"])

    # Do not seed, we calculate expectations manually.
    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=space))

    # Batch of size=4.
    input_ = space.sample(4)

    # Calculate output manually.
    var_dict = neural_net.get_variables(
        "hidden-layer/dense/kernel", "hidden-layer/dense/bias", global_scope=False
    )
    w1_value = test.read_variable_values(var_dict["hidden-layer/dense/kernel"])
    b1_value = test.read_variable_values(var_dict["hidden-layer/dense/bias"])
    expected = dense_layer(input_, w1_value, b1_value)

    test.test(("call", input_), expected_outputs=expected, decimals=5)

    test.terminate()

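# For reference: `dense_layer` used throughout these tests is RLgraph's numpy
# test helper for computing expected layer outputs. A minimal sketch of what
# such a helper presumably computes (a plain affine transform, x @ W + b; the
# name `dense_layer_ref` is hypothetical, for illustration only):
def dense_layer_ref(inputs, weights, biases):
    # Affine transform over the last axis; numpy broadcasting handles any
    # leading batch/time ranks.
    return np.matmul(inputs, weights) + biases
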
def test_keras_style_simple_nn(self):
    # Input Space of the network.
    input_space = FloatBox(shape=(3,), add_batch_rank=True)

    # Create a DenseLayer, fixing the input space for its `call` arg `inputs`.
    output1 = DenseLayer(units=5, activation="linear", scope="a")(input_space)
    # Create a DenseLayer whose `inputs` arg is the resulting DataOpRec of output1's `call` output.
    output2 = DenseLayer(units=7, activation="relu", scope="b")(output1)

    # This will trace back automatically through the given output DataOpRec(s) and add all components
    # on the way to the input-space to this network.
    neural_net = NeuralNetwork(outputs=output2)

    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_space))

    # Batch of size=5.
    input_ = input_space.sample(5)

    # Calculate output manually.
    var_dict = neural_net.get_variables(
        "a/dense/kernel", "a/dense/bias", "b/dense/kernel", "b/dense/bias", global_scope=False
    )
    w1_value = test.read_variable_values(var_dict["a/dense/kernel"])
    b1_value = test.read_variable_values(var_dict["a/dense/bias"])
    w2_value = test.read_variable_values(var_dict["b/dense/kernel"])
    b2_value = test.read_variable_values(var_dict["b/dense/bias"])
    expected = relu(dense_layer(dense_layer(input_, w1_value, b1_value), w2_value, b2_value))

    test.test(("call", input_), expected_outputs=expected, decimals=5)

    test.terminate()

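# `relu` above is likewise a numpy test helper for the element-wise rectifier.
# A sketch under that assumption (hypothetical name `relu_ref`):
def relu_ref(x):
    # max(x, 0), applied element-wise.
    return np.maximum(x, 0.0)
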
def test_functional_api_one_output_is_discarded(self):
    # Input Space of the network.
    input_space = FloatBox(shape=(3,), add_batch_rank=True, add_time_rank=True)

    # Pass the input through an LSTM and get two outputs (output and internal states), only one of which will be used.
    lstm_out, _ = LSTMLayer(units=2, return_sequences=False)(input_space)

    # A NN with 1 output (don't return the internal states of the LSTM).
    neural_net = NeuralNetwork(outputs=lstm_out)

    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_space))

    # Batch of size=5, time-steps=3.
    input_ = input_space.sample((5, 3))

    # Calculate output manually.
    var_dict = neural_net.variable_registry
    w1_value = test.read_variable_values(var_dict["neural-network/lstm-layer/lstm-cell/kernel"])
    b1_value = test.read_variable_values(var_dict["neural-network/lstm-layer/lstm-cell/bias"])
    expected_out, _ = lstm_layer(input_, w1_value, b1_value)
    expected_out = expected_out[:, -1, :]  # Last time step only (return_sequences=False).

    # Don't expect internal states (our NN does not return these as per the functional API definition above).
    test.test(("call", input_), expected_outputs=expected_out, decimals=5)

    test.terminate()

def test_add_layer_to_simple_nn(self):
    # Space must contain a batch dimension (otherwise, NNLayer will complain).
    space = FloatBox(shape=(3,), add_batch_rank=True)

    # Create a simple neural net from json.
    neural_net = NeuralNetwork.from_spec(
        config_from_path("configs/test_simple_nn.json")
    )  # type: NeuralNetwork
    # Add another layer to it.
    neural_net.add_layer(DenseLayer(units=10, scope="last-layer"))

    # Do not seed, we calculate expectations manually.
    test = ComponentTest(component=neural_net, input_spaces=dict(nn_input=space))

    # Batch of size=3.
    input_ = space.sample(3)

    # Calculate output manually.
    var_dict = test.read_variable_values(neural_net.variable_registry)
    expected = dense_layer(
        dense_layer(
            input_,
            var_dict["test-network/hidden-layer/dense/kernel"],
            var_dict["test-network/hidden-layer/dense/bias"]
        ),
        var_dict["test-network/last-layer/dense/kernel"],
        var_dict["test-network/last-layer/dense/bias"]
    )

    test.test(("apply", input_), expected_outputs=dict(output=expected), decimals=5)

    test.terminate()

def test_simple_nn(self):
    # Space must contain a batch dimension (otherwise, NNLayer will complain).
    space = FloatBox(shape=(3,), add_batch_rank=True)

    # Create a simple neural net from json.
    neural_net = NeuralNetwork.from_spec(
        config_from_path("configs/test_simple_nn.json")
    )  # type: NeuralNetwork

    # Do not seed, we calculate expectations manually.
    test = ComponentTest(component=neural_net, input_spaces=dict(nn_input=space))

    # Batch of size=3.
    input_ = np.array([[0.1, 0.2, 0.3], [1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])

    # Calculate output manually.
    var_dict = neural_net.get_variables(
        "hidden-layer/dense/kernel", "hidden-layer/dense/bias", global_scope=False
    )
    w1_value = test.read_variable_values(var_dict["hidden-layer/dense/kernel"])
    b1_value = test.read_variable_values(var_dict["hidden-layer/dense/bias"])
    expected = dense_layer(input_, w1_value, b1_value)

    test.test(("apply", input_), expected_outputs=dict(output=expected), decimals=5)

    test.terminate()

def test_keras_style_two_separate_input_spaces(self):
    # Define the two input Spaces first, independently (no container).
    input_space_1 = IntBox(3, add_batch_rank=True)
    input_space_2 = FloatBox(shape=(4,), add_batch_rank=True)

    # One-hot flatten the int tensor.
    flatten_layer_out = ReShape(flatten=True, flatten_categories=True)(input_space_1)
    # Run the float tensor through two dense layers.
    dense_1_out = DenseLayer(units=3, scope="d1")(input_space_2)
    dense_2_out = DenseLayer(units=5, scope="d2")(dense_1_out)
    # Concat everything.
    cat_out = ConcatLayer()(flatten_layer_out, dense_2_out)

    # Use the `outputs` arg to allow your network to trace back the data flow until the input space.
    neural_net = NeuralNetwork(inputs=[input_space_1, input_space_2], outputs=cat_out)

    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=[input_space_1, input_space_2]))

    var_dict = neural_net.variable_registry
    w1_value = test.read_variable_values(var_dict["neural-network/d1/dense/kernel"])
    b1_value = test.read_variable_values(var_dict["neural-network/d1/dense/bias"])
    w2_value = test.read_variable_values(var_dict["neural-network/d2/dense/kernel"])
    b2_value = test.read_variable_values(var_dict["neural-network/d2/dense/bias"])

    # Batch of size=4.
    input_ = [input_space_1.sample(4), input_space_2.sample(4)]
    # Concat everything manually.
    expected = np.concatenate([
        one_hot(input_[0]),  # int -> one-hot flattening
        dense_layer(dense_layer(input_[1], w1_value, b1_value), w2_value, b2_value)  # float -> 2 x dense
    ], axis=-1)

    test.test(("call", input_), expected_outputs=expected)

    test.terminate()

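# `one_hot` above turns the IntBox sample into one-hot float vectors before
# concatenation. A minimal numpy sketch under that assumption (the name
# `one_hot_ref` is hypothetical; `depth` must match the IntBox's category count):
def one_hot_ref(indices, depth=3):
    # Build an identity matrix and index its rows by the int values;
    # works for any shape of integer index array.
    return np.eye(depth, dtype=np.float32)[indices]
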
def test_keras_style_one_container_input_space(self):
    # Define one container input Space.
    input_space = Tuple(IntBox(3), FloatBox(shape=(4,)), add_batch_rank=True)

    # One-hot flatten the int tensor.
    flatten_layer_out = ReShape(flatten=True, flatten_categories=True)(input_space[0])
    # Run the float tensor through two dense layers.
    dense_1_out = DenseLayer(units=3, scope="d1")(input_space[1])
    dense_2_out = DenseLayer(units=5, scope="d2")(dense_1_out)
    # Concat everything.
    cat_out = ConcatLayer()(flatten_layer_out, dense_2_out)

    # Use the `outputs` arg to allow your network to trace back the data flow until the input space.
    # `inputs` is not needed here as we only have one single input (the Tuple).
    neural_net = NeuralNetwork(outputs=cat_out)

    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_space))

    var_dict = neural_net.variable_registry
    w1_value = test.read_variable_values(var_dict["neural-network/d1/dense/kernel"])
    b1_value = test.read_variable_values(var_dict["neural-network/d1/dense/bias"])
    w2_value = test.read_variable_values(var_dict["neural-network/d2/dense/kernel"])
    b2_value = test.read_variable_values(var_dict["neural-network/d2/dense/bias"])

    # Batch of size=4.
    input_ = input_space.sample(4)
    # Concat everything manually.
    expected = np.concatenate([
        one_hot(input_[0]),  # int -> one-hot flattening
        dense_layer(dense_layer(input_[1], w1_value, b1_value), w2_value, b2_value)  # float -> 2 x dense
    ], axis=-1)

    test.test(("call", tuple([input_])), expected_outputs=expected)

    test.terminate()

def test_lstm_nn(self):
    # Space must contain batch AND time dimensions (otherwise, NNLayer will complain).
    batch_size = 2
    time_steps = 4
    input_nodes = 2
    input_space = FloatBox(shape=(input_nodes,), add_batch_rank=True, add_time_rank=True)

    neural_net = NeuralNetwork.from_spec(config_from_path("configs/test_dense_to_lstm_nn.json"))

    # Do not seed, we calculate expectations manually.
    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_space))

    # Batch of size=2, time-steps=4.
    input_ = input_space.sample((batch_size, time_steps))

    # Calculate output manually.
    w0_value = test.read_variable_values(
        neural_net.variable_registry["test-lstm-network/dense-layer/dense/kernel"])
    b0_value = test.read_variable_values(
        neural_net.variable_registry["test-lstm-network/dense-layer/dense/bias"])
    lstm_w_value = test.read_variable_values(
        neural_net.variable_registry["test-lstm-network/lstm-layer/lstm-cell/kernel"])
    lstm_b_value = test.read_variable_values(
        neural_net.variable_registry["test-lstm-network/lstm-layer/lstm-cell/bias"])

    d0_out = dense_layer(input_, w0_value, b0_value)
    lstm_out, last_internal_states = lstm_layer(d0_out, lstm_w_value, lstm_b_value, time_major=False)
    expected = (lstm_out, last_internal_states)

    test.test(("call", input_), expected_outputs=expected, decimals=5)

    test.terminate()

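# For reference: `lstm_layer` above is RLgraph's numpy LSTM test helper. A
# minimal sketch of one such reference pass, assuming TF's basic LSTM-cell
# variable layout: a kernel of shape (input_dim + units, 4 * units), gate order
# i, j, f, o, and a default forget-gate bias of 1.0. The names `lstm_layer_ref`
# and `sigmoid` are hypothetical, for illustration only:
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_layer_ref(inputs, kernel, bias, forget_bias=1.0):
    # inputs: (batch, time, input_dim); returns all outputs (batch, time, units)
    # plus the last internal states (c, h).
    batch_size, time_steps, _ = inputs.shape
    units = kernel.shape[1] // 4
    c = np.zeros((batch_size, units))
    h = np.zeros((batch_size, units))
    outputs = np.zeros((batch_size, time_steps, units))
    for t in range(time_steps):
        # One cell step: project [x_t; h_{t-1}] and split into the four gates.
        gates = np.matmul(np.concatenate([inputs[:, t, :], h], axis=-1), kernel) + bias
        i, j, f, o = np.split(gates, 4, axis=-1)
        c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
        h = np.tanh(c) * sigmoid(o)
        outputs[:, t, :] = h
    return outputs, (c, h)
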
def test_functional_api_multi_stream_nn(self):
    # Input Space of the network.
    input_space = Dict(
        {
            "img": FloatBox(shape=(6, 6, 3)),  # some RGB img
            "txt": TextBox()  # some text
        },
        add_batch_rank=True, add_time_rank=True
    )

    img, txt = ContainerSplitter("img", "txt")(input_space)

    # Complex NN assembly via our Keras-style functional API.
    # Fold the text input into a single batch rank.
    folded_text = ReShape(fold_time_rank=True)(txt)
    # The string layer will create batched AND time-ranked (individual words) hash outputs (int64).
    string_bucket_out, lengths = StringToHashBucket(num_hash_buckets=5)(folded_text)
    # Batched and time-ranked embedding output (floats) with embed dim=n.
    embedding_out = EmbeddingLookup(embed_dim=10, vocab_size=5)(string_bucket_out)
    # Pass the embeddings through a text LSTM and use the last output (reduce time-rank).
    string_lstm_out, _ = LSTMLayer(units=2, return_sequences=False, scope="lstm-layer-txt")(
        embedding_out, sequence_length=lengths
    )
    # Unfold to get the original time-rank back.
    string_lstm_out_unfolded = ReShape(unfold_time_rank=True)(string_lstm_out, txt)

    # Parallel image stream via one CNN layer plus dense.
    folded_img = ReShape(fold_time_rank=True, scope="img-fold")(img)
    cnn_out = Conv2DLayer(filters=1, kernel_size=2, strides=2)(folded_img)
    unfolded_cnn_out = ReShape(unfold_time_rank=True, scope="img-unfold")(cnn_out, img)
    unfolded_cnn_out_flattened = ReShape(flatten=True, scope="img-flat")(unfolded_cnn_out)
    dense_out = DenseLayer(units=2, scope="dense-0")(unfolded_cnn_out_flattened)

    # Concat everything.
    concat_out = ConcatLayer()(string_lstm_out_unfolded, dense_out)

    # LSTM output has batch+time.
    main_lstm_out, internal_states = LSTMLayer(units=2, scope="lstm-layer-main")(concat_out)

    dense1_after_lstm_out = DenseLayer(units=3, scope="dense-1")(main_lstm_out)
    dense2_after_lstm_out = DenseLayer(units=2, scope="dense-2")(dense1_after_lstm_out)
    dense3_after_lstm_out = DenseLayer(units=1, scope="dense-3")(dense2_after_lstm_out)

    # A NN with 3 outputs.
    neural_net = NeuralNetwork(outputs=[dense3_after_lstm_out, main_lstm_out, internal_states])

    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_space))

    # Batch of size=4, time-steps=2.
    sample_shape = (4, 2)
    input_ = input_space.sample(sample_shape)

    out = test.test(("call", input_), expected_outputs=None)

    # Main output (dense out after LSTM).
    self.assertTrue(out[0].shape == sample_shape + (1,))  # 1 unit in the last dense layer
    self.assertTrue(out[0].dtype == np.float32)
    # Main-LSTM out.
    self.assertTrue(out[1].shape == sample_shape + (2,))  # 2 LSTM units
    self.assertTrue(out[1].dtype == np.float32)
    # Main-LSTM internal states (c and h, batch rank only).
    self.assertTrue(out[2][0].shape == sample_shape[:1] + (2,))  # 2 LSTM units
    self.assertTrue(out[2][0].dtype == np.float32)
    self.assertTrue(out[2][1].shape == sample_shape[:1] + (2,))  # 2 LSTM units
    self.assertTrue(out[2][1].dtype == np.float32)

    test.terminate()

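# The fold/unfold pattern above exists because layers such as Conv2D and
# StringToHashBucket expect a single batch rank, while the network's inputs
# carry batch AND time ranks. A minimal numpy sketch of the assumed reshape
# semantics (the names `fold_time_rank`/`unfold_time_rank` are hypothetical):
def fold_time_rank(x):
    # (batch, time, ...) -> (batch * time, ...)
    return np.reshape(x, (-1,) + x.shape[2:])

def unfold_time_rank(x, batch_size, time_steps):
    # (batch * time, ...) -> (batch, time, ...)
    return np.reshape(x, (batch_size, time_steps) + x.shape[1:])
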
def test_lstm_nn_with_custom_apply(self):
    # Space must contain batch AND time dimensions (otherwise, NNLayer will complain).
    units = 3
    batch_size = 2
    time_steps = 4
    input_nodes = 2
    input_space = FloatBox(shape=(input_nodes,), add_batch_rank=True, add_time_rank=True)
    internal_states_space = Tuple(
        FloatBox(shape=(units,)), FloatBox(shape=(units,)), add_batch_rank=True
    )

    def custom_apply(self, input_, internal_states=None):
        d0_out = self.get_sub_component_by_name("d0").apply(input_)
        lstm_out = self.get_sub_component_by_name("lstm").apply(d0_out, internal_states)
        d1_out = self.get_sub_component_by_name("d1").apply(lstm_out["output"])
        return dict(output=d1_out, last_internal_states=lstm_out["last_internal_states"])

    # Create a simple neural net with the above custom API-method.
    neural_net = NeuralNetwork(
        DenseLayer(units, scope="d0"),
        LSTMLayer(units, scope="lstm"),
        DenseLayer(units, scope="d1"),
        api_methods={("apply", custom_apply)}
    )

    # Do not seed, we calculate expectations manually.
    test = ComponentTest(
        component=neural_net,
        input_spaces=dict(input_=input_space, internal_states=internal_states_space)
    )

    # Batch of size=2, time-steps=4.
    input_ = input_space.sample((batch_size, time_steps))
    internal_states = internal_states_space.sample(batch_size)

    # Calculate output manually.
    w0_value = test.read_variable_values(
        neural_net.variable_registry["neural-network/d0/dense/kernel"])
    b0_value = test.read_variable_values(
        neural_net.variable_registry["neural-network/d0/dense/bias"])
    w1_value = test.read_variable_values(
        neural_net.variable_registry["neural-network/d1/dense/kernel"])
    b1_value = test.read_variable_values(
        neural_net.variable_registry["neural-network/d1/dense/bias"])
    lstm_w_value = test.read_variable_values(
        neural_net.variable_registry["neural-network/lstm/lstm-cell/kernel"])
    lstm_b_value = test.read_variable_values(
        neural_net.variable_registry["neural-network/lstm/lstm-cell/bias"])

    d0_out = dense_layer(input_, w0_value, b0_value)
    lstm_out, last_internal_states = lstm_layer(
        d0_out, lstm_w_value, lstm_b_value,
        initial_internal_states=internal_states, time_major=False
    )
    d1_out = dense_layer(lstm_out, w1_value, b1_value)
    expected = dict(output=d1_out, last_internal_states=last_internal_states)

    test.test(("apply", [input_, internal_states]), expected_outputs=expected, decimals=5)

    test.terminate()

def test_keras_style_complex_multi_stream_nn(self):
    # 3 inputs.
    input_spaces = [
        Dict({
            "img": FloatBox(shape=(6, 6, 3)),
            "int": IntBox(3)
        }, add_batch_rank=True, add_time_rank=True),
        FloatBox(shape=(2,), add_batch_rank=True),
        Tuple(IntBox(2), TextBox(), add_batch_rank=True, add_time_rank=True)
    ]

    # Same NN as in the test above, only using some of the sub-Spaces from the input spaces.
    # Tests whether this NN can automatically add the correct splitters.
    folded_text = ReShape(fold_time_rank=True)(input_spaces[2][1])
    # The string layer will create batched AND time-ranked (individual words) hash outputs (int64).
    string_bucket_out, lengths = StringToHashBucket(num_hash_buckets=5)(folded_text)
    # Batched and time-ranked embedding output (floats) with embed dim=n.
    embedding_out = EmbeddingLookup(embed_dim=10, vocab_size=5)(string_bucket_out)
    # Pass the embeddings through a text LSTM and use the last output (reduce time-rank).
    string_lstm_out, _ = LSTMLayer(units=2, return_sequences=False, scope="lstm-layer-txt")(
        embedding_out, sequence_length=lengths
    )
    # Unfold to get the original time-rank back.
    string_lstm_out_unfolded = ReShape(unfold_time_rank=True)(string_lstm_out, input_spaces[2][1])

    # Parallel image stream via one CNN layer plus dense.
    folded_img = ReShape(fold_time_rank=True, scope="img-fold")(input_spaces[0]["img"])
    cnn_out = Conv2DLayer(filters=1, kernel_size=2, strides=2)(folded_img)
    unfolded_cnn_out = ReShape(unfold_time_rank=True, scope="img-unfold")(cnn_out, input_spaces[0]["img"])
    unfolded_cnn_out_flattened = ReShape(flatten=True, scope="img-flat")(unfolded_cnn_out)
    dense_out = DenseLayer(units=2, scope="dense-0")(unfolded_cnn_out_flattened)

    # Concat everything.
    concat_out = ConcatLayer()(string_lstm_out_unfolded, dense_out)

    # LSTM output has batch+time.
    main_lstm_out, internal_states = LSTMLayer(units=2, scope="lstm-layer-main")(concat_out)

    dense1_after_lstm_out = DenseLayer(units=3, scope="dense-1")(main_lstm_out)
    dense2_after_lstm_out = DenseLayer(units=2, scope="dense-2")(dense1_after_lstm_out)
    dense3_after_lstm_out = DenseLayer(units=1, scope="dense-3")(dense2_after_lstm_out)

    # A NN with 3 outputs.
    neural_net = NeuralNetwork(
        inputs=input_spaces,
        outputs=[dense3_after_lstm_out, main_lstm_out, internal_states]
    )

    test = ComponentTest(component=neural_net, input_spaces=dict(inputs=input_spaces))

    # Batch of size=4, time-steps=2.
    sample_shape = (4, 2)
    input_ = [
        input_spaces[0].sample(sample_shape),
        input_spaces[1].sample(sample_shape[0]),
        input_spaces[2].sample(sample_shape)
    ]

    out = test.test(("call", tuple(input_)), expected_outputs=None)

    # Main output (dense out after LSTM).
    self.assertTrue(out[0].shape == sample_shape + (1,))  # 1 unit in the last dense layer
    self.assertTrue(out[0].dtype == np.float32)
    # Main-LSTM out.
    self.assertTrue(out[1].shape == sample_shape + (2,))  # 2 LSTM units
    self.assertTrue(out[1].dtype == np.float32)
    # Main-LSTM internal states (c and h, batch rank only).
    self.assertTrue(out[2][0].shape == sample_shape[:1] + (2,))  # 2 LSTM units
    self.assertTrue(out[2][0].dtype == np.float32)
    self.assertTrue(out[2][1].shape == sample_shape[:1] + (2,))  # 2 LSTM units
    self.assertTrue(out[2][1].dtype == np.float32)

    test.terminate()