def test_stacked_lstm(self):
    """Two stacked LSTM layers should learn the sequence task
    to at least 80% test accuracy.
    """
    x_train, x_test, y_train, y_test = self.data

    # Both LSTM layers use the same weight initialization strategy.
    weight_init = dict(
        input_weights=init.Normal(0.1),
        hidden_weights=init.Normal(0.1),
    )
    model = algorithms.RMSProp(
        [
            layers.Input(self.n_time_steps),
            layers.Embedding(self.n_categories, 10),
            # First LSTM keeps per-step outputs so that the
            # second LSTM receives the whole sequence.
            layers.LSTM(n_units=10, only_return_final=False, **weight_init),
            layers.LSTM(n_units=2, **weight_init),
            layers.Sigmoid(1),
        ],
        step=0.05,
        verbose=False,
        batch_size=1,
        loss='binary_crossentropy',
    )
    model.train(x_train, y_train, x_test, y_test, epochs=20)

    predictions = model.predict(x_test).round()
    accuracy = (predictions.T == y_test).mean()
    self.assertGreaterEqual(accuracy, 0.8)
def test_embedding_variables(self):
    """Embedding weights should materialize only after the network
    outputs have been created.
    """
    network = layers.join(
        layers.Input(2),
        layers.Embedding(3, 5, name='embed'),
    )
    # Before any output was built the layer owns no variables.
    self.assertDictEqual(network.layer('embed').variables, {})

    # Accessing the outputs triggers variable creation.
    network.outputs

    created = network.layer('embed').variables
    self.assertSequenceEqual(list(created.keys()), ['weight'])
    self.assertShapesEqual(created['weight'].shape, (3, 5))
def test_embedding_layer(self):
    """Embedding must map integer indices to the matching rows
    of its weight matrix.
    """
    weight = np.arange(10).reshape((5, 2))
    network = layers.join(
        layers.Input(1),
        layers.Embedding(5, 2, weight=weight),
    )

    indices = asfloat(np.array([[0, 1, 4]]).T)
    result = self.eval(network.output(indices))

    self.assertShapesEqual(network.output_shape, (None, 1, 2))
    # Rows 0, 1 and 4 of the weight matrix, one per input sample.
    np.testing.assert_array_equal(
        np.array([[[0, 1]], [[2, 3]], [[8, 9]]]),
        result,
    )
def train_lstm(self, data, **lstm_options):
    """Train a single-LSTM network on ``data`` and return its
    test-set accuracy.

    Extra keyword arguments are forwarded to the LSTM layer.
    """
    x_train, x_test, y_train, y_test = data

    model = algorithms.RMSProp(
        [
            layers.Input(self.n_time_steps),
            layers.Embedding(self.n_categories, 10),
            layers.LSTM(20, **lstm_options),
            layers.Sigmoid(1),
        ],
        step=0.05,
        verbose=False,
        batch_size=16,
        error='binary_crossentropy',
    )
    model.train(x_train, y_train, x_test, y_test, epochs=20)

    predictions = model.predict(x_test).round()
    return (predictions.T == y_test).mean()
def test_embedding_layer(self):
    """Embedding output must contain the weight-matrix rows selected
    by the integer input indices.
    """
    weight = np.arange(10).reshape((5, 2))
    embedding_layer = layers.Embedding(5, 2, weight=weight)
    connection = layers.join(layers.Input(1), embedding_layer)
    connection.initialize()

    indices = asfloat(np.array([[0, 1, 4]]).T)
    result = self.eval(connection.output(indices))

    self.assertEqual(embedding_layer.output_shape, (1, 2))
    # Rows 0, 1 and 4 of the weight matrix, one per input sample.
    np.testing.assert_array_equal(
        np.array([[[0, 1]], [[2, 3]], [[8, 9]]]),
        result,
    )
def test_stacked_gru(self):
    """Two stacked GRU layers should reach at least 80% test accuracy."""
    x_train, x_test, y_train, y_test = self.data

    model = algorithms.RMSProp(
        [
            layers.Input(self.n_time_steps),
            layers.Embedding(self.n_categories, 10),
            # First GRU returns per-step outputs for the second GRU.
            layers.GRU(10, only_return_final=False),
            layers.GRU(1),
            layers.Sigmoid(1),
        ],
        step=0.01,
        verbose=False,
        batch_size=1,
        loss='binary_crossentropy',
    )
    model.train(x_train, y_train, x_test, y_test, epochs=10)

    predictions = model.predict(x_test).round()
    self.assertGreaterEqual((predictions.T == y_test).mean(), 0.8)
def test_gru_with_4d_input(self):
    """GRU should accept input with an extra (fourth) dimension."""
    x_train, x_test, y_train, y_test = self.data

    model = algorithms.RMSProp(
        [
            layers.Input(self.n_time_steps),
            layers.Embedding(self.n_categories, 10),
            # Reshape the embedded sequence into a 4D tensor.
            layers.Reshape((self.n_time_steps, 5, 2), name='reshape'),
            layers.GRU(10),
            layers.Sigmoid(1),
        ],
        step=0.1,
        verbose=False,
        batch_size=1,
        error='binary_crossentropy',
    )
    model.train(x_train, y_train, x_test, y_test, epochs=2)

    reshape = model.connection.end('reshape')
    # output_shape excludes the batch axis, hence the +1.
    self.assertEqual(4, len(reshape.output_shape) + 1)
def test_stacked_gru_with_enabled_backwards_option(self):
    """Backwards GRUs fed time-reversed sequences should still
    learn the task (at least 90% test accuracy).
    """
    x_train, x_test, y_train, y_test = self.data

    # Flip the time axis before feeding the backwards-running GRUs.
    x_train = x_train[:, ::-1]
    x_test = x_test[:, ::-1]

    model = algorithms.RMSProp(
        [
            layers.Input(self.n_time_steps),
            layers.Embedding(self.n_categories, 10),
            layers.GRU(10, only_return_final=False, backwards=True),
            layers.GRU(2, backwards=True),
            layers.Sigmoid(1),
        ],
        step=0.1,
        verbose=False,
        batch_size=1,
        error='binary_crossentropy',
    )
    model.train(x_train, y_train, x_test, y_test, epochs=20)

    predictions = model.predict(x_test).round()
    self.assertGreaterEqual((predictions.T == y_test).mean(), 0.9)
# that all samples has the same length. This trick allows to # train network with multiple independent samples. data = add_padding(data) x_train, x_test, y_train, y_test = train_test_split(data, labels, train_size=0.8) n_categories = len(reber.avaliable_letters) + 1 # +1 for zero paddings n_time_steps = x_train.shape[1] network = algorithms.RMSProp( [ layers.Input(n_time_steps), # shape: (n_samples, n_time_steps) layers.Embedding(n_categories, 10), # shape: (n_samples, n_time_steps, 10) # unroll_scan - speed up calculation for short sequences layers.GRU(20, unroll_scan=True), # shape: (n_samples, 20) layers.Sigmoid(1), # shape: (n_samples, 1) ], step=0.05, verbose=True, batch_size=64, error='binary_crossentropy', ) network.train(x_train, y_train, x_test, y_test, epochs=20)
x_test_cat = convert_categorical.transform(x_test[:, :3]) x_test_num = only_numerical(x_test) network = algorithms.Momentum( [ [ [ # 3 categorical inputs layers.Input(3), # Train embedding matrix for categorical inputs. # It has 18 different unique categories (6 categories # per each of the 3 columns). Next layer projects each # category into 4 dimensional space. Output shape from # the layer should be: (batch_size, 3, 4) layers.Embedding(n_unique_categories, 4), # Reshape (batch_size, 3, 4) to (batch_size, 12) layers.Reshape(), ], [ # 17 numerical inputs layers.Input(17), ] ], # Concatenate (batch_size, 12) and (batch_size, 17) # into one matrix with shape (batch_size, 29) layers.Concatenate(), layers.Relu(128), layers.Relu(32) > layers.Dropout(0.5),
def test_embedding_output_shape(self):
    """A stand-alone embedding layer reports no output shape."""
    embedding = layers.Embedding(5, 2)
    self.assertEqual(None, embedding.output_shape)
def test_embedding_layer_repr(self):
    """String representation shows the embedding dimensions."""
    self.assertEqual(str(layers.Embedding(5, 2)), "Embedding(5, 2)")
def test_embedding_layer_repr(self):
    """Repr shows dimensions, weight initializer and generated name."""
    expected = (
        "Embedding(5, 2, weight=HeNormal(gain=1.0), "
        "name='embedding-1')"
    )
    self.assertEqual(str(layers.Embedding(5, 2)), expected)