def test_add_lstm_cell(self):
    ''' Add LSTM cell. '''
    net = Network('LSTM')
    net.set_input_layer(InputLayer(512, 1))

    # First cell takes the network input for all of x, C, and h.
    cur_c, cur_h = nns.add_lstm_cell(net, 'cell0', 512,
                                     net.INPUT_LAYER_KEY,
                                     net.INPUT_LAYER_KEY,
                                     net.INPUT_LAYER_KEY)
    # Chain two more cells, each consuming the previous cell state/hidden.
    for idx in (1, 2):
        cur_c, cur_h = nns.add_lstm_cell(net, 'cell{}'.format(idx), 512,
                                         net.INPUT_LAYER_KEY, cur_c, cur_h)

    # Sum filter sizes over all layers; layers without weights raise
    # AttributeError and are skipped.
    num_weights = 0
    for name in net:
        layer = net[name]
        try:
            num_weights += layer.total_filter_size()
        except AttributeError:
            pass

    # 4 gates per cell, each with two 512x512 weight matrices, 3 cells.
    self.assertEqual(num_weights, 512 * 512 * 2 * 4 * 3)
def test_add_lstm_cell_not_in(self):
    ''' Add LSTM cell input not in. '''
    # Try each of the three input positions (x, cell, hidden) with a
    # layer name that does not exist in the network.
    for bad_pos in range(3):
        net = Network('LSTM')
        net.set_input_layer(InputLayer(512, 1))

        args = [net.INPUT_LAYER_KEY] * 3
        args[bad_pos] = 'a'

        with self.assertRaisesRegex(ValueError, 'add_lstm_cell: .*in.*'):
            _ = nns.add_lstm_cell(net, 'cell0', 512, *args)
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, EltwiseLayer

from nn_dataflow.nns import add_lstm_cell

'''
LSTM from GNMT.

Sutskever, Vinyals, Le, Google, NIPS 2014
'''

NN = Network('GNMT')

NN.set_input_layer(InputLayer(1000, 1))

# Number of stacked LSTM layers.
NL = 4

# Word embedding is a simple lookup.
# Exclude or ignore embedding processing.
WE = NN.INPUT_LAYER_KEY

# Stacked LSTM: each layer's hidden output feeds the next layer's input.
X = WE
for idx in range(NL):
    C, H = add_lstm_cell(NN, 'cell_l{}'.format(idx), 1000, X)
    X = H

# log(p), softmax.
NN.add('Wd', EltwiseLayer(1000, 1, 1), prevs=(X, ))
def test_add_lstm_cell_invalid_type(self):
    ''' Add LSTM cell with invalid type. '''
    # Passing a layer instead of a Network must be rejected.
    # Use assertRaisesRegex: the 'Regexp' spelling is a deprecated alias
    # removed in Python 3.12, and the sibling tests already use the
    # modern name.
    with self.assertRaisesRegex(TypeError, 'add_lstm_cell: .*network.*'):
        _ = nns.add_lstm_cell(InputLayer(512, 1), 'cell0', 512,
                              None, None, None)
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the BSD-3 License for more details.

You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""

from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, EltwiseLayer

from nn_dataflow.nns import add_lstm_cell

'''
LSTM from Show and Tell.

Vinyals et al., Google, CVPR 2015
'''

NN = Network('ShowTell')

# 512-dimensional input vector.
NN.set_input_layer(InputLayer(512, 1))

# Word embedding is a simple lookup.
# Exclude or ignore embedding processing.
WE = NN.INPUT_LAYER_KEY

# LSTM.
# A single 512-unit LSTM cell fed directly by the (embedded) input.
C, H = add_lstm_cell(NN, 'cell', 512, WE)

# log(p), softmax.
# NOTE(review): an EltwiseLayer stands in for the output projection here —
# presumably the softmax cost itself is excluded like the embedding; confirm.
NN.add('Wd', EltwiseLayer(512, 1, 1), prevs=(H, ))
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the BSD-3 License for more details.

You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""

from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer

from nn_dataflow.nns import add_lstm_cell

'''
LSTM for phoneme classification.

Graves and Schmidhuber, 2005
'''

NN = Network('PHONEME')

# 26 input features per frame.
NN.set_input_layer(InputLayer(26, 1))

# Input.
# Fully-connected projection from the 26 input features to 140 units.
NN.add('We', FCLayer(26, 140), prevs=(NN.INPUT_LAYER_KEY, ))

# LSTM.
# A single 140-unit LSTM cell fed by the input projection 'We'.
C, H = add_lstm_cell(NN, 'cell', 140, 'We')

# Output.
# Fully-connected classifier from 140 hidden units to 61 phoneme classes.
NN.add('Wd', FCLayer(140, 61), prevs=(H, ))