def testNNMultipleInputs(self):
  nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.tanh)
  x = [tf.placeholder(dtype=tf.float32, shape=[5, 7]),
       tf.placeholder(dtype=tf.float32, shape=[5, 3]),
       tf.placeholder(dtype=tf.float32, shape=[5, 5])]
  y = nn(*x)

  xs = self.CheckNN(y, nn, 'Tanh')
  self.assertEqual(len(x), len(xs))
  for u, v in zip(x, xs):
    self.assertIs(u, v)
def __init__(self,
             depth,
             bias=LSTMBiasInit,
             initializer=block_util.RsqrtInitializer(),
             name=None):
  super(LSTM, self).__init__([depth], name)

  with self._BlockScope():
    self._depth = depth
    self._nn = blocks_std.NN(
        4 * depth, bias=bias, act=None, initializer=initializer)
    self._hidden_linear = blocks_std.Linear(
        4 * depth, initializer=initializer)
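# Note (illustrative, not part of the original file): the NN block above
# produces a single pre-activation of width 4 * depth. In a standard LSTM
# formulation this is split into the four gate components (input, cell
# candidate, forget, output), roughly along the lines of:
#
#   pre = self._nn(x) + self._hidden_linear(hidden)
#   i, j, f, o = tf.split(pre, 4, axis=1)
#
# The split order and the use of tf.split here are assumptions for
# illustration only; the actual gate wiring lives in the LSTM's apply logic,
# not in this constructor.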
def testNNWithBiasWithAct(self):
  nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.square)
  x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
  y = nn(x)
  self.assertIs(x, self.CheckNN(y, nn, 'Square')[0])
def testNNWithoutBiasWithAct(self):
  nn = blocks_std.NN(10, act=tf.nn.relu, bias=None)
  x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
  y = nn(x)
  self.assertIs(x, self.CheckNN(y, nn, 'Relu')[0])
def testNNWithoutActWithoutBias(self):
  nn = blocks_std.NN(10, act=None, bias=None)
  x = tf.placeholder(dtype=tf.float32, shape=[5, 7])
  y = nn(x)
  self.assertIs(x, self.CheckNN(y, nn)[0])
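# Note (illustrative): with act=None and bias=None the NN block reduces to its
# underlying linear transform, which is why CheckNN is called here without an
# activation op name, in contrast to the 'Tanh'/'Square'/'Relu' cases above.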