Example #1
    def testTransformerFeedForwardLayer(self):
        with self.session(use_gpu=True) as sess:
            tf.set_random_seed(3980847392)
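            # Random [5, 2, 3] inputs; the all-zero paddings mean no position is masked out.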
            inputs = tf.random_normal([5, 2, 3], seed=948387483)
            paddings = tf.zeros([5, 2])
            p = layers_with_attention.TransformerFeedForwardLayer.Params()
            p.name = 'transformer_fflayer'
            p.input_dim = 3
            p.hidden_dim = 7
            transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(
                p)

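            # Forward-propagate with the layer's default theta (its freshly initialized
            # variables) and compare the result against the golden values below.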
            h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
            tf.global_variables_initializer().run()
            actual_layer_output = sess.run(h)
            # pylint: disable=bad-whitespace
            # pyformat: disable
            expected_output = [[[-0.88366592, -0.05049637, 0.01003706],
                                [-0.10550675, 1.68050027, 2.29110384]],
                               [[-1.30083609, -0.40521634, 0.1911681],
                                [1.2597878, 1.45850968, 1.58734488]],
                               [[0.10373873, -0.2716777, 0.2314173],
                                [0.46293864, -0.06359965, 1.20189023]],
                               [[0.3673597, -0.1691664, 0.78656065],
                                [-1.51081395, -0.70281881, -0.9093715]],
                               [[-1.04800868, -0.70610946, -0.35321558],
                                [-1.92480004, 0.08361804, 0.62713993]]]
            # pyformat: enable
            # pylint: enable=bad-whitespace
            print(np.array_repr(actual_layer_output))
            self.assertAllClose(actual_layer_output, expected_output)
Example #2
  def testTransformerFeedForwardLayerSpecOutDim(self):
    with self.session(use_gpu=True) as sess:
      tf.set_random_seed(3980847392)
      inputs = tf.random_normal([5, 2, 3], seed=948387483)
      paddings = tf.zeros([5, 2])
      p = layers_with_attention.TransformerFeedForwardLayer.Params()
      p.name = 'transformer_fflayer'
      p.input_dim = 3
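      # Unlike the first example, the output dimension (5) differs from the input dimension (3).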
      p.output_dim = 5
      p.hidden_dim = 7
      transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)

      h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_output = [
          [[ 1.42697251,  0.79269135, -0.85500956, -0.8122285 , -1.56555367],
           [-1.7876718 ,  0.26025945, -3.18244219,  1.34756351,  0.25739765]],
          [[ 1.27962363,  0.88677615, -1.23556185, -1.06855559, -1.27293301],
           [ 0.89336467,  2.46229172,  0.11302143,  1.19385004, -2.37805009]],
          [[ 2.80146003, -0.66912627,  1.50160134, -2.30645609, -1.18872762],
           [ 1.61967182, -0.51639485,  0.24441491, -1.0871532 , -0.95539457]],
          [[ 2.03333473, -0.78205228,  0.71245927, -1.63276744, -0.91654319],
           [ 1.54542768, -0.30343491,  0.10666496, -1.67965126, -0.15671858]],
          [[ 1.60873222, -1.88402128,  0.79040933, -1.97199082,  0.4778356 ],
           [-0.13516766, -0.42583361, -1.86275542, -1.09650302,  0.83263111]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      print(np.array_repr(actual_layer_output))
      self.assertAllClose(actual_layer_output, expected_output)
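
For orientation, here is a minimal NumPy sketch of the standard Transformer feed-forward block (layer norm, a hidden dense layer with ReLU, an output projection, and a residual add), which TransformerFeedForwardLayer is assumed to follow. The weight names and initialization are purely illustrative; dropout and the residual projection used when output_dim differs from input_dim are omitted.

# Minimal sketch of a Transformer feed-forward block, assuming the standard
# structure: layer norm -> dense + ReLU -> dense -> residual add.
# All names and initializations here are illustrative, not lingvo's API.
import numpy as np


def layer_norm(x, eps=1e-6):
    # Normalize over the last (feature) dimension.
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps)


def transformer_ffn(x, w1, b1, w2, b2):
    # x: [seq_len, batch, input_dim]; w1: [input_dim, hidden_dim]; w2: [hidden_dim, input_dim].
    h = np.maximum(layer_norm(x) @ w1 + b1, 0.0)  # hidden dense layer with ReLU
    return x + (h @ w2 + b2)                      # project back and add the residual


# Shapes mirroring the first test: [5, 2, 3] inputs with hidden_dim = 7.
rng = np.random.RandomState(0)
x = rng.randn(5, 2, 3)
w1, b1 = rng.randn(3, 7) * 0.1, np.zeros(7)
w2, b2 = rng.randn(7, 3) * 0.1, np.zeros(3)
print(transformer_ffn(x, w1, b1, w2, b2).shape)   # (5, 2, 3)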