def testConcatWithSequence(self):
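    # Concatenating a two-element sequence along axis 0 should match
    # numpy.concatenate on the underlying arrays.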
    x = prettytensor.wrap_sequence([self.input_data, self.input_data * 2])
    x = x.concat(0)
    result = self.RunTensor(x)

    testing.assert_allclose(result, numpy.concatenate([self.input_data,
                                                       self.input_data * 2]))

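    # Concatenating a single-element sequence returns that element unchanged.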
    x = prettytensor.wrap_sequence([self.input_data])
    x = x.concat(0)
    result = self.RunTensor(x)

    testing.assert_allclose(result, self.input_data)

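    # Extra tensors passed to concat are appended after the sequence's own
    # elements before concatenation.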
    x = prettytensor.wrap_sequence([self.input_data])
    x = x.concat(0, [self.input_data * 2])
    result = self.RunTensor(x)

    testing.assert_allclose(result, numpy.concatenate([self.input_data,
                                                       self.input_data * 2]))
Example #3
    def testMathOperators(self):
        operators = [operator.add, operator.sub, operator.mul]

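        # A two-element sequence, used to check that operators also apply
        # element-wise across sequences.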
        input2 = self.input * 4
        sequence_input = prettytensor.wrap_sequence([self.input, input2])

        # Test reverse ops
        for op in operators:
            print(op.__name__)
            t1 = op(2., self.input)
            t2 = op(2., self.input_layer)
            seq1 = op([2., 1.], sequence_input)
            seq2 = op(2., sequence_input)

            # Used to validate the sequence.
            t3 = op(1., input2)
            t4 = op(2., input2)

            r1 = self.RunTensor(t1)
            r2 = self.RunTensor(t2)
            r3 = self.RunTensor(t3)
            r4 = self.RunTensor(t4)
            seq_r1 = self.RunTensor(seq1)
            seq_r2 = self.RunTensor(seq2)

            self.assertTrue(isinstance(t2, pretty_tensor_class.Layer))
            testing.assert_allclose(r1, r2, rtol=TOLERANCE)

            testing.assert_allclose(seq_r1[0], r2, rtol=TOLERANCE)
            testing.assert_allclose(seq_r1[1], r3, rtol=TOLERANCE)
            testing.assert_allclose(seq_r2[0], r2, rtol=TOLERANCE)
            testing.assert_allclose(seq_r2[1], r4, rtol=TOLERANCE)

        # Test forward ops
        for op in operators:
            t1 = op(self.input, 2.)
            t2 = op(self.input_layer, 2.)
            seq1 = op(sequence_input, [2., 1.])
            seq2 = op(sequence_input, 2.)

            # Used to validate the sequence.
            t3 = op(input2, 1.)
            t4 = op(input2, 2.)

            r1 = self.RunTensor(t1)
            r2 = self.RunTensor(t2)
            r3 = self.RunTensor(t3)
            r4 = self.RunTensor(t4)
            seq_r1 = self.RunTensor(seq1)
            seq_r2 = self.RunTensor(seq2)

            self.assertTrue(isinstance(t2, pretty_tensor_class.Layer))
            testing.assert_allclose(r1,
                                    r2,
                                    rtol=TOLERANCE,
                                    err_msg='Op: %s' % op.__name__)

            testing.assert_allclose(seq_r1[0], r2, rtol=TOLERANCE)
            testing.assert_allclose(seq_r1[1], r3, rtol=TOLERANCE)
            testing.assert_allclose(seq_r2[0], r2, rtol=TOLERANCE)
            testing.assert_allclose(seq_r2[1], r4, rtol=TOLERANCE)

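        # Test binary ops mixing a plain Tensor and a Layer (division included);
        # the result is a Layer only when the left operand is a Layer.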
        operators.extend([operator.truediv])
        for op in operators:
            t1 = op(self.input, self.input_layer)
            t2 = op(self.input_layer, self.input)
            r1 = self.RunTensor(t1)
            r2 = self.RunTensor(t2)

            self.assertFalse(isinstance(t1, pretty_tensor_class.Layer))
            self.assertTrue(isinstance(t2, pretty_tensor_class.Layer))
            testing.assert_allclose(r1,
                                    r2,
                                    rtol=TOLERANCE,
                                    err_msg='Op: %s' % op.__name__)

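        # Test unary ops on a Tensor, a Layer, and a sequence.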
        unary = [operator.neg, operator.abs]
        for op in unary:
            t1 = op(self.input)
            t2 = op(self.input_layer)
            r1 = self.RunTensor(t1)
            r2 = self.RunTensor(t2)

            seq = op(sequence_input)
            seq_r = self.RunTensor(seq)
            t3 = op(input2)
            r3 = self.RunTensor(t3)

            self.assertTrue(isinstance(t2, pretty_tensor_class.Layer))
            testing.assert_allclose(r1,
                                    r2,
                                    rtol=TOLERANCE,
                                    err_msg='Op: %s' % op.__name__)
            testing.assert_allclose(r1,
                                    seq_r[0],
                                    rtol=TOLERANCE,
                                    err_msg='Op: %s' % op.__name__)
            testing.assert_allclose(r3,
                                    seq_r[1],
                                    rtol=TOLERANCE,
                                    err_msg='Op: %s' % op.__name__)
Example #4
  def testArbitraryBatchSizeLstm(self):
    # Tests that the LSTM and the Bookkeeper work when batch_size is not
    # specified at graph creation time (i.e., None).
    super(self.__class__, self).SetBookkeeper(
        prettytensor.bookkeeper_for_new_graph())

    # Build a graph. Specify None for the batch_size dimension.
    placeholder = tf.placeholder(tf.float32, [None, 1])
    input_pt = prettytensor.wrap_sequence([placeholder])
    output, _ = (input_pt
                 .sequence_lstm(4)
                 .squash_sequence()
                 .softmax_classifier(2))

    self.sess.run(tf.initialize_all_variables())

    # Use RecurrentRunner for state saving and managing feeds.
    recurrent_runner = recurrent_networks.RecurrentRunner(batch_size=1)

    # Run with a batch size of 1 for 10 steps, save output for reference.
    out_orig = []
    for t in xrange(10):
      outs = recurrent_runner.run(
          [output.name],
          {placeholder.name: numpy.array([[1.2]])},
          sess=self.sess)
      out = outs[0]
      self.assertEqual(1, len(out))
      self.assertEqual(2, len(out[0]))
      out_orig.append(out[0])

    # Test the reset functionality - after a reset, the results must be
    # identical to what we just got above.
    recurrent_runner.reset()
    for t in xrange(10):
      outs = recurrent_runner.run(
          [output.name],
          {placeholder.name: numpy.array([[1.2]])},
          sess=self.sess)
      out = outs[0]
      self.assertEqual(1, len(out))
      self.assertEqual(2, len(out[0]))
      testing.assert_allclose(out[0], out_orig[t])

    # Test whether the recurrent runner detects changes to the default graph.
    # It should raise a ValueError because RecurrentRunner's state saver
    # information (collected during __init__) is no longer valid.
    with tf.Graph().as_default():
      placeholder2 = tf.placeholder(tf.float32, [None, 1])
      input_pt2 = prettytensor.wrap_sequence([placeholder2])
      output2, _ = (input_pt2
                    .sequence_lstm(4)
                    .squash_sequence()
                    .softmax_classifier(2))
      self.assertRaises(ValueError,
                        recurrent_runner.run,
                        [output2.name], None, self.sess)

    # Run with a batch size of 3; the first and third inputs are identical and
    # must yield identical outputs, and the same output as in the single-batch
    # run above (up to floating point rounding errors).
    recurrent_runner = recurrent_networks.RecurrentRunner(batch_size=3)
    for t in xrange(10):
      outs = recurrent_runner.run(
          [output.name],
          {placeholder.name: numpy.array([[1.2], [3.4], [1.2]])},
          sess=self.sess)
      out = outs[0]
      self.assertEqual(3, len(out))
      self.assertEqual(2, len(out[0]))
      testing.assert_allclose(out[0], out[2], rtol=TOLERANCE)
      testing.assert_allclose(out[0], out_orig[t], rtol=TOLERANCE)
      self.assertFalse((out[0] == out[1]).all())
Example #5
    def performTestArbitraryBatchSizeRnn(self, cell_type):
        # Tests that the LSTM / GRU cells and the Bookkeeper work when
        # batch_size is not specified at graph creation time (i.e., None).
        self.assertTrue(cell_type == 'lstm' or cell_type == 'gru')
        super(self.__class__,
              self).SetBookkeeper(prettytensor.bookkeeper_for_new_graph())

        # Build a graph. Specify None for the batch_size dimension.
        placeholder = tf.placeholder(tf.float32, [None, 1])
        input_pt = prettytensor.wrap_sequence([placeholder])
        if cell_type == 'lstm':
            output, _ = (input_pt.sequence_lstm(
                4).squash_sequence().softmax_classifier(2))
        elif cell_type == 'gru':
            output, _ = (input_pt.sequence_gru(
                4).squash_sequence().softmax_classifier(2))

        self.sess.run(tf.global_variables_initializer())

        # Use RecurrentRunner for state saving and managing feeds.
        recurrent_runner = recurrent_networks.RecurrentRunner(batch_size=1)

        # Run with a batch size of 1 for 10 steps, save output for reference.
        out_orig = []
        for t in xrange(10):
            outs = recurrent_runner.run(
                [output.name], {placeholder.name: numpy.array([[1.2]])},
                sess=self.sess)
            out = outs[0]
            self.assertEqual(1, len(out))
            self.assertEqual(2, len(out[0]))
            out_orig.append(out[0])

        # Test the reset functionality - after a reset, the results must be
        # identical to what we just got above.
        recurrent_runner.reset()
        for t in xrange(10):
            outs = recurrent_runner.run(
                [output.name], {placeholder.name: numpy.array([[1.2]])},
                sess=self.sess)
            out = outs[0]
            self.assertEqual(1, len(out))
            self.assertEqual(2, len(out[0]))
            testing.assert_allclose(out[0], out_orig[t])

        # Test whether the recurrent runner detects changes to the default graph.
        # It should raise a ValueError because RecurrentRunner's state saver
        # information (collected during __init__) is no longer valid.
        with tf.Graph().as_default():
            placeholder2 = tf.placeholder(tf.float32, [None, 1])
            input_pt2 = prettytensor.wrap_sequence([placeholder2])
            if cell_type == 'lstm':
                output2, _ = (input_pt2.sequence_lstm(
                    4).squash_sequence().softmax_classifier(2))
            elif cell_type == 'gru':
                output2, _ = (input_pt2.sequence_gru(
                    4).squash_sequence().softmax_classifier(2))
            self.assertRaises(ValueError, recurrent_runner.run, [output2.name],
                              None, self.sess)

        # Run with a batch size of 3; the first and third inputs are identical
        # and must yield identical outputs, and the same output as in the
        # single-batch run above (up to floating point rounding errors).
        recurrent_runner = recurrent_networks.RecurrentRunner(batch_size=3)
        for t in xrange(10):
            outs = recurrent_runner.run(
                [output.name],
                {placeholder.name: numpy.array([[1.2], [3.4], [1.2]])},
                sess=self.sess)
            out = outs[0]
            self.assertEqual(3, len(out))
            self.assertEqual(2, len(out[0]))
            testing.assert_allclose(out[0], out[2], rtol=TOLERANCE)
            testing.assert_allclose(out[0], out_orig[t], rtol=TOLERANCE)
            # Sanity check to protect against trivial outputs that might hide errors.
            # Need to avoid checking after t = 2 since untrained GRUs have a
            # tendency to converge to large state values, leading to outputs like
            # 1.0, 0.0.
            if cell_type == 'gru' and t > 2:
                continue
            self.assertFalse((out[0] == out[1]).all())
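Since its name does not begin with "test", the helper above is not collected by unittest's default discovery; it is meant to be called from thin per-cell-type test methods. A minimal sketch, assuming the same test class (the wrapper method names are illustrative):

    def testArbitraryBatchSizeLstm(self):
        self.performTestArbitraryBatchSizeRnn('lstm')

    def testArbitraryBatchSizeGru(self):
        self.performTestArbitraryBatchSizeRnn('gru')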