Example #1
  def test_rnn_with_cells(self):
    gru_cell1 = tdl.ScopedLayer(tf.contrib.rnn.GRUCell(num_units=16), 'gru1')
    gru_cell2 = tdl.ScopedLayer(tf.contrib.rnn.GRUCell(num_units=16), 'gru2')

    with tf.variable_scope('gru3') as vscope:
      gru_cell3 = tdl.ScopedLayer(tf.contrib.rnn.GRUCell(num_units=16), vscope)

    lstm_cell = tdl.ScopedLayer(
        tf.contrib.rnn.BasicLSTMCell(num_units=16), 'lstm')

    gru1 = (tdb.InputTransform(lambda s: [ord(c) for c in s]) >>
            tdb.Map(tdb.Scalar('int32') >>
                    tdb.Function(tdl.Embedding(128, 8))) >>
            tdb.RNN(gru_cell1))

    gru2 = (tdb.InputTransform(lambda s: [ord(c) for c in s]) >>
            tdb.Map(tdb.Scalar('int32') >>
                    tdb.Function(tdl.Embedding(128, 8))) >>
            tdb.RNN(gru_cell2, initial_state=tf.ones(16)))

    gru3 = (tdb.InputTransform(lambda s: [ord(c) for c in s]) >>
            tdb.Map(tdb.Scalar('int32') >>
                    tdb.Function(tdl.Embedding(128, 8))) >>
            tdb.RNN(gru_cell3, initial_state=tdb.FromTensor(tf.ones(16))))

    lstm = (tdb.InputTransform(lambda s: [ord(c) for c in s]) >>
            tdb.Map(tdb.Scalar('int32') >>
                    tdb.Function(tdl.Embedding(128, 8))) >>
            tdb.RNN(lstm_cell))

    with self.test_session():
      gru1.eval('abcde')
      gru2.eval('ABCDE')
      gru3.eval('vghj')
      lstm.eval('123abc')
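
The test methods in this listing come from the TensorFlow Fold test suite and are shown without their surrounding imports or test class. A minimal sketch of the aliases they rely on is given below; the exact module paths are inferred from usage and from the tensorflow_fold source layout, so treat them as assumptions rather than the original import block.

import itertools
import numpy as np
import six
import tensorflow as tf
from six.moves import xrange  # the snippets target Python 2-era TensorFlow Fold

# Module aliases used throughout the examples (paths assumed):
from tensorflow_fold.blocks import blocks as tdb
from tensorflow_fold.blocks import blocks           # examples #14 and #18 use the unaliased name
from tensorflow_fold.blocks import block_compiler
from tensorflow_fold.blocks import layers as tdl
from tensorflow_fold.blocks import metrics as tdm
from tensorflow_fold.blocks import plan
from tensorflow_fold.blocks import result_types as tdt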
Example #2
 def test_one_of_mixed_input_type(self):
   block = (tdb.Identity(), tdb.Scalar('int32')) >> tdb.OneOf(
       key_fn=tdb.GetItem(0),
       case_blocks=(tdb.Function(tf.square), tdb.Function(tf.negative)),
       pre_block=tdb.GetItem(1))
   self.assertBuilds(4, block, (0, 2))
   self.assertBuilds(-2, block, (1, 2))
Example #3
 def test_forward_declaration_orphaned_nested(self):
   fwd1 = tdb.ForwardDeclaration(tdt.VoidType(), tdt.TensorType([]))
   fwd2 = tdb.ForwardDeclaration(tdt.SequenceType(tdt.TensorType([])),
                                 tdt.TensorType([]))
   b = tdb.Map(tdb.Scalar()) >> fwd2() >> tdb.Function(tf.negative)
   fwd2.resolve_to(tdb.Fold(tdb.Function(tf.add), fwd1()))
   fwd1.resolve_to(tdb.FromTensor(tf.ones([])))
   self.assertBuilds(-8., b, [3, 4], max_depth=3)
Example #4
 def test_reduce_default_zero(self):
   sum_or_ten = (tdb.Map(tdb.Scalar()) >> tdb.Reduce(tdb.Function(tf.add)))
   self.assertBuilds(0.0, sum_or_ten, [], max_depth=0)
   self.assertBuilds(3.0, sum_or_ten, [1.0, 2.0], max_depth=1)
   self.assertBuilds(20.0, sum_or_ten, [2.0, 4.0, 6.0, 8.0], max_depth=2)
   self.assertBuilds(6.0, sum_or_ten, [1.0, 2.0, 3.0], max_depth=2)
   self.assertBuilds(21.0, sum_or_ten, range(7), max_depth=3)
Example #5
 def test_composition_void(self):
   c = tdb.Composition()
   with c.scope():
     a = tdb.Scalar().reads(c.input)
     b = tdb.Function(tf.negative).reads(a)
     tdm.Metric('foo').reads(b)
     c.output.reads(a)
   self.assertBuilds((42., {'foo': [-42.]}), c, 42, max_depth=2)
Example #6
 def test_fold(self):
   const_ten = np.array(10.0, dtype='float32')
   ten_plus_sum = (tdb.Map(tdb.Scalar()) >>
                   tdb.Fold(tdb.Function(tf.add), const_ten))
   self.assertBuilds(16.0, ten_plus_sum, [1.0, 2.0, 3.0], max_depth=3)
   self.assertBuilds(16.0, ten_plus_sum, [3.0, 2.0, 1.0], max_depth=3)
   self.assertBuilds(20.0, ten_plus_sum, [1.0, 2.0, 3.0, 4.0], max_depth=4)
   self.assertBuilds(20.0, ten_plus_sum, [4.0, 3.0, 2.0, 1.0], max_depth=4)
Example #7
 def test_reduce(self):
   const_ten = np.array(10.0, dtype='float32')
   sum_or_ten = (tdb.Map(tdb.Scalar()) >>
                 tdb.Reduce(tdb.Function(tf.add), const_ten))
   self.assertBuilds(10.0, sum_or_ten, [], max_depth=0)
   self.assertBuilds(3.0, sum_or_ten, [1.0, 2.0], max_depth=1)
   self.assertBuilds(20.0, sum_or_ten, [2.0, 4.0, 6.0, 8.0], max_depth=2)
   self.assertBuilds(6.0, sum_or_ten, [1.0, 2.0, 3.0], max_depth=2)
   self.assertBuilds(21.0, sum_or_ten, range(7), max_depth=3)
Example #8
 def test_composition_nested(self):
   fn1 = times_scalar_block(2.0)
   fn2 = times_scalar_block(3.0)
   c = tdb.Composition([fn1, fn2])
   c.connect(c.input, fn1)
   c.connect(c.input, fn2)
   c.connect((fn1, fn2), c.output)
   c2 = tdb.Scalar() >> c >> tdb.Function(tf.add)
   self.assertBuilds(5.0, c2, 1.0, max_depth=2)
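
Several of the composition examples (#8 through #10, #12, #13, and #16) call a times_scalar_block helper that the listing never defines. The expected values (for instance 1.0*2 + 1.0*3 = 5.0 in the test above) indicate that it multiplies its input by a fixed constant; the following is a plausible sketch, not necessarily the original definition.

def times_scalar_block(scalar):
  # Assumed helper: wrap multiplication by a constant in a Function block so
  # it can be wired into a Composition like any other block.
  return tdb.Function(lambda x: tf.multiply(x, scalar))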
Example #9
  def test_composition_diamond_with_block(self):
    # out = in*2 + in*3
    c = tdb.Composition()
    with c.scope():
      scalar = tdb.Scalar().reads(c.input)
      fn1 = times_scalar_block(2.0).reads(scalar)
      fn2 = times_scalar_block(3.0).reads(scalar)
      c.output.reads(tdb.Function(tf.add).reads(fn1, fn2))

    self.assertBuilds(25., c, 5, max_depth=2)
Example #10
 def test_composition_nested_with_block(self):
   c1 = tdb.Composition()
   with c1.scope():
     scalar = tdb.Scalar().reads(c1.input)
     c2 = tdb.Composition().reads(scalar)
     with c2.scope():
       fn1 = times_scalar_block(2.0).reads(c2.input)
       fn2 = times_scalar_block(3.0).reads(c2.input)
       c2.output.reads(fn1, fn2)
     c1.output.reads(tdb.Function(tf.add).reads(c2))
   self.assertBuilds(5.0, c1, 1.0, max_depth=2)
Example #11
  def test_rnn(self):
    # We have to expand_dims to broadcast x over the batch.
    def f(x, st):
      return (tf.multiply(x, x), tf.add(st, tf.expand_dims(x, 1)))

    intup = (tdb.Map(tdb.Scalar()), tdb.Vector(2))
    block = intup >> tdb.RNN(tdb.Function(f), initial_state_from_input=True)
    self.assertBuilds(([], [0.0, 0.0]), block,
                      ([], [0.0, 0.0]), max_depth=0)
    self.assertBuilds(([1.0, 4.0, 9.0, 16.0], [10.0, 10.0]), block,
                      ([1.0, 2.0, 3.0, 4.0], [0.0, 0.0]), max_depth=4)
    self.assertBuilds(([1.0, 4.0, 9.0, 16.0], [10.0, 10.0]), block,
                      ([1.0, 2.0, 3.0, 4.0], [0.0, 0.0]), max_depth=4)
Example #12
  def test_record_composition(self):
    d = tdb.Record({'a': tdb.Scalar(), 'b': tdb.Scalar()})
    fn1 = times_scalar_block(2.0)
    fn2 = times_scalar_block(3.0)
    fn3 = tdb.Function(tf.add)

    c = tdb.Composition([d, fn1, fn2, fn3])
    c.connect(c.input, d)
    c.connect(d['a'], fn1)
    c.connect(d['b'], fn2)
    c.connect((fn1, fn2), fn3)
    c.connect(fn3, c.output)

    self.assertBuilds(17.0, c, {'a': 1.0, 'b': 5.0}, max_depth=2)
Example #13
  def test_composition_toposort(self):
    fn0 = tdb.Scalar()
    fn1 = times_scalar_block(2.0)
    fn2 = times_scalar_block(3.0)
    fn3 = times_scalar_block(1.0)
    fn4 = tdb.Function(tf.add)

    c = tdb.Composition([fn4, fn3, fn0, fn2, fn1])
    c.connect(c.input, fn0)
    c.connect(fn0, fn1)
    c.connect(fn0, fn2)
    c.connect(fn2, fn3)
    c.connect((fn1, fn3), fn4)
    c.connect(fn4, c.output)
    self.assertBuilds(5.0, c, 1.0, max_depth=3)
Example #14
 def test_run_no_key_fn(self):
   p = plan.InferPlan()
   p.compiler = block_compiler.Compiler.create(
       blocks.Scalar() >> blocks.Function(tf.negative))
   p.logdir = self.get_temp_dir()
   p.examples = xrange(5)
   p.outputs = p.compiler.output_tensors
   results = []
   p.results_fn = results.append
   p.batch_size = 3
   p.chunk_size = 2
   with self.test_session() as sess:
     p.run(session=sess)
   self.assertEqual(1, len(results))
   self.assertEqual([(0,), (-1,), (-2,), (-3,), (-4,)], list(results[0]))
Example #15
  def test_hierarchical_rnn(self):
    char_cell = tdl.ScopedLayer(
        tf.contrib.rnn.BasicLSTMCell(num_units=16), 'char_cell')
    word_cell = tdl.ScopedLayer(
        tf.contrib.rnn.BasicLSTMCell(num_units=32), 'word_cell')

    char_lstm = (tdb.InputTransform(lambda s: [ord(c) for c in s]) >>
                 tdb.Map(tdb.Scalar('int32') >>
                         tdb.Function(tdl.Embedding(128, 8))) >>
                 tdb.RNN(char_cell))
    word_lstm = (tdb.Map(char_lstm >> tdb.GetItem(1) >> tdb.Concat()) >>
                 tdb.RNN(word_cell))

    with self.test_session():
      word_lstm.eval(['the', 'cat', 'sat', 'on', 'a', 'mat'])
Example #16
  def test_composition_diamond(self):
    sc = tdb.Scalar()
    fn1 = times_scalar_block(2.0)
    fn2 = times_scalar_block(3.0)
    fn3 = tdb.Function(tf.add)

    # out = in*2 + in*3
    c = tdb.Composition([sc, fn1, fn2, fn3])
    c.connect(c.input, sc)
    c.connect(sc, fn1)
    c.connect(sc, fn2)
    c.connect((fn1, fn2), fn3)
    c.connect(fn3, c.output)

    self.assertBuilds(25., c, 5, max_depth=2)
Example #17
  def test_forward_declarations(self):
    # Define a simple expression data structure
    nlit = lambda x: {'op': 'lit', 'val': x}
    nadd = lambda x, y: {'op': 'add', 'left': x, 'right': y}
    nexpr = nadd(nadd(nlit(3.0), nlit(5.0)), nlit(2.0))

    # Define a recursive block using forward declarations
    expr_fwd = tdb.ForwardDeclaration(tdt.PyObjectType(),
                                      tdt.TensorType((), 'float32'))
    lit_case = tdb.GetItem('val') >> tdb.Scalar()
    add_case = (tdb.Record({'left': expr_fwd(), 'right': expr_fwd()})
                >> tdb.Function(tf.add))
    expr = tdb.OneOf(lambda x: x['op'], {'lit': lit_case, 'add': add_case})
    expr_fwd.resolve_to(expr)

    self.assertBuilds(10.0, expr, nexpr, max_depth=2)
Example #18
 def create_plan(self, loom_input_tensor):
   p = plan.TrainPlan()
   foo = tf.get_variable('foo', [], tf.float32, tf.constant_initializer(12))
   p.compiler = block_compiler.Compiler.create(
       blocks.Scalar() >> blocks.Function(lambda x: x * foo),
       loom_input_tensor=loom_input_tensor)
   p.losses['foo'] = p.compiler.output_tensors[0]
   p.finalize_stats()
   p.train_op = tf.train.GradientDescentOptimizer(1.0).minimize(
       p.loss_total, global_step=p.global_step)
   p.logdir = self.get_temp_dir()
   p.dev_examples = [2]
   p.is_chief_trainer = True
   p.batch_size = 2
   p.epochs = 3
   p.print_file = six.StringIO()
   return p
Example #19
 def test_max_depth(self):
   self.assertEqual(0, tdb.Scalar().max_depth(42))
   block = (tdb.Map(tdb.Scalar()) >>
            tdb.Fold(tdb.Function(tf.add), tf.zeros([])))
   for i in xrange(5):
     self.assertEqual(i, block.max_depth(range(i)))
Example #20
 def test_zip_with(self):
   block = ((tdb.Map(tdb.Scalar()), tdb.Map(tdb.Scalar())) >>
            tdb.ZipWith(tdb.Function(tf.add)))
   self.assertBuilds([5., 7., 9.], block, ([1, 2, 3], [4, 5, 6]))
Example #21
 def test_broadcast_zip_map(self):
   block = ({'x': tdb.Scalar() >> tdb.Broadcast(),
             'y': tdb.Map(tdb.Scalar())} >> tdb.Zip() >>
            tdb.Map(tdb.Function(tf.add)))
   self.assertBuilds([3., 4., 5.], block, {'x': 2, 'y': [1, 2, 3]})
Example #22
 def test_broadcast_map(self):
   block = tdb.Scalar() >> tdb.Broadcast() >> tdb.Map(
       tdb.Function(tf.negative))
   self.assertBuilds(itertools.repeat(-42.), block, 42)
Example #23
 def test_optional_default_none_type_inference(self):
   child = tdb.Scalar() >> tdb.Function(tf.negative)
   block = tdb.Optional(child)
   self.assertEqual(child.output_type, None)
   child.set_output_type([])
   self.assertEqual(block.output_type, tdt.TensorType([]))
Example #24
 def test_map(self):
   block = tdb.Map(tdb.Scalar() >> tdb.Function(tf.abs))
   self.assertBuilds([], block, [], max_depth=0)
   self.assertBuilds([1.], block, [-1])
   self.assertBuilds([1., 2., 3.], block, [-1, -2, -3])
Example #25
 def test_one_of(self):
   block = tdb.OneOf(lambda x: x > 0,
                     {True: tdb.Scalar(),
                      False: tdb.Scalar() >> tdb.Function(tf.negative)})
   self.assertBuildsConst(3., block, 3)
   self.assertBuildsConst(3., block, -3)
Example #26
 def test_input_transform(self):
   block = tdb.Map(tdb.InputTransform(lambda x: 1 + ord(x) - ord('a')) >>
                   tdb.Scalar('int32') >> tdb.Function(tf.negative))
   self.assertBuilds([-1, -2, -3, -4], block, 'abcd')
Example #27
 def test_tuple_of_seq(self):
   block = tdb.AllOf(
       tdb.Map(tdb.Scalar() >> tdb.Function(tf.negative)),
       tdb.Map(tdb.Scalar() >> tdb.Function(tf.identity)))
   self.assertBuilds(([], []), block, [], max_depth=0)
   self.assertBuilds(([-1., -2.], [1., 2.]), block, [1, 2])
Example #28
 def test_map_map(self):
   block = tdb.Map(tdb.Map(tdb.Scalar() >> tdb.Function(tf.abs)))
   self.assertBuilds([[]], block, [[]], max_depth=0)
   self.assertBuilds([[1., 2., 3.], [4., 5.], [6.], []], block,
                     [[-1, -2, -3], [-4, -5], [-6], []])
Example #29
 def test_map_tuple(self):
   block = (tdb.Scalar(), tdb.Scalar()) >> tdb.Map(tdb.Function(tf.negative))
   self.assertBuilds([-3., -4.], block, (3, 4))
Example #30
 def test_fold_tuple(self):
   block = ((tdb.Scalar(), tdb.Scalar()) >>
            tdb.Fold(tdb.Function(tf.add), tf.ones([])))
   self.assertBuilds(6., block, (2, 3), max_depth=2)