Example #1
 def test_explicit_dtype(self):
     self.assertConverts('int64', tdt.TensorType((), 'int64'))
     self.assertConverts(['int64'], tdt.TensorType((), 'int64'))
     self.assertConverts([1, 'int64'], tdt.TensorType((1, ), 'int64'))
     self.assertConverts([1, 2, 'int64'], tdt.TensorType((1, 2), 'int64'))
     self.assertConverts([1, 2, 3, 'int64'],
                         tdt.TensorType((1, 2, 3), 'int64'))
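
These conversions take a shape list with an optional trailing dtype string. A minimal sketch of the same conversion done directly through tdt.convert_to_type (the helper Example #5 calls), assuming tdt is tensorflow_fold.blocks.result_types as in the Fold test suite:

    import tensorflow_fold.blocks.result_types as tdt

    # A trailing dtype string in the shape list selects the dtype...
    assert tdt.convert_to_type([1, 2, 'int64']) == tdt.TensorType((1, 2), 'int64')
    # ...and the dtype defaults to 'float32' when omitted (see Example #24).
    assert tdt.convert_to_type([1, 2]) == tdt.TensorType((1, 2))
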
Example #2
 def test_forward_declaration_orphaned_nested(self):
   fwd1 = tdb.ForwardDeclaration(tdt.VoidType(), tdt.TensorType([]))
   fwd2 = tdb.ForwardDeclaration(tdt.SequenceType(tdt.TensorType([])),
                                 tdt.TensorType([]))
   b = tdb.Map(tdb.Scalar()) >> fwd2() >> tdb.Function(tf.negative)
   fwd2.resolve_to(tdb.Fold(tdb.Function(tf.add), fwd1()))
   fwd1.resolve_to(tdb.FromTensor(tf.ones([])))
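    # fwd1 resolves to ones([]), so the fold computes 1 + 3 + 4 = 8 and
    # tf.negative yields -8; two chained adds plus the negation give depth 3.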
   self.assertBuilds(-8., b, [3, 4], max_depth=3)
Example #3
 def test_conversion(self):
     t = tdt.TupleType(tdt.TensorType(()), tdt.TensorType((1, ), 'int32'))
     self.assertEqual(
         repr(t),
         'TupleType(TensorType((), \'float32\'), TensorType((1,), \'int32\'))'
     )
     self.assertEqual(repr(tdt.TupleType(tdt.TensorType(()))),
                      'TupleType(TensorType((), \'float32\'))')
Example #4
 def test_size(self):
     scalar = tdt.TensorType(())
     vector3 = tdt.TensorType((3, ))
     seq = tdt.SequenceType(scalar)
     self.assertEqual(tdt.TupleType().size, 0)
     self.assertEqual(tdt.TupleType(scalar).size, 1)
     self.assertEqual(tdt.TupleType(scalar, seq, scalar).size, None)
     self.assertEqual(tdt.TupleType(scalar, vector3, scalar).size, 5)
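
Here .size is the total number of scalar elements when that is statically known: the empty tuple has size 0, and (scalar, vector3, scalar) has 1 + 3 + 1 = 5. A sequence element has no static length, so any tuple containing one has size None.
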
Example #5
 def test_flatten_unflatten(self):
     instance = [([(1, 2), (3, 4)], 5)]
     t = tdt.SequenceType(
         tdt.TupleType(
             tdt.SequenceType(
                 tdt.TupleType(tdt.TensorType([6]), tdt.TensorType([7]))),
             tdt.TensorType([8])))
     self.assertEqual(list(t.terminal_types()),
                      list(tdt.convert_to_type(([6], [7], [8]))))
     flat = t.flatten(instance)
     self.assertEqual(flat, [1, 2, 3, 4, 5])
     self.assertEqual(t.unflatten(iter(flat), [([(0, 0), (0, 0)], 0)]),
                      instance)
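
flatten yields an instance's terminal values in preorder; unflatten rebuilds an instance from a flat iterator, taking a structurally matching prototype (zeros above) from which it recovers the sequence lengths. The same round trip on a smaller type, using the tdt alias from these tests:

    t2 = tdt.SequenceType(tdt.TupleType(tdt.TensorType([]), tdt.TensorType([])))
    assert t2.flatten([(1, 2), (3, 4)]) == [1, 2, 3, 4]
    assert t2.unflatten(iter([1, 2, 3, 4]), [(0, 0), (0, 0)]) == [(1, 2), (3, 4)]
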
Example #6
 def test_flatten_unflatten(self):
     t = tdt.BroadcastSequenceType(tdt.SequenceType(tdt.TensorType([])))
     instance = itertools.repeat([1, 2])
     self.assertEqual(t.flatten(instance), [1, 2])
     unflat = t.unflatten(iter([1, 2]), itertools.repeat([0, 0]))
     self.assertTrue(isinstance(unflat, itertools.repeat))
     self.assertEqual(next(unflat), [1, 2])
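
A BroadcastSequenceType models an endless repetition of one element (an itertools.repeat), so flatten emits only that single element's terminals, and unflatten returns a fresh itertools.repeat of the rebuilt element.
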
Example #7
 def test_function_otype_inference_tuple_to_tuple(self):
   infer = tdb._infer_tf_output_type_from_input_type
   def f(x, y):
     return [tf.matmul(x, y), tf.placeholder('int32', [None, 42])]
   self.assertEqual(
       tdt.TupleType(tdt.TensorType([1, 1]), tdt.TensorType([42], 'int32')),
       infer(f, tdt.TupleType(tdt.TensorType([1, 1]), tdt.TensorType([1, 1]))))
   self.assertEqual(
       tdt.TupleType(tdt.TensorType([3, 5]), tdt.TensorType([42], 'int32')),
       infer(f, tdt.TupleType(tdt.TensorType([3, 2]), tdt.TensorType([2, 5]))))
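
Inference here threads per-instance shapes through the TensorFlow ops: the matmul output follows the usual [a, b] x [b, c] -> [a, c] rule, while the placeholder's leading None is the batch dimension and is dropped from the per-instance TensorType([42], 'int32').
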
Example #8
 def test_function_otype_inference_tuple_to_tensor(self):
   infer = tdb._infer_tf_output_type_from_input_type
   f = tf.matmul
   self.assertEqual(tdt.TensorType([1, 1]), infer(
       f, tdt.TupleType(tdt.TensorType([1, 1]), tdt.TensorType([1, 1]))))
   self.assertEqual(tdt.TensorType([3, 5]), infer(
       f, tdt.TupleType(tdt.TensorType([3, 2]), tdt.TensorType([2, 5]))))
Example #9
 def test_pair(self):
     self.assertConvertsPairs(
         [[], [1], [1, 2], [1, 2, 3], 'int64', ['int64'], [1, 'int64'],
          [1, 2, 'int64'], [1, 2, 3, 'int64']], [
              tdt.TensorType(()),
              tdt.TensorType((1, )),
              tdt.TensorType((1, 2)),
               tdt.TensorType((1, 2, 3)),
              tdt.TensorType((), 'int64'),
              tdt.TensorType((), 'int64'),
              tdt.TensorType((1, ), 'int64'),
              tdt.TensorType((1, 2), 'int64'),
              tdt.TensorType((1, 2, 3), 'int64')
          ])
Example #10
 def test_tuple(self):
     t = tdt.TupleType(tdt.TensorType(()), tdt.TensorType((1, ), 'int32'))
      self.assertEqual(tuple(t),
                       (tdt.TensorType(()), tdt.TensorType((1, ), 'int32')))
     self.assertNotEqual(tuple(t), t)
     self.assertEqual(len(t), 2)
     self.assertEqual(t[0], tdt.TensorType(()))
     self.assertEqual(t[:], t)
     self.assertEqual(t[0:1], tdt.TupleType(tdt.TensorType(())))
Example #11
  def test_forward_declarations(self):
    # Define a simple expression data structure
    nlit = lambda x: {'op': 'lit', 'val': x}
    nadd = lambda x, y: {'op': 'add', 'left': x, 'right': y}
    nexpr = nadd(nadd(nlit(3.0), nlit(5.0)), nlit(2.0))

    # Define a recursive block using forward declarations
    expr_fwd = tdb.ForwardDeclaration(tdt.PyObjectType(),
                                      tdt.TensorType((), 'float32'))
    lit_case = tdb.GetItem('val') >> tdb.Scalar()
    add_case = (tdb.Record({'left': expr_fwd(), 'right': expr_fwd()})
                >> tdb.Function(tf.add))
    expr = tdb.OneOf(lambda x: x['op'], {'lit': lit_case, 'add': add_case})
    expr_fwd.resolve_to(expr)

    self.assertBuilds(10.0, expr, nexpr, max_depth=2)
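
The same pattern works outside the test harness. A hedged sketch, assuming the top-level tensorflow_fold package (td) re-exports these blocks and types, and that Block.eval compiles and runs a single input, as in the Fold quick-start:

    import tensorflow as tf
    import tensorflow_fold as td

    nlit = lambda x: {'op': 'lit', 'val': x}
    nadd = lambda x, y: {'op': 'add', 'left': x, 'right': y}

    # Forward-declare the recursive expression block, then tie the knot.
    expr_fwd = td.ForwardDeclaration(td.PyObjectType(), td.TensorType((), 'float32'))
    lit_case = td.GetItem('val') >> td.Scalar()
    add_case = (td.Record({'left': expr_fwd(), 'right': expr_fwd()})
                >> td.Function(tf.add))
    expr = td.OneOf(lambda x: x['op'], {'lit': lit_case, 'add': add_case})
    expr_fwd.resolve_to(expr)

    print(expr.eval(nadd(nlit(3.0), nlit(7.0))))  # 10.0
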
Example #12
  def __init__(self, num_units_out, activation=tf.nn.relu, initializer=None,
               input_keep_prob=None, output_keep_prob=None,
               normalization_fn=None, weight_norm=False, name=None):
    """Initializes the layer.

    Args:
      num_units_out: The number of output units in the layer.
      activation: The activation function. Default is ReLU. Use `None` to get a
        linear layer.
      initializer: The initializer for the weights. Defaults to uniform unit
        scaling with factor derived in <http://arxiv.org/pdf/1412.6558v3.pdf>
        if activation is ReLU, ReLU6, tanh, or linear. Otherwise defaults to
        truncated normal initialization with a standard deviation of 0.01.
      input_keep_prob: Optional scalar float32 tensor for dropout on input.
        Feed 1.0 at serving to disable dropout.
      output_keep_prob: Optional scalar float32 tensor for dropout on output.
        Feed 1.0 at serving to disable dropout.
      normalization_fn: Optional normalization function that will be inserted
        before nonlinearity.
      weight_norm: A bool to control whether weight normalization is used. See
        https://arxiv.org/abs/1602.07868 for how it works.
      name: An optional string name. Defaults to `FC_%d % num_units_out`. Used
        to name the variable scope where the variables for the layer live.
    """
    self.set_constructor_args('td.FC', *get_local_arguments(FC.__init__, True))

    if not initializer:
      # TODO(SamEisenstat): This constant is calibrated for ReLU, something else
      # might be better for ReLU6.
      if activation in [tf.nn.relu, tf.nn.relu6]:
        initializer = tf.uniform_unit_scaling_initializer(1.43)
      elif activation == tf.tanh:
        initializer = tf.uniform_unit_scaling_initializer(1.15)
      elif not activation:
        initializer = tf.uniform_unit_scaling_initializer(1.0)
      else:
        initializer = tf.truncated_normal_initializer(stddev=0.01)
    self._activation = activation
    self._initializer = initializer
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._normalization_fn = normalization_fn
    self._weight_norm = weight_norm
    if name is None: name = 'FC_%d' % num_units_out
    super(FC, self).__init__(
        output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
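
For reference, a minimal usage sketch: a layer like FC is wired into a block pipeline through td.Function, and its input must already be a vector. This assumes the top-level tensorflow_fold namespace (td) and Block.eval, as in the Fold quick-start:

    import tensorflow_fold as td

    # A 3-vector input, fully connected down to 5 units (ReLU by default).
    block = td.Vector(3) >> td.Function(td.FC(5))
    print(block.eval([1.0, 2.0, 3.0]).shape)  # (5,)
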
Example #13
  def __init__(self, num_buckets, num_units_out, initializer=None, name=None,
               trainable=True, mod_inputs=True, use_cpu=True):
    """Initializes the layer.

    Args:
      num_buckets: How many buckets the embedding has.
      num_units_out: The number of output units in the layer.
      initializer: the initializer for the weights. Defaults to uniform unit
        scaling. The initializer can also be a Tensor or numpy array, in which
        case the weights are initialized to this value and shape. Note that in
        this case the weights will still be trainable unless you also pass
        `trainable=False`.
      name: An optional string name. Defaults to
        `Embedding_%d_%d % (num_buckets, num_units_out)`. Used to name the
        variable scope where the variables for the layer live.
      trainable: Whether or not to make the weights trainable.
      mod_inputs: Whether or not to mod the input by the number of buckets.
      use_cpu: Whether to use cpu (Adagrad seems to work only with cpu for
        sentiment)

    Raises:
      ValueError: If the shape of `weights` is not
        `(num_buckets, num_units_out)`.
    """

    self.set_constructor_args('td.Embedding',
                              *get_local_arguments(Embedding.__init__, True))

    self._weights_shape = (num_buckets, num_units_out)
    if name is None: name = 'Embedding_%d_%d' % self._weights_shape
    if initializer is None:
      initializer = tf.uniform_unit_scaling_initializer(1.0)
    elif isinstance(initializer, np.ndarray):
      initializer = tf.convert_to_tensor(initializer)
    if isinstance(initializer, tf.Tensor):
      initializer.set_shape(self._weights_shape)
      self._weights_shape = None  # otherwise get_variable barfs
    self._initializer = initializer
    self._num_buckets = num_buckets
    self._num_units_out = num_units_out
    self._trainable = trainable
    self._mod_inputs = bool(mod_inputs)
    self._use_cpu = use_cpu
    super(Embedding, self).__init__(
        output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
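
A matching usage sketch under the same assumptions (top-level td namespace, Block.eval): the embedding consumes one int32 id per instance and emits the corresponding row of the weight matrix.

    import tensorflow_fold as td

    # 10 buckets of 8 units; with mod_inputs=True (the default), ids are
    # taken mod 10, so eval(13) selects the same row as eval(3).
    block = td.Scalar('int32') >> td.Function(td.Embedding(10, 8))
    print(block.eval(3).shape)  # (8,)
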
Example #14
    def __init__(self,
                 num_units_out,
                 activation=tf.nn.relu,
                 initializer=None,
                 input_keep_prob=None,
                 output_keep_prob=None,
                 name=None):
        """Initializes the layer.

        Args:
          num_units_out: The number of output units in the layer.
          activation: The activation function. Default is ReLU. Use `None` to get a
            linear layer.
          initializer: The initializer for the weights. Defaults to uniform unit
            scaling with factor derived in <http://arxiv.org/pdf/1412.6558v3.pdf>
            if activation is ReLU, ReLU6, tanh, or linear. Otherwise defaults to
            truncated normal initialization with a standard deviation of 0.01.
          input_keep_prob: Optional scalar float32 tensor for dropout on input.
            Feed 1.0 at serving to disable dropout.
          output_keep_prob: Optional scalar float32 tensor for dropout on output.
            Feed 1.0 at serving to disable dropout.
          name: An optional string name. Defaults to `FC_%d % num_units_out`. Used
            to name the variable scope where the variables for the layer live.
        """
        if not initializer:
            # TODO(SamEisenstat): This constant is calibrated for ReLU, something else
            # might be better for ReLU6.
            if activation in [tf.nn.relu, tf.nn.relu6]:
                initializer = tf.uniform_unit_scaling_initializer(1.43)
            elif activation == tf.tanh:
                initializer = tf.uniform_unit_scaling_initializer(1.15)
            elif not activation:
                initializer = tf.uniform_unit_scaling_initializer(1.0)
            else:
                initializer = tf.truncated_normal_initializer(stddev=0.01)
        self._activation = activation
        self._initializer = initializer
        self._input_keep_prob = input_keep_prob
        self._output_keep_prob = output_keep_prob
        if name is None: name = 'FC_%d' % num_units_out
        super(FC, self).__init__(output_type=tdt.TensorType([num_units_out]),
                                 name_or_scope=name)
Example #15
  def __call__(self, batch):
    """Calls the function associated with this layer on a batch of inputs.

    Creates the variables for this layer if they don't already exist.

    Args:
      batch: A batch tensor.

    Returns:
      A tensor of this layer's output type.

    Raises:
      ValueError: If the layer was previously called with a batch of a different
        dtype or shape (not considering the leading dimension).
    """
    self.set_input_type(
        tdt.TensorType(batch.get_shape().as_list()[1:], batch.dtype))
    self.create_variables()
    with tf.variable_scope(self._vscope):
      return self._process_batch(batch)
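
Layers can also be called directly on a batch tensor; the call infers the input type from everything but the leading (batch) dimension. A sketch, assuming TF 1.x-style placeholders as used elsewhere in these examples:

    import tensorflow as tf
    import tensorflow_fold as td

    fc = td.FC(5)
    x = tf.placeholder('float32', [None, 3])  # leading dim is the batch
    y = fc(x)  # infers input type (3,), creates variables; y is [None, 5]
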
Example #16
 def test_hash(self):
     self.assertHashes(tdt.SequenceType, tdt.TensorType(()))
Example #17
  def test_function_otype_inference_tensor_to_tensor(self):
    infer = tdb._infer_tf_output_type_from_input_type

    self.assertEqual(tdt.TensorType([]),
                     infer(tf.negative, tdt.TensorType([])))
    self.assertEqual(tdt.TensorType([2, 3]),
                     infer(tf.negative, tdt.TensorType([2, 3])))

    self.assertEqual(tdt.TensorType([], 'int32'),
                     infer(tf.negative, tdt.TensorType([], 'int32')))
    self.assertEqual(tdt.TensorType([2, 3], 'int32'),
                     infer(tf.negative, tdt.TensorType([2, 3], 'int32')))

    f = lambda x: tf.cast(x, 'int32')
    self.assertEqual(tdt.TensorType([], 'int32'),
                     infer(f, tdt.TensorType([], 'float32')))
    self.assertEqual(tdt.TensorType([2, 3], 'int32'),
                     infer(f, tdt.TensorType([2, 3], 'float64')))
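
For plain tensor-to-tensor functions, the inferred type preserves whatever shape and dtype the wrapped ops produce: tf.negative keeps both, while the tf.cast lambda keeps the shape and switches the dtype to int32.
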
Example #18
 def test_hash(self):
     self.assertHashes(tdt.TupleType, tdt.TensorType(()))
Example #19
 def test_flatten_unflatten(self):
     t = tdt.TensorType(())
     self.assertEqual(list(t.terminal_types()), [t])
     self.assertEqual(t.flatten(42), [42])
     self.assertEqual(t.unflatten(iter([42]), None), 42)
Example #20
 def test_size(self):
     self.assertEqual(tdt.TensorType(()).size, 1)
     self.assertEqual(tdt.TensorType((1, 2, 3)).size, 6)
     self.assertEqual(tdt.TensorType((1, 0, 3)).size, 0)
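
For a TensorType, size is the number of scalar elements, i.e. the product of the dimensions: the empty product is 1 for a scalar, 1 * 2 * 3 = 6, and any zero dimension gives 0.
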
Example #21
 def test_conversion(self):
     t = tdt.TensorType((1, 2), 'int32')
     self.assertEqual(repr(t), 'TensorType((1, 2), \'int32\')')
     self.assertEqual(np.ones_like(t).dtype, np.int32)
     np.testing.assert_equal(np.ones_like(t), np.ones((1, 2)))
     self.assertEqual(t._type_shape, loom.TypeShape('int32', (1, 2)))
Example #22
 def infer(result):
   itype = tdt.TensorType([])
   f = lambda _: result
   return tdb._infer_tf_output_type_from_input_type(f, itype)
Example #23
 def test_forward_declaration_orphaned(self):
   fwd = tdb.ForwardDeclaration(tdt.VoidType(), tdt.TensorType([]))
   b = tdb.AllOf(fwd(), fwd()) >> tdb.Sum()
   fwd.resolve_to(tdb.FromTensor(tf.ones([])))
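    # AllOf routes the same (void) input to both copies of fwd, each of which
    # resolves to ones([]), so the sum is 1 + 1 = 2.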
   self.assertBuilds(2., b, None)
Example #24
 def test_default_dtype(self):
     self.assertConverts([], tdt.TensorType(()))
     self.assertConverts([1], tdt.TensorType((1, )))
     self.assertConverts([1, 2], tdt.TensorType((1, 2)))
     self.assertConverts([1, 2, 3], tdt.TensorType((1, 2, 3)))
Example #25
 def test_tensor_shape(self):
     self.assertConverts(tf.TensorShape([]), tdt.TensorType(()))
     self.assertConverts(tf.TensorShape([1]), tdt.TensorType((1, )))
     self.assertConverts(tf.TensorShape([1, 2]), tdt.TensorType((1, 2)))
      self.assertConverts(tf.TensorShape([1, 2, 3]),
                          tdt.TensorType((1, 2, 3)))
Example #26
 def test_optional_default_none_type_inference(self):
   child = tdb.Scalar() >> tdb.Function(tf.negative)
   block = tdb.Optional(child)
   self.assertEqual(child.output_type, None)
   child.set_output_type([])
   self.assertEqual(block.output_type, tdt.TensorType([]))
Example #27
 def test_zeros_void(self):
   block = tdb.Zeros(tdt.TupleType(tdt.VoidType(), tdt.TensorType(())))
   self.assertBuildsConst((None, 0.0), block, None)
Example #28
 def test_terminal_types(self):
     t = tdt.SequenceType(
         tdt.TupleType(tdt.TensorType([]), tdt.VoidType(),
                       tdt.TupleType(tdt.PyObjectType())))
     t_elem = t.element_type
     self.assertEqual(list(t.terminal_types()), [t_elem[0], t_elem[2][0]])
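
terminal_types walks the type tree in preorder and yields only the leaves that carry data: TensorType and PyObjectType count as terminals, while VoidType contributes nothing.
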
Example #29
 def test_conversion(self):
     t = tdt.SequenceType(tdt.TensorType((1, 2)))
     self.assertEqual(repr(t),
                      'SequenceType(TensorType((1, 2), \'float32\'))')
     self.assertEqual(t.element_type, tdt.TensorType((1, 2)))
Example #30
 def test_terminal_types(self):
     t0 = tdt.TensorType([])
     t1 = tdt.TensorType([1, 2])
     t = tdt.TupleType(tdt.TupleType(t0),
                       tdt.TupleType(tdt.TupleType(t1, t1), t0))
     self.assertEqual(list(t.terminal_types()), [t0, t1, t1, t0])