Example #1
    def testFoldl_Simple(self):
        elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

        r = functional_ops.foldl(
            lambda a, x: math_ops.multiply(math_ops.add(a, x), 2), elems)
        self.assertAllEqual(208, self.evaluate(r))

        r = functional_ops.foldl(
            lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
            elems,
            initializer=10)
        self.assertAllEqual(880, self.evaluate(r))
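The expected values can be checked by unrolling the fold. A plain-Python equivalent using functools.reduce (a sketch, not part of the original test):

from functools import reduce

elems = [1, 2, 3, 4, 5, 6]
# With no initializer, foldl seeds the accumulator with elems[0].
print(reduce(lambda a, x: (a + x) * 2, elems[1:], elems[0]))  # 208
# With initializer=10, the fold runs over all six elements.
print(reduce(lambda a, x: (a + x) * 2, elems, 10))            # 880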
Example #2
  def testFoldl_Simple(self):
    with self.test_session():
      elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

      r = functional_ops.foldl(lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
                               elems)
      self.assertAllEqual(208, r.eval())

      r = functional_ops.foldl(
          lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
          elems,
          initializer=10)
      self.assertAllEqual(880, r.eval())
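Note: this variant predates TensorFlow 1.0. math_ops.mul was renamed to math_ops.multiply in TF 1.0, and test_session()/r.eval() were later superseded by cached_session() and self.evaluate(), as in Example #1.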
Example #3
  def testFoldl_MultiInputSingleOutput(self):
    with self.test_session():
      elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
      initializer = np.array(1.0)
      r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
                               initializer)
      self.assertAllEqual(1, self.evaluate(r))
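Each step adds an element and its negation, so the accumulator never moves from the initializer. A plain-Python sketch of the same fold, using zip to mimic how foldl slices a tuple of inputs:

from functools import reduce
import numpy as np

elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
# x is the pair (elems[i], -elems[i]), so x[0] + x[1] == 0 at every step.
result = reduce(lambda a, x: a + x[0] + x[1], zip(elems, -elems), np.array(1.0))
print(result)  # 1.0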
Example #4
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.op_scope([sparse_tensor, mask], name):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
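A minimal usage sketch (hypothetical data, and assuming the standard tf.SparseTensor constructor):

# Keep rows 0 and 2 of a 3 x 3 SparseTensor.
st = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 2]],
                     values=[1, 2, 3], dense_shape=[3, 3])
masked = sparse_boolean_mask(st, [True, False, True])
# sparse_retain keeps the dense shape but drops the masked-out entries,
# so `masked` holds values [1, 3] at indices [[0, 0], [2, 2]].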
Example #5
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)

    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def tf_fn(accumulator, next_element):
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      next_element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, next_element_lt).tensor

    foldl_op = functional_ops.foldl(
        tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)

    return core.identity(foldl_lt, name=scope)
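A rough usage sketch, assuming the contrib labeled_tensor API of the same era (the axis name 'step' is hypothetical):

# Fold a labeled 1-D tensor down to a labeled scalar sum.
elems_lt = core.LabeledTensor(constant_op.constant([1.0, 2.0, 3.0]), ['step'])
init_lt = core.LabeledTensor(constant_op.constant(0.0), [])
total_lt = foldl(lambda a, x: a + x, elems_lt, init_lt)
# total_lt wraps the scalar 6.0 and carries init_lt's (empty) axes.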
Example #6
  def testFoldl_MultiInputDifferentDimsSingleOutput(self):
    elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
    other_elems = np.array([-1.0, 1.0])
    initializer = np.array([0.0, 0.0, 0.0])
    r = functional_ops.foldl(lambda a, x: a + x[0] * x[1],
                             (elems, other_elems), initializer)
    self.assertAllEqual([1.0, 2.0, 3.0], self.evaluate(r))
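The two steps pair each row of elems with one scalar from other_elems. A plain-Python check:

from functools import reduce
import numpy as np

elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
other_elems = np.array([-1.0, 1.0])
# Step 1: [0, 0, 0] + [1, 1, 1] * -1.0 -> [-1, -1, -1]
# Step 2: [-1, -1, -1] + [2, 3, 4] * 1.0 -> [1, 2, 3]
result = reduce(lambda a, x: a + x[0] * x[1],
                zip(elems, other_elems), np.array([0.0, 0.0, 0.0]))
print(result)  # [1. 2. 3.]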
Example #7
  def testFoldl_SingleInputMultiOutput(self):
    elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    initializer = np.array([1, -1.0])
    r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
    r_value = self.evaluate(r)

    self.assertAllEqual(22, r_value[0])
    self.assertAllEqual(20, r_value[1])
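The "multi-output" here is just broadcasting: each scalar element is added to the length-2 accumulator, so both components grow by sum(elems) == 21. A NumPy sketch:

from functools import reduce
import numpy as np

elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
result = reduce(lambda a, x: a + x, elems, np.array([1.0, -1.0]))
print(result)  # [22. 20.]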
Example #8
  def testFoldShape(self):
    x = constant_op.constant([[1, 2, 3], [4, 5, 6]])

    def fn(_, current_input):
      return current_input

    initializer = constant_op.constant([0, 0, 0])
    y = functional_ops.foldl(fn, x, initializer=initializer)
    self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
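The assertion checks that the statically inferred shape of the fold (which foldl takes from the initializer, here [3]) agrees with the shape of the evaluated result.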
Example #9
  def testFoldl_Scoped(self):
    with self.cached_session() as sess:
      with variable_scope.variable_scope("root") as varscope:
        elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

        r = functional_ops.foldl(simple_scoped_fn, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertEqual(variables.trainable_variables()[0].name,
                         "root/body/two:0")
        sess.run([variables.global_variables_initializer()])
        self.assertAllEqual(208, self.evaluate(r))

        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertAllEqual(880, self.evaluate(r))
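simple_scoped_fn is not defined in this listing. Judging from the expected values (208 and 880, matching Example #1) and the asserted variable name root/body/two:0, it computes (a + x) * 2 with the 2 stored as a variable; in the TensorFlow test suite it looks roughly like this:

def simple_scoped_fn(a, x):
  """Computes (a + x) * 2, with the "2" read from a variable in scope "body"."""
  with variable_scope.variable_scope("body"):
    two = variable_scope.get_variable(
        "two", [],
        dtype=dtypes.int32,
        initializer=init_ops.constant_initializer(2))
    return math_ops.multiply(math_ops.add(a, x), two)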
Example #10
  def testFold_Grad(self):
    with self.cached_session():
      elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
      v = constant_op.constant(2.0, name="v")
      r = functional_ops.foldl(
          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllEqual(720.0, self.evaluate(r))

      r = functional_ops.foldr(
          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllEqual(720.0, self.evaluate(r))
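Both gradients are 720 because the fold computes r = v * (1*2*3*4*5*6) = 720 * v regardless of direction, so dr/dv == 720:

from functools import reduce
print(reduce(lambda a, x: a * x, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]))  # 720.0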
Example #11
    def forward(self, y0, save_intermediate=False):
        time_grid = ops.convert_to_tensor(self.ts,
                                          preferred_dtype=float_type,
                                          name='t')
        y0 = ops.convert_to_tensor(y0, name='y0')
        time_delta_grid = time_grid[1:] - time_grid[:-1]
        time_grid = time_grid[1:]
        time_combined = tf.concat(
            [time_grid[:, None], time_delta_grid[:, None]], axis=1)
        scan_func = self._make_scan_func(self.f)

        if save_intermediate:
            y_grid = functional_ops.scan(scan_func, time_combined, y0)
            y_s = array_ops.concat([[y0], y_grid], axis=0)
            y_t = y_s[-1, :, :, :]
            return y_t, y_s
        else:
            y_t = functional_ops.foldl(scan_func, time_combined, y0)
            return y_t, None
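The branch reflects the difference between the two functional ops: scan returns every intermediate accumulator state (needed for save_intermediate), while foldl returns only the final one. A plain-Python analogy:

from functools import reduce
from itertools import accumulate

steps = [1, 2, 3]
final = reduce(lambda y, t: y + t, steps, 0)                         # like foldl: 6
trajectory = list(accumulate(steps, lambda y, t: y + t, initial=0))  # like scan: [0, 1, 3, 6]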
Example #12
  def r_inner(a, x):
    return functional_ops.foldl(
        lambda b, y: b * y * x, inner_elems, initializer=a)
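A pure-Python analogue of this nested fold (inner_elems is not shown in the snippet; the values below are hypothetical):

from functools import reduce

inner_elems = [1.0, 2.0]  # hypothetical
def r_inner(a, x):
    return reduce(lambda b, y: b * y * x, inner_elems, a)

print(r_inner(1.0, 3.0))  # 1 * (1 * 3), then * (2 * 3) -> 18.0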