Code Example #1
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
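The snippets on this page are excerpts from TensorFlow test files (math_ops_test.py, cwise_ops_test.py) and rely on their module-level imports. A minimal import block consistent with the identifiers used below is sketched here; the exact aliases are an assumption, not copied verbatim from any one file:

 import numpy as np

 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes as dtypes_lib
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import gradients
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import variables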
Code Example #2
File: math_ops_test.py Project: xjump/tensorflow-cl
 def testInt(self):
   np.random.seed(54321)
   x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())
Code Example #3
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x))
   self.assertAllClose(x[0] * 5,
                       math_ops.accumulate_n([tf_x[0]] * 5))
Code Example #4
File: math_ops_test.py Project: govindap/tensorflow
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   for u in tf_x:
     print("shape=%s" % u.get_shape())
   with self.test_session():
     self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
Code Example #5
 def testSimple(self):
   with self.cached_session():
     random_arrays = [
         np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
     ]
     random_tensors = [
         ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
         for x in random_arrays
     ]
     tf_val = math_ops.accumulate_n(random_tensors)
     np_val = random_arrays[0]
     for random_array in random_arrays[1:]:
       np_val += random_array
     self.assertAllClose(np_val, self.evaluate(tf_val))
Code Example #6
 def testGrad(self):
   np.random.seed(42)
   for num_inputs in range(1, 10):
     with self.cached_session(use_gpu=True) as sess:
       input_vars = [
           variables.Variable(10.0 * np.random.random())
           for _ in range(0, num_inputs)
       ]
       accum_n = math_ops.accumulate_n(input_vars)
       sess.run(variables.global_variables_initializer())
       accum_n_grad = gradients.gradients(accum_n, input_vars)
       self.assertAllEqual(
           np.repeat(1.0, num_inputs),  # d/dx (x + y + ...) = 1
           [g.eval() for g in accum_n_grad])
Code Example #7
 def testZeroArgs(self):
   with self.test_session():
     with self.assertRaises(ValueError):
       tf_val = math_ops.accumulate_n([])
       tf_val.eval()
Code Example #8
File: cwise_ops_test.py Project: Wajih-O/tensorflow
 def testWrongType(self):
   with self.cached_session():
     with self.assertRaises(TypeError):
       a = variables.Variable(0.2, dtype=np.float32)
       b = variables.Variable(0.1, dtype=np.float32)
       math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
Code Example #9
File: cwise_ops_test.py Project: Wajih-O/tensorflow
 def testWrongShape(self):
   with self.cached_session():
     with self.assertRaises(ValueError):
       a = variables.Variable(0.2)
       b = variables.Variable(0.1)
       math_ops.accumulate_n([a, b], shape=[2, 2])  # Should be shape=[]
Code Example #10
 def fn(first, second, third):
   return math_ops.accumulate_n([first, second, third])
Code Example #11
 def testIncompatibleShapes(self):
   with self.cached_session():
     with self.assertRaises(ValueError):
       a = variables.Variable(np.array([0.1, 0.2]))
       b = variables.Variable(np.array([[0.3], [0.4]]))
       math_ops.accumulate_n([a, b])
Code Example #12
 def testMinimalEagerMode(self):
   forty = constant_op.constant(40)
   two = constant_op.constant(2)
   answer = math_ops.accumulate_n([forty, two])
   self.assertEqual(42, answer.numpy())
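For comparison, the same computation through the public API; this is a minimal sketch assuming a TensorFlow release that still exports `tf.math.accumulate_n` (recent releases deprecate it in favor of the equivalent `tf.add_n`):

 import tensorflow as tf

 # accumulate_n sums a list of same-shaped tensors element-wise, like add_n.
 answer = tf.math.accumulate_n([tf.constant(40), tf.constant(2)])
 print(int(answer))  # 42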
Code Example #13
File: cwise_ops_test.py Project: Wajih-O/tensorflow
 def testWrongTypeOneInput(self):
   # Scenario that used to trigger a bug, even when testWrongType() worked
   with self.cached_session():
     with self.assertRaises(TypeError):
       a = variables.Variable(0.2, dtype=np.float32)
       math_ops.accumulate_n([a], tensor_dtype=np.int32)
Code Example #14
 def testZeroArgs(self):
   with self.cached_session():
     with self.assertRaises(ValueError):
       tf_val = math_ops.accumulate_n([])
       tf_val.eval()
Code Example #15
 def body_fn(i, acc, tensors):
   return i + 1, acc + math_ops.accumulate_n(tensors), tensors
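`body_fn` has the signature of a `tf.while_loop` body (loop counter, running sum, unchanged tensor list). A minimal sketch of driving it follows; the loop bound and inputs are illustrative assumptions, not part of the original test:

 from tensorflow.python.ops import control_flow_ops

 i0 = constant_op.constant(0)
 acc0 = constant_op.constant(0.0)
 tensors = [constant_op.constant(1.0), constant_op.constant(2.0)]
 # Each iteration adds accumulate_n(tensors) = 3.0 to the accumulator,
 # so three iterations yield acc == 9.0.
 _, acc, _ = control_flow_ops.while_loop(
     lambda i, acc, t: i < 3, body_fn, (i0, acc0, tensors))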
Code Example #16
File: cwise_ops_test.py Project: Wajih-O/tensorflow
 def testZeroArgs(self):
   with self.cached_session():
     with self.assertRaises(ValueError):
       tf_val = math_ops.accumulate_n([])
       self.evaluate(tf_val)
Code Example #17
 def testUnknownShape(self):
   with self.session(use_gpu=True):
     x0 = array_ops.placeholder(dtype=dtypes_lib.int32, shape=[None])
     acc = math_ops.accumulate_n([x0, x0], shape=[None])
     self.assertAllEqual([2, 4], acc.eval(feed_dict={x0: [1, 2]}))
Code Example #18
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    loop_state: An object for maintaining the state of the while loops in the
                graph. It is of type ControlFlowState. None if the graph
                contains no while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one for each output of `op`. If the gradient
      for a particular output is a list, this function aggregates it
      before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.

  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]:
    raise ValueError("Invalid aggregation_method specified %s." %
                     aggregation_method)
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert control_flow_ops.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices
    if (isinstance(out_grad, collections.Sequence) and not all([
        isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
        if g is not None
    ])):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
        tensor_shape = _AccumulatorShape(out_grad)
        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint.  When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable.  If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad)
        logging.vlog(2, "  _AggregatedGrads %d x %s using %s",
                     len(out_grad), tensor_shape, used)
      else:
        out_grad = math_ops._as_indexed_slices_list(
            [g for g in out_grad if g is not None])
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat_v2([x.values for x in out_grad], 0),
            array_ops.concat_v2([x.indices for x in out_grad], 0),
            out_grad[0].dense_shape)
    else:
      out_grads[i] = []
  return out_grads
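In user code this path is selected through the `aggregation_method` argument of `tf.gradients`; below is a minimal graph-mode sketch using the TF 1.x-style API, with a toy loss that is an assumption for illustration:

 import tensorflow as tf

 x = tf.Variable(1.0)
 # Reusing x in several terms produces multiple gradient contributions
 # that _AggregatedGrads must combine.
 y = sum(x * float(i) for i in range(1, 6))
 # Request accumulate_n-based aggregation; it is applied only when there
 # are more than two contributions and the shape is fully defined.
 dx = tf.gradients(
     y, [x],
     aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)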