Example #1
 def _apply_sparse(self, grad, var):
   # Sparse Adagrad step: update only the rows of `var` named by
   # `grad.indices`, using the matching rows of `grad.values`.
   acc = self.get_slot(var, "accumulator")
   return training_ops.sparse_apply_adagrad(
       var,
       acc,
       self._learning_rate_tensor,
       grad.values,
       grad.indices,
       use_locking=self._use_locking)
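
All of the examples on this page call the same training_ops.sparse_apply_adagrad kernel. As a reading aid, here is a sketch of the per-row update it performs (my notation; the formula matches the expected values asserted in the test examples further down). For each position i in grad.indices, with g the corresponding row of grad.values:

  accum[i] = accum[i] + g * g
  var[i]   = var[i] - lr * g / sqrt(accum[i])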
Example #2
 def _apply_sparse(self, grad, var, state):
   # Same kernel, but the accumulator slot and the learning rate are
   # read from an explicit optimizer `state` rather than from `self`.
   acc = state.get_slot(var, "accumulator")
   return training_ops.sparse_apply_adagrad(
       var,
       acc,
       state.get_hyper("learning_rate", var.dtype.base_dtype),
       grad.values,
       grad.indices,
       use_locking=self._use_locking)
Example #3
 def _apply_sparse(self, grad, var):
   # Like Example #1, but casts the learning-rate tensor to the
   # variable's dtype before calling the kernel.
   acc = self.get_slot(var, "accumulator")
   return training_ops.sparse_apply_adagrad(
       var,
       acc,
       math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
       grad.values,
       grad.indices,
       use_locking=self._use_locking)
Example #4
 def _apply_sparse(self, grad, var, update_slots=True):
     # Forwards `self._should_update_slots` (note: not the method
     # argument) to the kernel's `update_slots` attribute.
     acc = self.get_slot(var, "accumulator")
     return training_ops.sparse_apply_adagrad(
         var,
         acc,
         math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
         grad.values,
         grad.indices,
         use_locking=self._use_locking,
         update_slots=self._should_update_slots)
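
If `update_slots` here forwards to the kernel attribute of the same name, the accumulator write in the sketch above becomes conditional. This reading is an assumption on my part, not something stated on this page:

  if update_slots: accum[i] = accum[i] + g * g
  var[i] = var[i] - lr * g / sqrt(accum[i])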
Example #5
  def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
    # Runs one kernel step on CPU and compares the touched rows of `var`
    # and `accum` against the closed-form single-step Adagrad update.
    self.setUp()
    with self.test_session(use_gpu=False):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      variables.initialize_all_variables().run()

      self.assertAllEqual(x, var.eval())
      sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
          var, accum, lr, grad,
          constant_op.constant(indices, self._toType(indices.dtype)))
      out = sparse_apply_adagrad.eval()
      self.assertShapeEqual(out, sparse_apply_adagrad)

      for (i, index) in enumerate(indices):
        self.assertAllClose(
            x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (-0.5),
            var.eval()[index])
        self.assertAllEqual(y[index] + grad[i] * grad[i], accum.eval()[index])
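
The loop above checks the one-step closed form directly. A standalone NumPy rendering of the same arithmetic, with hypothetical inputs chosen only to make the shapes concrete:

  import numpy as np

  x, y = np.array([1.0, 2.0]), np.array([0.1, 0.1])   # initial var / accum
  lr, grad, indices = 3.0, np.array([0.01]), np.array([1])

  for i, index in enumerate(indices):
      new_accum = y[index] + grad[i] * grad[i]
      new_var = x[index] - lr * grad[i] * new_accum ** (-0.5)
      # new_var and new_accum are the values the test asserts for
      # var[index] and accum[index] after the kernel runs.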
Example #6
  def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
    # The same check as the previous example, ported to the newer test
    # API (self.session, self.evaluate, VariableV1, global initializer).
    self.setUp()
    with self.session(use_gpu=False):
      var = variables.VariableV1(x)
      accum = variables.VariableV1(y)
      self.evaluate(variables.global_variables_initializer())

      self.assertAllCloseAccordingToType(x, self.evaluate(var))
      sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
          var, accum, lr, grad,
          constant_op.constant(indices, self._toType(indices.dtype)))
      out = self.evaluate(sparse_apply_adagrad)
      self.assertShapeEqual(out, sparse_apply_adagrad)

      for (i, index) in enumerate(indices):
        self.assertAllCloseAccordingToType(
            x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**(-0.5),
            self.evaluate(var)[index])
        self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
                                           self.evaluate(accum)[index])