Example #1
 def _apply_dense(self, grad, var, state):
   acc = state.get_slot(var, "accumulator")
   return training_ops.apply_adagrad(
       var,
       acc,
       state.get_hyper("learning_rate", var.dtype.base_dtype),
       grad,
       use_locking=self._use_locking)
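
For context, apply_adagrad performs the standard Adagrad update in place: the accumulator gathers squared gradients, and the variable steps against the gradient scaled by the inverse square root of that sum. A minimal NumPy sketch of the same arithmetic (names are illustrative, not from the snippet above):

 import numpy as np

 def adagrad_step(var, accum, lr, grad):
     # Same arithmetic as ApplyAdagrad: accum += grad**2,
     # then var -= lr * grad / sqrt(accum).
     accum = accum + grad * grad
     var = var - lr * grad / np.sqrt(accum)
     return var, accum

 var, accum = adagrad_step(np.array([1.0, 2.0]), np.array([0.1, 0.1]),
                           lr=0.01, grad=np.array([0.5, -0.5]))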
Example #2
 def _apply_dense(self, grad, var):
   acc = self.get_slot(var, "accumulator")
   return training_ops.apply_adagrad(
       var,
       acc,
       math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
       grad,
       use_locking=self._use_locking)
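
Example #2 is the classic optimizer-v1 pattern; note the math_ops.cast, which brings the learning-rate tensor to the variable's base dtype so the op's inputs agree when training float16 or bfloat16 variables with a float32 learning rate.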
Example #3
 def _apply_dense(self, grad, var):
     acc = self.get_slot(var, "accumulator")
     lr = self._learning_rate_tensor
      eps = 10e-8
      sqdiv = tf.pow(lr, 2) - tf.pow(var, 2)
      # If sqdiv < 0 the next var would certainly become positive, so add a
      # large penalty that makes g_max huge and effectively disables the clip.
      penalty = tf.to_float(tf.less(sqdiv, 0)) * 10e+12
      # Guard the sqrt: tf.sqrt of a negative sqdiv would yield NaN and
      # poison grad_clipped despite the penalty.
      g_max = (var * tf.sqrt(tf.maximum(sqdiv, 0.0))) / (sqdiv + eps) + penalty
      grad_clipped = tf.minimum(grad, g_max)
     return training_ops.apply_adagrad(
         var, acc, self._learning_rate_tensor, grad_clipped,
         use_locking=self._use_locking)
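
The clip bound above is easiest to see numerically. A small NumPy check (the input values are illustrative, not from the original source): when lr**2 > var**2 the bound is finite and clipping can trigger; otherwise the penalty term pushes the bound so high that the clip is a no-op.

 import numpy as np

 def g_max(var, lr, eps=10e-8):
     sqdiv = lr ** 2 - var ** 2
     penalty = float(sqdiv < 0) * 10e+12
     return var * np.sqrt(max(sqdiv, 0.0)) / (sqdiv + eps) + penalty

 print(g_max(var=0.05, lr=0.1))  # finite bound: clipping can trigger
 print(g_max(var=0.5, lr=0.1))   # ~1e13 bound: effectively unclipped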
Example #4
  def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      variables.initialize_all_variables().run()

      self.assertAllEqual(x, var.eval())
      apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
      out = apply_adagrad.eval()
      self.assertShapeEqual(out, apply_adagrad)
      self.assertAllClose(x - lr * grad * (y + grad * grad)**(-0.5), out)
      self.assertAllEqual(y + grad * grad, accum.eval())
Example #5
  def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      variables.global_variables_initializer().run()

      self.assertAllCloseAccordingToType(x, var.eval())
      apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
      out = apply_adagrad.eval()
      self.assertShapeEqual(out, apply_adagrad)
      self.assertAllCloseAccordingToType(x - lr * grad * (y + grad * grad)**
                                         (-0.5), out)
      self.assertAllCloseAccordingToType(y + grad * grad, accum.eval())
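
Examples #4 and #5 assert the same closed-form result: after one step the accumulator holds y + grad**2 and the variable holds x - lr * grad * (y + grad**2)**(-0.5). Example #5 swaps in global_variables_initializer (initialize_all_variables was deprecated) and the dtype-aware assertAllCloseAccordingToType, which loosens tolerances so the same check also passes for low-precision types such as float16.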
Example #6
  def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
    self.setUp()
    with self.session(use_gpu=use_gpu):
      var = variables.VariableV1(x)
      accum = variables.VariableV1(y)
      self.evaluate(variables.global_variables_initializer())

      self.assertAllCloseAccordingToType(x, self.evaluate(var))
      apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
      out = self.evaluate(apply_adagrad)
      self.assertShapeEqual(out, apply_adagrad)
      self.assertAllCloseAccordingToType(x - lr * grad * (y + grad * grad)**
                                         (-0.5), out)
      self.assertAllCloseAccordingToType(y + grad * grad, self.evaluate(accum))
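
Example #6 is the same test migrated further: VariableV1 plus self.session and self.evaluate replace the graph-only var.eval() calls, so the body runs under the TF test framework in both graph and eager modes.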
Example #7
 def _apply_dense(self, grad, var):
   acc = self.get_slot(var, "accumulator")
   if self.quantizer is None:
       return training_ops.apply_adagrad(
           var,
           acc,
           math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
           grad,
           use_locking=self._use_locking)
   else:
       return self.apply_qadagrad(
           var,
           acc,
           math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
           grad)
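
The apply_qadagrad branch calls a method defined elsewhere in that codebase, and its body is not shown here. Purely as a sketch (every name below is an assumption, including the quantizer's quantize method), a quantized variant might compute the usual update and quantize the results before writing them back:

 import tensorflow as tf

 def apply_qadagrad(self, var, acc, lr, grad):
     # Hypothetical sketch, not the code from Example #7's source.
     new_acc = acc + grad * grad
     update = lr * grad * tf.math.rsqrt(new_acc)
     acc_op = acc.assign(self.quantizer.quantize(new_acc))        # assumed API
     var_op = var.assign(self.quantizer.quantize(var - update))   # assumed API
     return tf.group(acc_op, var_op)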
Example #8
 def _apply_dense(self, grad, var):
   acc = self.get_slot(var, "accumulator")
   return training_ops.apply_adagrad(
       var, acc, self._learning_rate_tensor, grad,
       use_locking=self._use_locking)