def _resource_apply_sparse(self, grad, var, indices):
    # This method is only needed for momentum optimization.
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    # Per-variable momentum accumulator created in _create_slots.
    momentum_var = self.get_slot(var, "momentum")
    return training_ops.resource_sparse_apply_keras_momentum(
        var.handle,
        momentum_var.handle,
        lr_t,
        grad,
        indices,
        self._get_hyper("momentum", var_dtype),
        use_locking=self._use_locking,
        use_nesterov=self.nesterov)
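For context, each of these methods reads a per-variable "momentum" slot via get_slot, and that slot has to be created up front. A minimal sketch of the matching _create_slots, assuming the standard Keras OptimizerV2 slot API and a boolean self._momentum flag set in __init__ (both assumptions, as neither appears in this section):

def _create_slots(self, var_list):
    # One accumulator per trainable variable; "momentum" is the slot name
    # the apply methods above look up via get_slot.
    if self._momentum:  # assumed flag set in __init__
        for var in var_list:
            self.add_slot(var, "momentum")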
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    mom = self.get_slot(var, "momentum")
    return training_ops.resource_sparse_apply_keras_momentum(
        var.handle,
        mom.handle,
        coefficients["learning_rate"],
        grad,
        indices,
        self.momentum,
        use_locking=False,
        use_nesterov=self.use_nesterov)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # This method is only needed for momentum optimization.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    # Pull the per-(device, dtype) coefficients cached by _prepare_local,
    # falling back to an on-the-fly computation if apply_state was not passed.
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    momentum_var = self.get_slot(var, "momentum")
    return training_ops.resource_sparse_apply_keras_momentum(
        var.handle,
        momentum_var.handle,
        coefficients["lr_t"],
        grad,
        indices,
        coefficients["momentum"],
        use_locking=self._use_locking,
        use_nesterov=self.nesterov)
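The coefficients dict consumed by the two apply_state variants above is keyed by (var_device, var_dtype) and is filled in by _prepare_local. A minimal sketch, assuming the OptimizerV2 base class already caches the learning-rate entries ("lr_t" / "learning_rate"; the exact keys vary by TensorFlow version, so this is an assumption) and the subclass only adds "momentum":

def _prepare_local(self, var_device, var_dtype, apply_state):
    # Base class caches the decayed learning rate for this (device, dtype).
    super()._prepare_local(var_device, var_dtype, apply_state)
    # Cache the momentum coefficient alongside it, under the key the
    # apply methods above read.
    apply_state[(var_device, var_dtype)]["momentum"] = tf.identity(
        self._get_hyper("momentum", var_dtype))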
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = (apply_state or {}).get(
        (var_device, var_dtype)
    ) or self._fallback_apply_state(var_device, var_dtype)
    weight_decay = self._get_hyper("weight_decay")
    grad_averaging = self._get_hyper("grad_averaging")

    v = self.get_slot(var, "v")
    g_2 = tf.reduce_sum(tf.square(tf.cast(grad, tf.float32)))
    # v is just a scalar and does not need to involve sparse tensors.
    # On the first step it is seeded with ||g||^2; afterwards it is the
    # usual exponential moving average.
    v_t = tf.cond(
        tf.equal(self.iterations, 0),
        lambda: g_2,
        lambda: v * coefficients["beta_2_t"]
        + g_2 * coefficients["one_minus_beta_2_t"],
    )
    v_t = v.assign(v_t, use_locking=self._use_locking)

    if self.amsgrad:
        # AMSGrad keeps the running maximum of v_t as the normalizer.
        vhat = self.get_slot(var, "vhat")
        vhat_t = vhat.assign(
            tf.maximum(vhat, v_t), use_locking=self._use_locking
        )
        grad = grad / (tf.sqrt(vhat_t) + self.epsilon)
    else:
        grad = grad / (tf.sqrt(v_t) + self.epsilon)
    # Decoupled weight decay: only the gathered rows of var participate.
    grad = tf.cond(
        tf.greater(weight_decay, 0),
        lambda: grad + weight_decay * tf.gather(var, indices),
        lambda: grad,
    )
    grad = tf.cond(
        tf.logical_and(grad_averaging, tf.not_equal(self.iterations, 0)),
        lambda: grad * coefficients["one_minus_beta_1_t"],
        lambda: grad,
    )
    m = self.get_slot(var, "m")
    return training_ops.resource_sparse_apply_keras_momentum(
        var.handle,
        m.handle,
        coefficients["lr_t"],
        grad,
        indices,
        coefficients["beta_1_t"],
        use_locking=self._use_locking,
        use_nesterov=False,
    )
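This last method matches the sparse path of a NovoGrad-style optimizer. A hedged usage sketch, assuming it belongs to TensorFlow Addons' tfa.optimizers.NovoGrad (an assumption, not stated in this section): gradients of an embedding lookup arrive as tf.IndexedSlices, which is what routes them through _resource_apply_sparse rather than the dense path.

import tensorflow as tf
import tensorflow_addons as tfa  # assumed source of NovoGrad

emb = tf.Variable(tf.random.normal([100, 16]))
opt = tfa.optimizers.NovoGrad(learning_rate=0.01, weight_decay=1e-4)

with tf.GradientTape() as tape:
    # Embedding lookups produce sparse (IndexedSlices) gradients.
    loss = tf.reduce_sum(tf.nn.embedding_lookup(emb, [3, 7, 42]))
grads = tape.gradient(loss, [emb])
opt.apply_gradients(zip(grads, [emb]))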