def _apply_sparse(self, grad, var):
    """Apply a sparse Adadelta update step to `var`.

    Args:
      grad: Sparse gradient exposing `.values` and `.indices`
        (presumably an `IndexedSlices` — confirm against callers).
      var: The variable to update; its "accum" and "accum_update"
        slots hold the Adadelta accumulators.

    Returns:
      The op produced by `training_ops.sparse_apply_adadelta`.
    """
    accum = self.get_slot(var, "accum")
    accum_update = self.get_slot(var, "accum_update")
    # Cast the hyperparameter tensors to the variable's base dtype so
    # the kernel does not see a dtype mismatch when `var` is not the
    # hyperparameters' dtype; this mirrors the casting done by the
    # other two-argument `_apply_sparse` implementation in this file.
    return training_ops.sparse_apply_adadelta(
        var,
        accum,
        accum_update,
        math_ops.cast(self._lr_t, var.dtype.base_dtype),
        math_ops.cast(self._rho_t, var.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
        grad.values,
        grad.indices,
        use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
    """Run one sparse Adadelta step on `var`.

    Retrieves the "accum" and "accum_update" slots for `var`, casts the
    learning-rate, rho and epsilon tensors to the variable's base dtype,
    and dispatches to the fused sparse-Adadelta kernel.

    Args:
      grad: Sparse gradient with `.values` and `.indices`.
      var: Variable being optimized.

    Returns:
      The op produced by `training_ops.sparse_apply_adadelta`.
    """
    accum = self.get_slot(var, "accum")
    accum_update = self.get_slot(var, "accum_update")
    # Hoist the casts into named locals for readability; the target
    # dtype is the variable's base dtype in all three cases.
    dtype = var.dtype.base_dtype
    lr = math_ops.cast(self._lr_t, dtype)
    rho = math_ops.cast(self._rho_t, dtype)
    epsilon = math_ops.cast(self._epsilon_t, dtype)
    return training_ops.sparse_apply_adadelta(
        var, accum, accum_update,
        lr, rho, epsilon,
        grad.values, grad.indices,
        use_locking=self._use_locking)
def _apply_sparse(self, grad, var, state):
    """Apply a sparse Adadelta step to `var`.

    Unlike the two-argument variant, slots and hyperparameters are
    fetched from the `state` object rather than from the optimizer
    instance itself.

    Args:
      grad: Sparse gradient with `.values` and `.indices`.
      var: Variable being optimized.
      state: Object providing `get_slot` and `get_hyper` lookups.

    Returns:
      The op produced by `training_ops.sparse_apply_adadelta`.
    """
    dtype = var.dtype.base_dtype
    accum = state.get_slot(var, "accum")
    accum_update = state.get_slot(var, "accum_update")
    # `get_hyper` returns each hyperparameter already cast to `dtype`.
    lr = state.get_hyper("learning_rate", dtype)
    rho = state.get_hyper("rho", dtype)
    epsilon = state.get_hyper("epsilon", dtype)
    return training_ops.sparse_apply_adadelta(
        var, accum, accum_update,
        lr, rho, epsilon,
        grad.values, grad.indices,
        use_locking=self._use_locking)
def _apply_sparse(self, grad, var, state):
    """Sparse Adadelta update for `var`, driven by `state`.

    The Adadelta accumulator slots ("accum", "accum_update") and the
    learning-rate/rho/epsilon hyperparameters are all resolved through
    `state`; the hyperparameters are requested in the variable's base
    dtype.

    Returns:
      The op produced by `training_ops.sparse_apply_adadelta`.
    """
    base_dtype = var.dtype.base_dtype
    acc_slot = state.get_slot(var, "accum")
    acc_update_slot = state.get_slot(var, "accum_update")
    return training_ops.sparse_apply_adadelta(
        var,
        acc_slot,
        acc_update_slot,
        state.get_hyper("learning_rate", base_dtype),
        state.get_hyper("rho", base_dtype),
        state.get_hyper("epsilon", base_dtype),
        grad.values,
        grad.indices,
        use_locking=self._use_locking)