def _resource_apply_sparse(self, grad, var, indices, state):
    # Variant that reads the slot variables and hyperparameters from a
    # passed-in `state` object.
    accum = state.get_slot(var, "accum")
    accum_update = state.get_slot(var, "accum_update")
    return training_ops.resource_sparse_apply_adadelta(
        var.handle, accum.handle, accum_update.handle,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        state.get_hyper("rho", var.dtype.base_dtype),
        state.get_hyper("epsilon", var.dtype.base_dtype),
        grad, indices,
        use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
    # Variant that casts the pre-created hyperparameter tensors
    # (_lr_t, _rho_t, _epsilon_t) to the gradient dtype.
    accum = self.get_slot(var, "accum")
    accum_update = self.get_slot(var, "accum_update")
    return training_ops.resource_sparse_apply_adadelta(
        var.handle, accum.handle, accum_update.handle,
        math_ops.cast(self._lr_t, grad.dtype),
        math_ops.cast(self._rho_t, grad.dtype),
        math_ops.cast(self._epsilon_t, grad.dtype),
        grad, indices,
        use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
    # Variant that resolves hyperparameters through _get_hyper and uses the
    # 'accum_grad'/'accum_var' slot names.
    accum_grad = self.get_slot(var, 'accum_grad')
    accum_var = self.get_slot(var, 'accum_var')
    return training_ops.resource_sparse_apply_adadelta(
        var.handle, accum_grad.handle, accum_var.handle,
        math_ops.cast(self._get_hyper('learning_rate'), grad.dtype.base_dtype),
        math_ops.cast(self._get_hyper('rho'), grad.dtype.base_dtype),
        math_ops.cast(self._get_hyper('epsilon'), grad.dtype.base_dtype),
        grad, indices,
        use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
    # Variant that applies learning-rate decay via _decayed_lr and fetches the
    # remaining hyperparameters already cast to the variable dtype.
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    accum_grad = self.get_slot(var, 'accum_grad')
    accum_var = self.get_slot(var, 'accum_var')
    return training_ops.resource_sparse_apply_adadelta(
        var.handle, accum_grad.handle, accum_var.handle,
        lr_t,
        self._get_hyper('rho', var_dtype),
        self._get_hyper('epsilon', var_dtype),
        grad, indices,
        use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # Variant that looks up per-(device, dtype) coefficients cached in
    # `apply_state`, falling back to _fallback_apply_state when the cache
    # entry is missing.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
    accum_grad = self.get_slot(var, 'accum_grad')
    accum_var = self.get_slot(var, 'accum_var')
    return training_ops.resource_sparse_apply_adadelta(
        var.handle, accum_grad.handle, accum_var.handle,
        coefficients['lr_t'],
        coefficients['rho'],
        coefficients['epsilon'],
        grad, indices,
        use_locking=self._use_locking)
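# The apply_state variant above expects the per-(device, dtype) coefficient
# dict to be populated before the update runs. A minimal sketch of how such a
# cache could be filled, assuming the optimizer_v2-style _prepare_local hook;
# the Adadelta class name, the `ops`/`array_ops` modules, and the
# _prepare_local signature are taken from that convention, not from the
# excerpts above.
# Assumed imports:
#   from tensorflow.python.framework import ops
#   from tensorflow.python.ops import array_ops
def _prepare_local(self, var_device, var_dtype, apply_state):
    super(Adadelta, self)._prepare_local(var_device, var_dtype, apply_state)
    # The base-class entry already provides 'lr_t'; add the Adadelta-specific
    # hyperparameters converted to this device/dtype combination so the sparse
    # and dense apply paths can reuse them without re-resolving hypers.
    apply_state[(var_device, var_dtype)].update(
        dict(
            epsilon=ops.convert_to_tensor(self.epsilon, var_dtype),
            rho=array_ops.identity(self._get_hyper('rho', var_dtype))))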