def _multi_tensor_rmsprop(params: List[Tensor],
                          grads: List[Tensor],
                          square_avgs: List[Tensor],
                          grad_avgs: List[Tensor],
                          momentum_buffer_list: List[Tensor],
                          *,
                          lr: float,
                          alpha: float,
                          eps: float,
                          weight_decay: float,
                          momentum: float,
                          centered: bool):
    if len(params) == 0:
        return

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    torch._foreach_mul_(square_avgs, alpha)
    torch._foreach_addcmul_(square_avgs, grads, grads, value=1 - alpha)

    if centered:
        torch._foreach_mul_(grad_avgs, alpha)
        torch._foreach_add_(grad_avgs, grads, alpha=1 - alpha)
        avg = torch._foreach_addcmul(square_avgs, grad_avgs, grad_avgs, value=-1)
        torch._foreach_sqrt_(avg)
        torch._foreach_add_(avg, eps)
    else:
        avg = torch._foreach_sqrt(square_avgs)
        torch._foreach_add_(avg, eps)

    if momentum > 0:
        torch._foreach_mul_(momentum_buffer_list, momentum)
        torch._foreach_addcdiv_(momentum_buffer_list, grads, avg)
        torch._foreach_add_(params, momentum_buffer_list, alpha=-lr)
    else:
        torch._foreach_addcdiv_(params, grads, avg, value=-lr)
def _multi_tensor_adamax(params: List[Tensor],
                         grads: List[Tensor],
                         exp_avgs: List[Tensor],
                         exp_infs: List[Tensor],
                         state_steps: List[Tensor],
                         *,
                         beta1: float,
                         beta2: float,
                         lr: float,
                         weight_decay: float,
                         eps: float):
    if len(params) == 0:
        return

    # Update steps
    torch._foreach_add_(state_steps, 1)

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Update biased first moment estimate.
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    # Update the exponentially weighted infinity norm.
    torch._foreach_mul_(exp_infs, beta2)

    for exp_inf, grad in zip(exp_infs, grads):
        norm_buf = torch.cat(
            [exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0)
        torch.max(norm_buf, 0, keepdim=False,
                  out=(exp_inf, exp_inf.new().long()))

    bias_corrections = [1 - beta1**step.item() for step in state_steps]
    clr = [-1 * (lr / bias_correction) for bias_correction in bias_corrections]
    torch._foreach_addcdiv_(params, exp_avgs, exp_infs, clr)
def adamax(params: List[Tensor],
           grads: List[Tensor],
           exp_avgs: List[Tensor],
           exp_infs: List[Tensor],
           states: List[Dict],
           *,
           beta1: float,
           beta2: float,
           lr: float,
           weight_decay: float,
           eps: float):
    r"""Functional API that performs Adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """
    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Update biased first moment estimate.
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    # Update the exponentially weighted infinity norm.
    torch._foreach_mul_(exp_infs, beta2)

    for exp_inf, grad in zip(exp_infs, grads):
        norm_buf = torch.cat(
            [exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0)
        torch.max(norm_buf, 0, keepdim=False,
                  out=(exp_inf, exp_inf.new().long()))

    bias_corrections = [1 - beta1**state['step'] for state in states]
    clr = [-1 * (lr / bias_correction) for bias_correction in bias_corrections]
    torch._foreach_addcdiv_(params, exp_avgs, exp_infs, clr)
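# A minimal usage sketch (not part of the original module): it drives the
# functional `adamax` above for two parameters. The demo function name, shapes
# and hyperparameter values are illustrative assumptions; the caller is
# expected to have bumped each state's 'step' to at least 1 beforehand,
# otherwise the bias correction would divide by zero.
def _demo_adamax_functional():
    params = [torch.zeros(4), torch.zeros(2, 3)]
    grads = [torch.randn_like(p) for p in params]
    exp_avgs = [torch.zeros_like(p) for p in params]
    exp_infs = [torch.zeros_like(p) for p in params]
    states = [{'step': 1} for _ in params]
    adamax(params, grads, exp_avgs, exp_infs, states,
           beta1=0.9, beta2=0.999, lr=2e-3, weight_decay=0.0, eps=1e-8)
    return params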
def _multi_tensor_radam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor],
                        *,
                        beta1: float,
                        beta2: float,
                        lr: float,
                        weight_decay: float,
                        eps: float):
    if len(params) == 0:
        return

    # Update steps
    torch._foreach_add_(state_steps, 1)

    # maximum length of the approximated SMA
    rho_inf = 2 / (1 - beta2) - 1
    # compute the length of the approximated SMA
    rho_t_list = [
        rho_inf - 2 * step.item() * (beta2**step.item()) / (1 - beta2**step.item())
        for step in state_steps
    ]

    bias_correction1 = [1 - beta1**step.item() for step in state_steps]
    bias_correction2 = [1 - beta2**step.item() for step in state_steps]

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sqs, beta2)
    torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)

    rect = [
        math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf /
                  ((rho_inf - 4) * (rho_inf - 2) * rho_t)) if rho_t > 5 else 0
        for rho_t in rho_t_list
    ]
    unrectified = [0 if r > 0 else 1. for r in rect]

    # Rectified (variance-adaptive) part of the update.
    exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
    bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
    denom = torch._foreach_div(exp_avg_sq_sqrt, bias_correction_sqrt)
    step_size = [(lr * r / bc) * -1 for r, bc in zip(rect, bias_correction1)]
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)

    # Unrectified (SGD-like) part of the update, applied when rho_t <= 5.
    denom = [
        torch.ones_like(exp_av, memory_format=torch.preserve_format)
        for exp_av in exp_avgs
    ]
    step_size = [(lr * r / bc) * -1 for r, bc in zip(unrectified, bias_correction1)]
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
def _multi_tensor_adam(params: List[Tensor],
                       grads: List[Tensor],
                       exp_avgs: List[Tensor],
                       exp_avg_sqs: List[Tensor],
                       max_exp_avg_sqs: List[Tensor],
                       state_steps: List[Tensor],
                       *,
                       amsgrad: bool,
                       beta1: float,
                       beta2: float,
                       lr: float,
                       weight_decay: float,
                       eps: float,
                       maximize: bool):
    if len(params) == 0:
        return

    # update steps
    torch._foreach_add_(state_steps, 1)

    if maximize:
        grads = torch._foreach_neg(tuple(grads))  # type: ignore[assignment]

    bias_correction1 = [1 - beta1 ** step.item() for step in state_steps]
    bias_correction2 = [1 - beta2 ** step.item() for step in state_steps]

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sqs, beta2)
    torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)

    if amsgrad:
        # Maintains the maximum of all 2nd moment running avg. till now
        max_exp_avg_sqs = torch._foreach_maximum(max_exp_avg_sqs, exp_avg_sqs)  # type: ignore[assignment]

        # Use the max. for normalizing running avg. of gradient
        max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs)
        bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
        torch._foreach_div_(max_exp_avg_sq_sqrt, bias_correction_sqrt)
        denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps)
    else:
        exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
        bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
        denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

    step_size = [(lr / bc) * -1 for bc in bias_correction1]
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
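# A minimal usage sketch (not part of the original module): it runs a single
# `_multi_tensor_adam` update with freshly zero-initialized state. The demo
# function name, shapes and hyperparameter values are illustrative assumptions;
# the function increments `state_steps` itself, so the steps can start at zero.
def _demo_multi_tensor_adam():
    params = [torch.zeros(5), torch.zeros(3, 3)]
    grads = [torch.randn_like(p) for p in params]
    exp_avgs = [torch.zeros_like(p) for p in params]
    exp_avg_sqs = [torch.zeros_like(p) for p in params]
    max_exp_avg_sqs = [torch.zeros_like(p) for p in params]
    state_steps = [torch.zeros(()) for _ in params]
    _multi_tensor_adam(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs,
                       state_steps, amsgrad=False, beta1=0.9, beta2=0.999,
                       lr=1e-3, weight_decay=0.0, eps=1e-8, maximize=False)
    return params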
def _multi_tensor_rmsprop(params: List[Tensor],
                          grads: List[Tensor],
                          square_avgs: List[Tensor],
                          grad_avgs: List[Tensor],
                          momentum_buffer_list: List[Tensor],
                          *,
                          lr: float,
                          alpha: float,
                          eps: float,
                          weight_decay: float,
                          momentum: float,
                          centered: bool,
                          maximize: bool,
                          differentiable: bool):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    if maximize:
        grads = torch._foreach_neg(grads)

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    def _view_complex_as_real(tensor_list):
        return [
            torch.view_as_real(t) if torch.is_complex(t) else t
            for t in tensor_list
        ]

    grads = _view_complex_as_real(grads)
    params = _view_complex_as_real(params)
    square_avgs = _view_complex_as_real(square_avgs)

    torch._foreach_mul_(square_avgs, alpha)
    torch._foreach_addcmul_(square_avgs, grads, grads, value=1 - alpha)

    if centered:
        grad_avgs = _view_complex_as_real(grad_avgs)
        torch._foreach_mul_(grad_avgs, alpha)
        torch._foreach_add_(grad_avgs, grads, alpha=1 - alpha)
        avg = torch._foreach_addcmul(square_avgs, grad_avgs, grad_avgs, value=-1)
        torch._foreach_sqrt_(avg)
        torch._foreach_add_(avg, eps)
    else:
        avg = torch._foreach_sqrt(square_avgs)
        torch._foreach_add_(avg, eps)

    if momentum > 0:
        momentum_buffer_list = _view_complex_as_real(momentum_buffer_list)
        torch._foreach_mul_(momentum_buffer_list, momentum)
        torch._foreach_addcdiv_(momentum_buffer_list, grads, avg)
        torch._foreach_add_(params, momentum_buffer_list, alpha=-lr)
    else:
        torch._foreach_addcdiv_(params, grads, avg, value=-lr)
def _multi_tensor_nadam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        mu_products: List[Tensor],
                        state_steps: List[Tensor],
                        *,
                        beta1: float,
                        beta2: float,
                        lr: float,
                        weight_decay: float,
                        momentum_decay: float,
                        eps: float):
    if len(params) == 0:
        return

    # update steps
    torch._foreach_add_(state_steps, 1)

    bias_correction1 = [1 - beta1**step.item() for step in state_steps]
    bias_correction2 = [1 - beta2**step.item() for step in state_steps]
    mus = [
        beta1 * (1. - 0.5 * (0.96**(step.item() * momentum_decay)))
        for step in state_steps
    ]
    mu_nexts = [
        beta1 * (1. - 0.5 * (0.96**((step.item() + 1) * momentum_decay)))
        for step in state_steps
    ]

    # update mu_products
    torch._foreach_mul_(mu_products, mus)

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sqs, beta2)
    torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)

    exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
    bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
    torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
    denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

    step_size_grads = [
        (lr * (1. - mu) / (1. - mu_product.item())) * -1
        for mu_product, mu in zip(mu_products, mus)
    ]
    step_size_expavg = [
        (lr * mu_next / (1. - mu_product.item() * mu_next)) * -1
        for mu_product, mu_next in zip(mu_products, mu_nexts)
    ]

    torch._foreach_addcdiv_(params, grads, denom, step_size_grads)
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size_expavg)
def radam(params: List[Tensor],
          grads: List[Tensor],
          exp_avg: List[Tensor],
          exp_avg_sq: List[Tensor],
          states: List[Dict],
          *,
          beta1: float,
          beta2: float,
          lr: float,
          weight_decay: float,
          eps: float):
    r"""Functional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    """
    # maximum length of the approximated SMA
    rho_inf = 2 / (1 - beta2) - 1
    # compute the length of the approximated SMA
    rho_t_list = [
        rho_inf - 2 * state['step'] * (beta2**state['step']) / (1 - beta2**state['step'])
        for state in states
    ]

    bias_correction1 = [1 - beta1**state['step'] for state in states]
    bias_correction2 = [1 - beta2**state['step'] for state in states]

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avg, beta1)
    torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sq, beta2)
    torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)

    rect = [
        math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf /
                  ((rho_inf - 4) * (rho_inf - 2) * rho_t)) if rho_t > 5 else 0
        for rho_t in rho_t_list
    ]
    unrectified = [0 if r > 0 else 1. for r in rect]

    # Rectified (variance-adaptive) part of the update.
    exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sq)
    bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
    denom = torch._foreach_div(exp_avg_sq_sqrt, bias_correction_sqrt)
    step_size = [(lr * r / bc) * -1 for r, bc in zip(rect, bias_correction1)]
    torch._foreach_addcdiv_(params, exp_avg, denom, step_size)

    # Unrectified (SGD-like) part of the update, applied when rho_t <= 5.
    denom = [
        torch.ones_like(exp_av, memory_format=torch.preserve_format)
        for exp_av in exp_avg
    ]
    step_size = [(lr * r / bc) * -1 for r, bc in zip(unrectified, bias_correction1)]
    torch._foreach_addcdiv_(params, exp_avg, denom, step_size)
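# A minimal usage sketch (not part of the original module): it calls the
# functional `radam` above at an early step, where rho_t <= 5 and only the
# unrectified (SGD-like) branch contributes. The demo function name, shapes
# and hyperparameter values are illustrative assumptions; 'step' must already
# have been incremented by the caller.
def _demo_radam_functional():
    params = [torch.zeros(4), torch.zeros(2, 2)]
    grads = [torch.ones_like(p) for p in params]
    exp_avg = [torch.zeros_like(p) for p in params]
    exp_avg_sq = [torch.zeros_like(p) for p in params]
    states = [{'step': 1} for _ in params]
    radam(params, grads, exp_avg, exp_avg_sq, states,
          beta1=0.9, beta2=0.999, lr=1e-3, weight_decay=0.0, eps=1e-8)
    return params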
def nadam(params: List[Tensor],
          grads: List[Tensor],
          exp_avg: List[Tensor],
          exp_avg_sq: List[Tensor],
          mu_products: List[Tensor],
          states: List[Dict],
          *,
          beta1: float,
          beta2: float,
          lr: float,
          weight_decay: float,
          momentum_decay: float,
          eps: float):
    r"""Functional API that performs NAdam algorithm computation.

    See :class:`~torch.optim.NAdam` for details.
    """
    bias_correction1 = [1 - beta1 ** state['step'] for state in states]
    bias_correction2 = [1 - beta2 ** state['step'] for state in states]
    mus = [
        beta1 * (1. - 0.5 * (0.96 ** (state['step'] * momentum_decay)))
        for state in states
    ]
    mu_nexts = [
        beta1 * (1. - 0.5 * (0.96 ** ((state['step'] + 1) * momentum_decay)))
        for state in states
    ]

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avg, beta1)
    torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sq, beta2)
    torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)

    exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sq)
    bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
    torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
    denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

    step_size_grads = [
        (lr * (1. - mu) / (1. - mu_product)) * -1
        for mu_product, mu in zip(mu_products, mus)
    ]
    step_size_expavg = [
        (lr * mu_next / (1. - mu_product * mu_next)) * -1
        for mu_product, mu_next in zip(mu_products, mu_nexts)
    ]

    torch._foreach_addcdiv_(params, grads, denom, step_size_grads)
    torch._foreach_addcdiv_(params, exp_avg, denom, step_size_expavg)
def adagrad(params: List[Tensor],
            grads: List[Tensor],
            state_sums: List[Tensor],
            state_steps: List[int],
            has_sparse_grad: bool,
            *,
            lr: float,
            weight_decay: float,
            lr_decay: float,
            eps: float):
    r"""Functional API that performs Adagrad algorithm computation.

    See :class:`~torch.optim.Adagrad` for details.
    """
    if weight_decay != 0:
        if has_sparse_grad:
            raise RuntimeError(
                "weight_decay option is not compatible with sparse gradients")
        torch._foreach_add_(grads, params, alpha=weight_decay)

    minus_clr = [-lr / (1 + (step - 1) * lr_decay) for step in state_steps]

    if has_sparse_grad:
        # sparse is not supported by multi_tensor. Fall back to the optim.adagrad
        # implementation for sparse gradients.
        for i, (param, grad, state_sum, step) in enumerate(
                zip(params, grads, state_sums, state_steps)):
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()
            size = grad.size()

            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std_sparse = state_sum.sparse_mask(grad)
            std_sparse_values = std_sparse._values().sqrt_().add_(eps)
            param.add_(
                _make_sparse(grad, grad_indices, grad_values / std_sparse_values),
                alpha=minus_clr[i],
            )
    else:
        torch._foreach_addcmul_(state_sums, grads, grads, value=1)
        std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
        torch._foreach_addcdiv_(params, torch._foreach_mul(grads, minus_clr), std)
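# A minimal usage sketch (not part of the original module): it performs one
# dense Adagrad update through the functional `adagrad` above. The demo
# function name, shapes and hyperparameter values are illustrative assumptions;
# `state_steps` holds plain ints that the caller has already incremented to 1.
def _demo_adagrad_functional():
    params = [torch.zeros(4), torch.zeros(2, 2)]
    grads = [torch.randn_like(p) for p in params]
    state_sums = [torch.zeros_like(p) for p in params]
    state_steps = [1, 1]
    adagrad(params, grads, state_sums, state_steps, False,
            lr=1e-2, weight_decay=0.0, lr_decay=0.0, eps=1e-10)
    return params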
def step(self, closure=None):
    """Performs a single optimization step.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        grads = []
        params_with_grad = []
        states = []
        alpha = group['alpha']
        square_avg = []

        for p in group['params']:
            if p.grad is not None:
                if p.grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                grads.append(p.grad)
                params_with_grad.append(p)

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(
                            p, memory_format=torch.preserve_format)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(
                            p, memory_format=torch.preserve_format)

                state['step'] += 1
                states.append(state)
                square_avg.append(state['square_avg'])

        if group['weight_decay'] != 0:
            torch._foreach_add_(grads, params_with_grad,
                                alpha=group['weight_decay'])

        torch._foreach_mul_(square_avg, alpha)
        torch._foreach_addcmul_(square_avg, grads, grads, value=1 - alpha)

        if group['centered']:
            grad_avgs = [s['grad_avg'] for s in states]
            torch._foreach_mul_(grad_avgs, alpha)
            torch._foreach_add_(grad_avgs, grads, alpha=1 - alpha)
            avg = torch._foreach_addcmul(square_avg, grad_avgs, grad_avgs, value=-1)
            torch._foreach_sqrt_(avg)
            torch._foreach_add_(avg, group['eps'])
        else:
            avg = torch._foreach_sqrt(square_avg)
            torch._foreach_add_(avg, group['eps'])

        if group['momentum'] > 0:
            buf = [s['momentum_buffer'] for s in states]
            torch._foreach_mul_(buf, group['momentum'])
            torch._foreach_addcdiv_(buf, grads, avg)
            torch._foreach_add_(params_with_grad, buf, alpha=-group['lr'])
        else:
            torch._foreach_addcdiv_(params_with_grad, grads, avg,
                                    value=-group['lr'])

    return loss
def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        amsgrad = group['amsgrad']

        grads = []
        states = []
        exp_avg = []
        exp_avg_sq = []
        max_exp_avg_sq = []
        params_with_grad = []

        for p in group['params']:
            if p.grad is not None:
                if p.grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                params_with_grad.append(p)
                grads.append(p.grad)

        for p in params_with_grad:
            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(
                    p, memory_format=torch.preserve_format)
                # Exponential moving average of squared gradient values
                state['exp_avg_sq'] = torch.zeros_like(
                    p, memory_format=torch.preserve_format)
                if amsgrad:
                    # Maintains max of all exp. moving avg. of sq. grad. values
                    state['max_exp_avg_sq'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)

            exp_avg.append(state['exp_avg'])
            exp_avg_sq.append(state['exp_avg_sq'])

            if amsgrad:
                max_exp_avg_sq.append(state['max_exp_avg_sq'])

            state['step'] += 1
            states.append(state)

        beta1, beta2 = group['betas']

        bias_correction1 = [1 - beta1**state['step'] for state in states]
        bias_correction2 = [1 - beta2**state['step'] for state in states]

        if group['weight_decay'] != 0:
            grads = torch._foreach_add(grads, params_with_grad,
                                       alpha=group['weight_decay'])

        #
        # Decay the first and second moment running average coefficient
        #
        torch._foreach_mul_(exp_avg, beta1)
        torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)

        torch._foreach_mul_(exp_avg_sq, beta2)
        torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)

        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            max_exp_avg_sq = torch._foreach_maximum(max_exp_avg_sq, exp_avg_sq)

            # Use the max. for normalizing running avg. of gradient
            max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sq)
            bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
            torch._foreach_div_(max_exp_avg_sq_sqrt, bias_correction_sqrt)
            denom = torch._foreach_add(max_exp_avg_sq_sqrt, group['eps'])
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sq)
            bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
            denom = torch._foreach_add(exp_avg_sq_sqrt, group['eps'])

        step_size = [(group['lr'] / bc) * -1 for bc in bias_correction1]
        torch._foreach_addcdiv_(params_with_grad, exp_avg, denom, step_size)

    return loss
def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        grads = []
        params_with_grad = []
        states = []
        exp_avgs = []
        exp_infs = []

        beta1, beta2 = group['betas']
        eps = group['eps']

        for p in group['params']:
            if p.grad is not None:
                if p.grad.is_sparse:
                    raise RuntimeError('Adamax does not support sparse gradients')
                grads.append(p.grad)
                params_with_grad.append(p)

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)
                    state['exp_inf'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_infs.append(state['exp_inf'])

                state['step'] += 1
                states.append(state)

        if group['weight_decay'] != 0:
            torch._foreach_add_(grads, params_with_grad,
                                alpha=group['weight_decay'])

        # Update biased first moment estimate.
        torch._foreach_mul_(exp_avgs, beta1)
        torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

        # Update the exponentially weighted infinity norm.
        torch._foreach_mul_(exp_infs, beta2)

        for exp_inf, grad in zip(exp_infs, grads):
            norm_buf = torch.cat(
                [exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0)
            torch.max(norm_buf, 0, keepdim=False,
                      out=(exp_inf, exp_inf.new().long()))

        bias_corrections = [1 - beta1**state['step'] for state in states]
        clr = [
            -1 * (group['lr'] / bias_correction)
            for bias_correction in bias_corrections
        ]
        torch._foreach_addcdiv_(params_with_grad, exp_avgs, exp_infs, clr)

    return loss
def _multi_tensor_adamw(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        max_exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor],
                        *,
                        amsgrad: bool,
                        beta1: float,
                        beta2: float,
                        lr: float,
                        weight_decay: float,
                        eps: float,
                        maximize: bool,
                        capturable: bool):
    if len(params) == 0:
        return

    if capturable:
        assert all(p.is_cuda and step.is_cuda
                   for p, step in zip(params, state_steps)), \
            "If capturable=True, params and state_steps must be CUDA tensors."

    if maximize:
        grads = torch._foreach_neg(tuple(grads))  # type: ignore[assignment]

    grads = [
        torch.view_as_real(x) if torch.is_complex(x) else x for x in grads
    ]
    exp_avgs = [
        torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs
    ]
    exp_avg_sqs = [
        torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs
    ]
    params = [
        torch.view_as_real(x) if torch.is_complex(x) else x for x in params
    ]

    # update steps
    torch._foreach_add_(state_steps, 1)

    # Perform stepweight decay
    torch._foreach_mul_(params, 1 - lr * weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sqs, beta2)
    torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)

    if capturable:
        # TODO: use foreach_pow if/when foreach_pow is added
        bias_correction1 = [torch.pow(beta1, step) for step in state_steps]
        bias_correction2 = [torch.pow(beta2, step) for step in state_steps]
        # foreach_sub doesn't allow a scalar as the first arg
        torch._foreach_sub_(bias_correction1, 1)
        torch._foreach_sub_(bias_correction2, 1)
        torch._foreach_neg_(bias_correction1)
        torch._foreach_neg_(bias_correction2)

        # foreach_div doesn't allow a scalar as the first arg
        step_size = torch._foreach_div(bias_correction1, lr)
        torch._foreach_reciprocal_(step_size)
        torch._foreach_neg_(step_size)

        bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2)

        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs)

            # Use the max. for normalizing running avg. of gradient
            max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs)
            # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write
            # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor)
            torch._foreach_div_(
                max_exp_avg_sq_sqrt,
                torch._foreach_mul(bias_correction2_sqrt, step_size))
            eps_over_step_size = torch._foreach_div(step_size, eps)
            torch._foreach_reciprocal_(eps_over_step_size)
            denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps_over_step_size)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
            torch._foreach_div_(
                exp_avg_sq_sqrt,
                torch._foreach_mul(bias_correction2_sqrt, step_size))
            eps_over_step_size = torch._foreach_div(step_size, eps)
            torch._foreach_reciprocal_(eps_over_step_size)
            denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size)

        torch._foreach_addcdiv_(params, exp_avgs, denom)
    else:
        bias_correction1 = [1 - beta1**step.item() for step in state_steps]
        bias_correction2 = [1 - beta2**step.item() for step in state_steps]

        step_size = [(lr / bc) * -1 for bc in bias_correction1]

        bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2]

        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs)

            # Use the max. for normalizing running avg. of gradient
            max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs)
            torch._foreach_div_(max_exp_avg_sq_sqrt, bias_correction2_sqrt)
            denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

        torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
def step(self, closure=None):
    """Performs a single optimization step.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        amsgrad = group["amsgrad"]

        grads = []
        states = []
        exp_avg = []
        exp_avg_sq = []
        max_exp_avg_sq = []
        params_with_grad = []

        for p in group["params"]:
            if p.grad is not None:
                if p.grad.is_sparse:
                    raise RuntimeError("AdamW does not support sparse gradients")

                # Perform stepweight decay
                p.mul_(1 - group["lr"] * group["weight_decay"])

                params_with_grad.append(p)
                grads.append(p.grad)

        for p in params_with_grad:
            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = 0
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format)
                # Exponential moving average of squared gradient values
                # NOTE: initialized to ones here; stock torch initializes this to zeros.
                state["exp_avg_sq"] = torch.ones_like(
                    p, memory_format=torch.preserve_format)
                if amsgrad:
                    # Maintains max of all exp. moving avg. of sq. grad. values
                    state["max_exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)

            exp_avg.append(state["exp_avg"])
            exp_avg_sq.append(state["exp_avg_sq"])

            if amsgrad:
                max_exp_avg_sq.append(state["max_exp_avg_sq"])

            state["step"] += 1
            states.append(state)

        beta1, beta2 = group["betas"]

        bias_correction1 = [1 - beta1**state["step"] for state in states]
        bias_correction2 = [1 - beta2**state["step"] for state in states]

        #
        # Decay the first and second moment running average coefficient
        #
        torch._foreach_mul_(exp_avg, beta1)
        torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)

        torch._foreach_mul_(exp_avg_sq, beta2)
        torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)

        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            max_exp_avg_sq = torch._foreach_maximum(max_exp_avg_sq, exp_avg_sq)

            # Use the max. for normalizing running avg. of gradient
            max_exp_avg_sq_sqrt = torch._foreach_sqrt(
                torch._foreach_add(max_exp_avg_sq, group["eps"]))
            bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
            denom = torch._foreach_div(max_exp_avg_sq_sqrt, bias_correction_sqrt)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(
                torch._foreach_add(exp_avg_sq, group["eps"]))
            bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
            denom = torch._foreach_div(exp_avg_sq_sqrt, bias_correction_sqrt)

        step_size = [-1 * (group["lr"] / bc) for bc in bias_correction1]
        torch._foreach_addcdiv_(params_with_grad, exp_avg, denom, step_size)

    return loss