@property
def grad(self):
    # Expose the wrapped chainer.Parameter's gradient as a torch.Tensor.
    # TODO(hvy): Cache constructed `torch.Tensor`.
    if self._param.grad is not None:
        return cpm.astensor(self._param.grad)
    else:
        return None
def step(self, closure=None):
    # Copy each Chainer-side gradient into the corresponding torch
    # parameter's gradient buffer, then delegate the actual parameter
    # update to the wrapped torch optimizer.
    for param_group in self._base_optimizer.param_groups:
        for param in param_group['params']:
            assert isinstance(param, ChainerParameter)
            param.grad.copy_(cpm.astensor(param._param.grad))
    self._base_optimizer.step(closure)
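
# Usage sketch (not part of the original source). Assumptions: the grad
# property above and the first __new__ below belong to a ChainerParameter
# class that stores the wrapped chainer.Parameter as self._param; step()
# belongs to a wrapper class (called Optimizer here) whose constructor keeps
# a plain torch optimizer as self._base_optimizer; and cpm refers to the
# chainer_pytorch_migration package providing astensor().
import numpy as np
import chainer
import torch

c_param = chainer.Parameter(np.zeros((2, 3), dtype=np.float32))
t_param = ChainerParameter(c_param)   # torch parameter built from c_param.array
optimizer = Optimizer(torch.optim.SGD([t_param], lr=0.1))

# ... a Chainer forward/backward pass fills c_param.grad ...
optimizer.step()   # copies the gradient into t_param, then runs SGD's update
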
def __new__(cls, param):
    # Construct from a chainer.Parameter by converting its array to a
    # torch.Tensor and handing it to the parent class's __new__.
    return super().__new__(cls, cpm.astensor(param.array))
def __new__(cls, variable):
    # Alternative constructor taking a chainer.Variable: convert its array
    # to a torch.Tensor, then retype the resulting object in place.
    assert isinstance(variable, chainer.Variable)
    obj = cpm.astensor(variable.array)
    obj.__class__ = cls
    return obj
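
# Construction sketch for the Variable-based __new__ above (not from the
# original source). It assumes that method lives on a torch.Tensor subclass,
# named ChainerTensor here purely for illustration, and that cpm.astensor()
# converts the underlying NumPy/CuPy array to a torch.Tensor.
import numpy as np
import chainer

v = chainer.Variable(np.arange(6, dtype=np.float32).reshape(2, 3))
t = ChainerTensor(v)   # converts v.array, then retypes the resulting tensor
assert isinstance(t, ChainerTensor)
assert tuple(t.shape) == (2, 3)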