Example #1
def _step(self, epoch):
    t = fn.to_tensor(epoch)
    grads = [p.grad for p in self.parameters]
    for i, (p, g, m, v) in enumerate(zip(self.parameters, grads, self.ms, self.vs)):
        # Exponential moving averages of the gradient and the squared gradient
        m = self.beta1 * m + (1 - self.beta1) * g
        v = self.beta2 * v + (1 - self.beta2) * fn.square(g)
        # Write the updated moments back so they persist across steps
        self.ms[i], self.vs[i] = m, v
        # Bias-corrected estimates
        m_hat = m / (1 - fn.power(self.beta1, t))
        v_hat = v / (1 - fn.power(self.beta2, t))
        # Nesterov-style look-ahead term uses (1 - beta1), not (1 - beta2)
        p -= (self.lr * (self.beta1 * m_hat + (1 - self.beta1) * g / (1 - fn.power(self.beta1, t)))
              / (fn.sqrt(v_hat) + self.epsilon))
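This step appears to implement Nadam (Adam with Nesterov momentum). Reading the loop body as a per-parameter update rule gives:

$$
m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \qquad
v_t = \beta_2 v_{t-1} + (1-\beta_2)\,g_t^2,
$$
$$
\hat{m}_t = \frac{m_t}{1-\beta_1^t}, \qquad
\hat{v}_t = \frac{v_t}{1-\beta_2^t}, \qquad
\theta_t = \theta_{t-1} - \frac{\eta}{\sqrt{\hat{v}_t}+\epsilon}\left(\beta_1\hat{m}_t + \frac{(1-\beta_1)\,g_t}{1-\beta_1^t}\right),
$$

where $\eta$ is `self.lr` and $\epsilon$ is `self.epsilon`.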
Example #2
def _step(self, epoch):
    t = fn.to_tensor(epoch)
    grads = [p.grad for p in self.parameters]
    for i, (p, g, m, v, vhat) in enumerate(zip(self.parameters, grads, self.ms,
                                               self.vs, self.vhats)):
        m = self.beta1 * m + (1 - self.beta1) * g
        v = self.beta2 * v + (1 - self.beta2) * fn.square(g)
        # Keep the element-wise maximum of all second-moment estimates seen so far
        vhat = fn.maximum(vhat, v)
        # Persist the updated state so it carries over to the next step
        self.ms[i], self.vs[i], self.vhats[i] = m, v, vhat
        p -= self.lr * m / (fn.sqrt(vhat) + self.epsilon)
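The element-wise running maximum of the second-moment estimate is the defining feature of AMSGrad, which this step appears to follow (without bias correction, as the code shows):

$$
m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \quad
v_t = \beta_2 v_{t-1} + (1-\beta_2)\,g_t^2, \quad
\hat{v}_t = \max(\hat{v}_{t-1}, v_t), \quad
\theta_t = \theta_{t-1} - \frac{\eta\, m_t}{\sqrt{\hat{v}_t}+\epsilon}.
$$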
Example #3
def _step(self, epoch):
    t = fn.to_tensor(epoch)
    # Bias-corrected step size and dynamic bounds that converge towards final_lr
    step_size = self.lr * (fn.sqrt(1 - fn.power(self.beta2, t)) /
                           (1 - fn.power(self.beta1, t)))
    lower_bound = self.final_lr * (1.0 - 1.0 / (self.gamma * t + 1))
    upper_bound = self.final_lr * (1.0 + 1.0 / (self.gamma * t))
    grads = [p.grad for p in self.parameters]
    for i, (p, g, m, v) in enumerate(zip(self.parameters, grads, self.ms, self.vs)):
        m = self.beta1 * m + (1 - self.beta1) * g
        v = self.beta2 * v + (1 - self.beta2) * fn.square(g)
        # Persist the updated moment estimates across steps
        self.ms[i], self.vs[i] = m, v
        denom = fn.sqrt(v) + self.epsilon
        # Clip the effective per-element learning rate into [lower_bound, upper_bound]
        p -= m * fn.clip(step_size / denom, lower_bound.item(),
                         upper_bound.item())
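Clipping the effective step size between bounds that both converge to `final_lr` is characteristic of an AdaBound-style optimizer; in formula form the loop applies:

$$
\alpha_t = \eta\,\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t}, \qquad
\theta_t = \theta_{t-1} - m_t \odot \mathrm{clip}\!\left(\frac{\alpha_t}{\sqrt{v_t}+\epsilon},\;
\eta_{\mathrm{final}}\Bigl(1-\frac{1}{\gamma t+1}\Bigr),\;
\eta_{\mathrm{final}}\Bigl(1+\frac{1}{\gamma t}\Bigr)\right),
$$

where $\eta_{\mathrm{final}}$ is `self.final_lr` and $\gamma$ is `self.gamma`.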
Example #4
def forward(self, x):
    if self.train_mode:
        # Batch statistics; the second quantity is the (biased) variance,
        # despite the running buffer being called std_avg
        mean = fn.mean(x)
        variance = fn.mean(fn.square(x - mean))
        # Update the exponential running averages used at inference time
        self.u_avg.data = self.momentum.data * self.u_avg.data + (
            1 - self.momentum.data) * mean.data
        self.std_avg.data = self.momentum.data * self.std_avg.data + (
            1 - self.momentum.data) * variance.data
    else:
        # In evaluation mode, reuse the running statistics
        mean = self.u_avg
        variance = self.std_avg
    # Normalize, then apply the learnable scale (gamma) and shift (beta)
    x = (x - mean) / fn.sqrt(variance + self.epsilon)
    return fn.mul(x, self.gamma) + self.beta
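This `forward` is a batch-normalization layer: in training mode it normalizes with the batch mean and variance while updating exponential running averages of both; in evaluation mode it reuses those running statistics. The transformation is:

$$
y = \gamma\,\frac{x-\mu}{\sqrt{\sigma^2+\epsilon}} + \beta, \qquad
\mu_{\mathrm{avg}} \leftarrow \lambda\,\mu_{\mathrm{avg}} + (1-\lambda)\,\mu, \qquad
\sigma^2_{\mathrm{avg}} \leftarrow \lambda\,\sigma^2_{\mathrm{avg}} + (1-\lambda)\,\sigma^2,
$$

with $\lambda$ = `self.momentum`.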
Example #5
def half_quadratic(output: Tensor, target: Tensor):
    """
    Half quadratic loss function.

    ## Parameters
    output: `Tensor` - model's prediction

    target: `Tensor` - training sample targets

    ## Example usage
    ```python
    from beacon.tensor import Tensor
    from beacon.functional import functions as F

    output = Tensor([[0.2, 0.7, 0.1], [0.4, 0.45, 0.15]], requires_grad=True)
    target = Tensor([[0, 1, 0], [1, 0, 0]], requires_grad=True)
    loss = F.half_quadratic(output, target)
    ```
    """
    output, target = fn.to_tensor(output), fn.to_tensor(target)
    return 0.5 * fn.square(output - target)
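Note that, unlike `mean_squared_error` below, this loss is returned element-wise with no reduction:

$$
L(\hat{y}, y) = \tfrac{1}{2}\,(\hat{y}-y)^2.
$$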
Example #6
def mean_squared_error(output: Tensor, target: Tensor):
    """
    Mean squared error loss function.

    ## Parameters
    output: `Tensor` - model's prediction

    target: `Tensor` - training sample targets

    ## Example usage
    ```python
    from beacon.tensor import Tensor
    from beacon.functional import functions as F

    output = Tensor([[0.2, 0.7, 0.1], [0.4, 0.45, 0.15]], requires_grad=True)
    target = Tensor([[0, 1, 0], [1, 0, 0]], requires_grad=True)
    loss = F.mean_squared_error(output, target)
    ```
    """
    output, target = fn.to_tensor(output), fn.to_tensor(target)
    return fn.mean(fn.square(output - target), axis=-1)
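The squared error is averaged over the last axis, so the result is one loss value per sample:

$$
L(\hat{y}, y) = \frac{1}{n}\sum_{j=1}^{n}(\hat{y}_j - y_j)^2.
$$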
Example #7
def _step(self, epoch):
    grads = [p.grad for p in self.parameters]
    for i, (p, g, e) in enumerate(zip(self.parameters, grads, self.E)):
        # Exponential moving average of the squared gradients
        e = self.beta * e + (1 - self.beta) * fn.square(g)
        self.E[i] = e  # persist the running average across steps
        p -= self.lr * g / (fn.sqrt(e) + self.epsilon)
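This appears to be the RMSProp update, where a decaying average of squared gradients scales the raw gradient:

$$
E_t = \beta E_{t-1} + (1-\beta)\,g_t^2, \qquad
\theta_t = \theta_{t-1} - \frac{\eta\,g_t}{\sqrt{E_t}+\epsilon}.
$$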
Example #8
def _step(self, epoch):
    grads = [p.grad for p in self.parameters]
    for p, g, gs in zip(self.parameters, grads, self.G):
        # Accumulate the sum of squared gradients
        gs += fn.square(g)
        p -= self.lr * g / fn.sqrt(gs + self.epsilon)
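This appears to be the AdaGrad rule, with the accumulated squared gradients monotonically shrinking the effective learning rate (note that $\epsilon$ is added inside the square root here):

$$
G_t = G_{t-1} + g_t^2, \qquad
\theta_t = \theta_{t-1} - \frac{\eta\,g_t}{\sqrt{G_t+\epsilon}}.
$$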