Example 1
 def step(self):
     for param, rms_grad in zip(self.params, self.steps):
         # Decaying running mean of squared gradients, kept in place across calls.
         rms_grad *= self.decay
         step = param.grad()
         if param.penalty is not None:
             step -= param.penalty()
         rms_grad += (1.0 - self.decay) * step**2
         # Floor the RMS scaling so a near-zero accumulator cannot blow up the step.
         scaling = ca.maximum(ca.sqrt(rms_grad), self.max_scaling_inv)
         step_rate = self.learn_rate * param.learn_rate / self.batch_size
         param.step(step / scaling * (-step_rate))
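For reference, a minimal, self-contained NumPy sketch of the same update pattern. It assumes ca.maximum and ca.sqrt mirror their NumPy namesakes; eps plays the role of self.max_scaling_inv, flooring the scaling term so a near-zero gradient accumulator cannot produce an enormous step. The hyperparameter values are illustrative only.
 import numpy as np

 def rmsprop_step(w, grad, ms, decay=0.9, learn_rate=0.01, eps=1e-8):
     # Decaying mean of squared gradients, updated in place like self.steps above.
     ms *= decay
     ms += (1.0 - decay) * grad**2
     # Floor the scaling term (cf. ca.maximum(ca.sqrt(rms_grad), self.max_scaling_inv)).
     scaling = np.maximum(np.sqrt(ms), eps)
     w -= learn_rate * grad / scaling
     return w, ms

 w, ms = np.zeros(3), np.zeros(3)
 grad = np.array([0.1, -0.2, 0.3])
 w, ms = rmsprop_step(w, grad, ms)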
Example 2
 def fprop(self):
     # Elementwise maximum of the two operands, written into the preallocated output.
     ca.maximum(self.lhs.out, self.rhs.out, out=self.out)
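A note on the call pattern: passing out= writes the elementwise maximum into an existing buffer instead of allocating a new array. A NumPy sketch of the same pattern, assuming ca.maximum follows numpy.maximum here:
 import numpy as np

 lhs = np.array([1.0, -2.0, 3.0])
 rhs = np.array([0.5, 0.0, 4.0])
 out = np.empty_like(lhs)
 np.maximum(lhs, rhs, out=out)   # out now holds [1.0, 0.0, 4.0]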
Example 3
 def fprop(self, x):
     self._tmp_x = x
     # Leaky/parametric ReLU: keep the positive part, scale the negative part by a.
     pos = ca.maximum(x, 0)
     neg = self.a.array * ca.minimum(x, 0)
     return pos + neg
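The pos + neg split above is a leaky/parametric ReLU: maximum(x, 0) keeps the positive part and minimum(x, 0) isolates the negative part, which is then scaled by the slope. A NumPy sketch with a plain scalar slope (self.a.array above is presumably a learned parameter):
 import numpy as np

 def leaky_relu(x, a=0.25):
     # Positive part passes through unchanged; negative part is scaled by a.
     return np.maximum(x, 0) + a * np.minimum(x, 0)

 x = np.array([-2.0, -0.5, 0.0, 3.0])
 print(leaky_relu(x))   # -> [-0.5, -0.125, 0.0, 3.0]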
Example 4
 def grad(self, pred, target):
     # Clamp predictions away from zero so the division below cannot blow up.
     pred = ca.maximum(pred, _FLT_MIN)
     return -(target/pred - (1-target)/(1-pred))
Example 5
 def fprop(self):
     # Leaky/parametric ReLU computed in place: a * min(x, 0) + max(x, 0).
     ca.minimum(self.x.out, 0, out=self.out)
     self.out *= self.a
     self.out += ca.maximum(self.x.out, 0)
Example 6
 def fprop(self):
     # Elementwise maximum of the two operands, written into self.array.
     ca.maximum(self.lhs.array, self.rhs.array, out=self.array)
Example 7
 def loss(self, pred, target):
     # Clamp predictions so ca.log never sees an exact zero.
     pred = ca.maximum(pred, _FLT_MIN)
     return -ca.sum(target*ca.log(pred) + (1 - target)*ca.log(1 - pred),
                    axis=1)
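In the loss and grad examples, clamping pred at _FLT_MIN (presumably the smallest positive float) keeps the logarithm and the division from seeing an exact zero. A self-contained NumPy sketch of both, with _FLT_MIN stood in by np.finfo(np.float32).tiny:
 import numpy as np

 _FLT_MIN = np.finfo(np.float32).tiny   # stand-in for the module's _FLT_MIN constant

 def bce_loss(pred, target):
     pred = np.maximum(pred, _FLT_MIN)
     return -np.sum(target * np.log(pred) + (1 - target) * np.log(1 - pred),
                    axis=1)

 def bce_grad(pred, target):
     pred = np.maximum(pred, _FLT_MIN)
     return -(target / pred - (1 - target) / (1 - pred))
As in the snippets, only the lower bound is clamped; a prediction of exactly 1 would still send log(1 - pred) to -inf.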
Example 8
 def loss(self, y, dists):
     # Contrastive-style loss: hinge on (margin - dists) for dissimilar pairs.
     return y * dists + (1 - y) * ca.maximum(self.margin - dists, 0)
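Here ca.maximum(self.margin - dists, 0) is the hinge of a contrastive-style loss: similar pairs (y = 1) are penalised in proportion to their distance, while dissimilar pairs (y = 0) are only penalised while they sit inside the margin. A NumPy sketch with an illustrative margin:
 import numpy as np

 def contrastive_loss(y, dists, margin=1.0):
     # y = 1: pull similar pairs together; y = 0: push dissimilar pairs past the margin.
     return y * dists + (1 - y) * np.maximum(margin - dists, 0)

 y = np.array([1.0, 0.0, 0.0])
 dists = np.array([0.3, 0.4, 2.0])
 print(contrastive_loss(y, dists))   # -> [0.3, 0.6, 0.0]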
Example 9
 def fprop(self):
     # In-place leaky/parametric ReLU, as above, operating on .array buffers.
     ca.minimum(self.x.array, 0, out=self.array)
     self.array *= self.a
     self.array += ca.maximum(self.x.array, 0)
Example 10
 def grad(self, y, y_pred):
     # Binary cross-entropy gradient with the same clamping as above.
     y_pred = ca.maximum(y_pred, _FLT_MIN)
     return -(y/y_pred - (1-y)/(1-y_pred))
Example 11
 def loss(self, y, y_pred):
     # Binary cross-entropy averaged (rather than summed) over axis 1.
     y_pred = ca.maximum(y_pred, _FLT_MIN)
     return -ca.mean(y*ca.log(y_pred) + (1 - y)*ca.log(1 - y_pred), axis=1)
Example 12
 def loss(self, target, x1, x2):
     # Compute distances between the two inputs, then the same contrastive hinge loss as above.
     dists = self.fprop(x1, x2)
     return target*dists + (1-target)*ca.maximum(self.margin-dists, 0)