示例#1
0
文件: loss.py 项目: obinsc/deeppy
 def fprop(self):
     """Forward pass: total binary cross-entropy summed over all elements."""
     prediction = self.x.out
     labels = self.target.out
     if self.clip:
         # Bound predictions away from 0 and 1 (in place) so that both
         # log(pred) and log(1 - pred) below stay finite.
         ca.clip(prediction, _FLT_MIN, .9999999, prediction)
     positive_term = labels * ca.log(prediction)
     negative_term = (1 - labels) * ca.log(1 - prediction)
     self.out = -ca.sum(positive_term + negative_term)
示例#2
0
 def fprop(self):
     """Per-row categorical cross-entropy:
     self.array[i] = -sum_j target[i, j] * log(pred[i, j] + eps).
     """
     # eps keeps log() away from zero for hard-0 predictions.
     weighted = self.target.array * ca.log(self.pred.array + self.eps)
     ca.sum(weighted, axis=1, out=self.array)
     self.array *= -1
示例#3
0
文件: loss.py 项目: EricSchles/deeppy
 def fprop(self):
     """Per-row categorical cross-entropy.

     Writes self.array[i] = -sum_j target[i, j] * log(pred[i, j] + eps).
     """
     # -target * log(pred)
     # eps shifts predictions away from 0 so log() stays finite.
     tmp1 = self.pred.array + self.eps
     ca.log(tmp1, tmp1)
     tmp1 *= self.target.array
     ca.sum(tmp1, axis=1, out=self.array)
     ca.negative(self.array, self.array)
示例#4
0
 def bprop(self):
     """Backward pass for element-wise power: forward was lhs ** rhs."""
     if self.lhs.bpropable:
         # d(l**r)/dl = r * l**(r - 1)
         reduced_exponent = self.rhs.array - 1
         ca.power(self.lhs.array, reduced_exponent, out=self.lhs.grad_array)
         self.lhs.grad_array *= self.rhs.array
         self.lhs.grad_array *= self.grad_array
     if self.rhs.bpropable:
         # d(l**r)/dr = (l**r) * log(l); self.array caches l**r from fprop
         ca.log(self.lhs.array, out=self.rhs.grad_array)
         self.rhs.grad_array *= self.array
         self.rhs.grad_array *= self.grad_array
示例#5
0
 def bprop(self):
     """Backward pass for element-wise power (forward: out = lhs ** rhs)."""
     if self.lhs.bpropable:
         # d(lhs**rhs)/dlhs = rhs * lhs**(rhs - 1), scaled by upstream grad
         tmp = self.rhs.array - 1
         ca.power(self.lhs.array, tmp, out=self.lhs.grad_array)
         self.lhs.grad_array *= self.rhs.array
         self.lhs.grad_array *= self.grad_array
     if self.rhs.bpropable:
         # d(lhs**rhs)/drhs = (lhs**rhs) * log(lhs);
         # self.array holds lhs**rhs from the forward pass
         ca.log(self.lhs.array, out=self.rhs.grad_array)
         self.rhs.grad_array *= self.array
         self.rhs.grad_array *= self.grad_array
示例#6
0
 def bprop(self):
     """Backward pass for element-wise power (forward: out = lhs ** rhs)."""
     if self.lhs_bprop:
         # d(lhs**rhs)/dlhs = rhs * lhs**(rhs - 1), scaled by upstream grad
         tmp = self.rhs.out - 1
         ca.power(self.lhs.out, tmp, out=self.lhs.out_grad)
         self.lhs.out_grad *= self.rhs.out
         self.lhs.out_grad *= self.out_grad
     if self.rhs_bprop:
         # d(lhs**rhs)/drhs = (lhs**rhs) * log(lhs);
         # self.out holds lhs**rhs from the forward pass
         ca.log(self.lhs.out, out=self.rhs.out_grad)
         self.rhs.out_grad *= self.out
         self.rhs.out_grad *= self.out_grad
示例#7
0
 def bprop(self):
     """Propagate gradients through element-wise power: out = lhs ** rhs."""
     if self.lhs_bprop:
         # dout/dlhs = rhs * lhs ** (rhs - 1)
         exponent_minus_one = self.rhs.out - 1
         ca.power(self.lhs.out, exponent_minus_one, out=self.lhs.out_grad)
         self.lhs.out_grad *= self.rhs.out
         self.lhs.out_grad *= self.out_grad
     if self.rhs_bprop:
         # dout/drhs = out * log(lhs); self.out caches lhs ** rhs
         ca.log(self.lhs.out, out=self.rhs.out_grad)
         self.rhs.out_grad *= self.out
         self.rhs.out_grad *= self.out_grad
示例#8
0
 def fprop(self):
     """Per-row binary cross-entropy:
     loss_i = -sum_j [ (1-t)*log(1-p+eps) + t*log(p+eps) ].
     """
     # eps shifts both log arguments away from zero.
     neg_term = ca.log(1 - self.pred.array + self.eps)
     neg_term *= 1 - self.target.array
     pos_term = ca.log(self.pred.array + self.eps)
     pos_term *= self.target.array
     neg_term += pos_term
     neg_term *= -1
     ca.sum(neg_term, axis=1, out=self.array)
示例#9
0
 def fprop(self):
     """Per-row binary cross-entropy (keeps the reduced axis via keepdims).

     Computes -[ log(1-p+eps)*(1-t) + log(p+eps)*t ] summed over axis 1.
     Temporaries are reused in place to limit allocations.
     """
     # -log(1 - pred)*(1 - target) - log(pred)*target
     tmp1 = 1 - self.pred.out
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.out
     ca.multiply(tmp1, tmp2, tmp1)
     # tmp2 is recycled as the buffer for the positive term
     ca.add(self.pred.out, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.out
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, keepdims=True, out=self.out)
示例#10
0
文件: loss.py 项目: EricSchles/deeppy
 def fprop(self):
     """Per-row binary cross-entropy.

     Computes -[ log(1-p+eps)*(1-t) + log(p+eps)*t ] summed over axis 1.
     Temporaries are reused in place to limit allocations.
     """
     # -log(1 - pred)*(1 - target) - log(pred)*target
     tmp1 = 1 - self.pred.array
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.array
     ca.multiply(tmp1, tmp2, tmp1)
     # tmp2 is recycled as the buffer for the positive term
     ca.add(self.pred.array, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.array
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, out=self.array)
示例#11
0
def categorical_cross_entropy(y_pred, y_true, eps=1e-15):
    """Per-row cross-entropy of y_pred against one-hot encoded y_true.

    Predictions are clipped into [eps, 1 - eps] so log() stays finite,
    then renormalized so each row sums to 1.
    """
    clipped = ca.clip(y_pred, eps, 1 - eps)
    # XXX: do we need to normalize?
    clipped /= ca.sum(clipped, axis=1, keepdims=True)
    return -ca.sum(y_true * ca.log(clipped), axis=1)
示例#12
0
def categorical_cross_entropy(y_pred, y_true, eps=1e-15):
    """Return the per-row cross-entropy of y_pred against one-hot y_true.

    y_pred is clipped to [eps, 1 - eps] so log() stays finite, then
    renormalized row-wise before the loss is computed.
    """
    # Assumes one-hot encoding.
    y_pred = ca.clip(y_pred, eps, 1 - eps)
    # XXX: do we need to normalize?
    y_pred /= ca.sum(y_pred, axis=1, keepdims=True)
    loss = -ca.sum(y_true * ca.log(y_pred), axis=1)
    return loss
示例#13
0
 def fprop(self):
     """Element-wise natural log of the input, written into self.out."""
     ca.log(self.x.out, out=self.out)
示例#14
0
 def fprop(self):
     """Total binary cross-entropy between predictions and targets."""
     pred = self.x.out
     target = self.target.out
     if self.clip:
         # In-place clip keeps pred inside (0, 1) so neither log() sees 0.
         ca.clip(pred, _FLT_MIN, .9999999, pred)
     self.out = -ca.sum(target*ca.log(pred) + (1 - target)*ca.log(1 - pred))
示例#15
0
 def fprop(self):
     """Softplus activation, computed in place: out = log(1 + exp(x))."""
     ca.exp(self.x.out, self.out)
     # TODO: use log1p() -- exp() can overflow for large x
     self.out += 1
     ca.log(self.out, self.out)
示例#16
0
文件: loss.py 项目: luffyhwl/deeppy
 def loss(self, y, y_pred):
     """Mean over rows of binary cross-entropy; eps keeps log() finite."""
     pos = y * ca.log(y_pred + self.eps)
     neg = (1 - y) * ca.log(1 - y_pred + self.eps)
     return ca.mean(-ca.sum(pos + neg, axis=1))
示例#17
0
 def loss(self, pred, target):
     """Per-row binary cross-entropy loss.

     Bounds pred away from BOTH 0 and 1: the previous lower-only bound
     (ca.maximum) guarded log(pred) but left log(1 - pred) free to
     produce -inf when pred == 1.
     """
     # Two-sided clip, same bounds used by the fprop variants above.
     pred = ca.clip(pred, _FLT_MIN, .9999999)
     return -ca.sum(target*ca.log(pred) + (1 - target)*ca.log(1 - pred),
                    axis=1)
示例#18
0
 def fprop(self):
     """Element-wise natural log of the input, written into self.array."""
     ca.log(self.x.array, out=self.array)
示例#19
0
 def fprop(self):
     """Element-wise natural log of the input, written into self.out."""
     ca.log(self.x.out, out=self.out)
示例#20
0
 def fprop(self):
     """Element-wise natural log of the input, written into self.array."""
     ca.log(self.x.array, out=self.array)
示例#21
0
文件: loss.py 项目: numericx/deeppy
 def loss(self, y, y_pred):
     """Row-wise mean binary cross-entropy.

     Bounds y_pred away from BOTH 0 and 1: the previous lower-only
     bound (ca.maximum) left log(1 - y_pred) unprotected, producing
     -inf when y_pred == 1.
     """
     y_pred = ca.clip(y_pred, _FLT_MIN, .9999999)
     return -ca.mean(y*ca.log(y_pred) + (1 - y)*ca.log(1 - y_pred), axis=1)
示例#22
0
 def fprop(self, x):
     """Softplus: return log(1 + exp(x)); x is cached for the backward pass."""
     self._tmp_x = x
     shifted = ca.exp(x)
     shifted += 1.0
     return ca.log(shifted)
示例#23
0
 def fprop(self, x):
     """Softplus activation: log(1 + exp(x)).

     Stashes x in self._tmp_x so bprop can reuse it.
     """
     self._tmp_x = x
     return ca.log(1.0 + ca.exp(x))
示例#24
0
 def loss(self, pred, target):
     """Per-row binary cross-entropy loss.

     Clips pred on both sides: ca.maximum alone only guarded log(pred),
     while log(1 - pred) still returned -inf when pred == 1.
     """
     pred = ca.clip(pred, _FLT_MIN, .9999999)
     return -ca.sum(target * ca.log(pred) + (1 - target) * ca.log(1 - pred),
                    axis=1)