Example #1
File: loss.py Project: obinsc/deeppy
 def fprop(self):
     # Binary cross-entropy, summed over every element of the batch.
     pred = self.x.out
     target = self.target.out
     if self.clip:
         # Keep pred strictly inside (0, 1) so both logs stay finite.
         ca.clip(pred, _FLT_MIN, .9999999, pred)
     self.out = -ca.sum(target * ca.log(pred) +
                        (1 - target) * ca.log(1 - pred))
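This is the summed form of binary cross-entropy, -sum(target*log(pred) + (1 - target)*log(1 - pred)), with predictions clipped away from 0 and 1 first. A minimal sketch of the same computation in plain NumPy (NumPy standing in for cudarray's ca; the _FLT_MIN value is an assumption):

import numpy as np

_FLT_MIN = np.finfo(np.float32).tiny  # assumed clip floor

def binary_cross_entropy_sum(pred, target):
    # Clip so neither log() argument can reach zero.
    pred = np.clip(pred, _FLT_MIN, 0.9999999)
    return -np.sum(target * np.log(pred) +
                   (1 - target) * np.log(1 - pred))

pred = np.array([[0.9, 0.1], [0.2, 0.8]])
target = np.array([[1.0, 0.0], [0.0, 1.0]])
print(binary_cross_entropy_sum(pred, target))  # ~0.657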
Example #2
File: loss.py Project: EricSchles/deeppy
 def fprop(self):
     # -target * log(pred)
     tmp1 = self.pred.array + self.eps
     ca.log(tmp1, tmp1)
     tmp1 *= self.target.array
     ca.sum(tmp1, axis=1, out=self.array)
     ca.negative(self.array, self.array)
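The comment gives the math: this accumulates -sum(target * log(pred + eps)) per row, i.e. the categorical cross-entropy term, using in-place cudarray ops (ca.log(tmp1, tmp1) writes the result back into its input) to avoid extra temporaries. A rough NumPy equivalent, with eps assumed to be a small smoothing constant:

import numpy as np

def categorical_ce_rows(pred, target, eps=1e-8):  # eps value assumed
    tmp1 = pred + eps
    np.log(tmp1, out=tmp1)      # in-place log, mirroring ca.log(tmp1, tmp1)
    tmp1 *= target
    out = np.sum(tmp1, axis=1)
    np.negative(out, out=out)
    return out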
Example #3
 def bprop(self):
     if self.lhs.bpropable:
         # d(x^y)/dx = y * x^(y-1)
         tmp = self.rhs.array - 1
         ca.power(self.lhs.array, tmp, out=self.lhs.grad_array)
         self.lhs.grad_array *= self.rhs.array
         self.lhs.grad_array *= self.grad_array
     if self.rhs.bpropable:
         # d(x^y)/dy = x^y * log(x); self.array holds x^y
         ca.log(self.lhs.array, out=self.rhs.grad_array)
         self.rhs.grad_array *= self.array
         self.rhs.grad_array *= self.grad_array
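Both branches follow from the power rule for z = x**y: dz/dx = y * x**(y-1) and dz/dy = x**y * log(x) (hence the ca.log call), each then scaled by the incoming gradient. A quick finite-difference check of the two formulas:

import math

x, y, h = 2.0, 3.0, 1e-6
z = x ** y
dx_analytic = y * x ** (y - 1)       # lhs branch
dy_analytic = z * math.log(x)        # rhs branch
dx_numeric = ((x + h) ** y - z) / h
dy_numeric = (x ** (y + h) - z) / h
assert abs(dx_analytic - dx_numeric) < 1e-3
assert abs(dy_analytic - dy_numeric) < 1e-3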
Example #4
 def bprop(self):
     if self.lhs_bprop:
         # d(x^y)/dx = y * x^(y-1)
         tmp = self.rhs.out - 1
         ca.power(self.lhs.out, tmp, out=self.lhs.out_grad)
         self.lhs.out_grad *= self.rhs.out
         self.lhs.out_grad *= self.out_grad
     if self.rhs_bprop:
         # d(x^y)/dy = x^y * log(x); self.out holds x^y
         ca.log(self.lhs.out, out=self.rhs.out_grad)
         self.rhs.out_grad *= self.out
         self.rhs.out_grad *= self.out_grad
Example #5
File: loss.py Project: houxingxing/deeppy
 def fprop(self):
     # -log(1 - pred)*(1 - target) - log(pred)*target
     tmp1 = 1 - self.pred.out
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.out
     ca.multiply(tmp1, tmp2, tmp1)
     ca.add(self.pred.out, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.out
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, keepdims=True, out=self.out)
Example #6
File: loss.py Project: EricSchles/deeppy
 def fprop(self):
     # -log(1 - pred)*(1 - target) - log(pred)*target
     tmp1 = 1 - self.pred.array
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.array
     ca.multiply(tmp1, tmp2, tmp1)
     ca.add(self.pred.array, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.array
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, out=self.array)
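Both variants evaluate the commented formula, the per-row sum of -[(1 - target)*log(1 - pred + eps) + target*log(pred + eps)], recycling two scratch buffers (tmp1, tmp2) instead of allocating a new array at every step. The same per-row binary cross-entropy in plain NumPy, with an assumed eps:

import numpy as np

def binary_ce_rows(pred, target, eps=1e-8):  # eps value assumed
    return -np.sum((1 - target) * np.log(1 - pred + eps) +
                   target * np.log(pred + eps), axis=1)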
Example #7
def categorical_cross_entropy(y_pred, y_true, eps=1e-15):
    # Assumes one-hot encoding.
    y_pred = ca.clip(y_pred, eps, 1 - eps)
    # XXX: do we need to normalize?
    y_pred /= ca.sum(y_pred, axis=1, keepdims=True)
    loss = -ca.sum(y_true * ca.log(y_pred), axis=1)
    return loss
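A small usage sketch (NumPy in place of cudarray; the rows of y_true are one-hot, as the comment assumes):

import numpy as np

y_pred = np.array([[0.7, 0.2, 0.1],
                   [0.1, 0.8, 0.1]])
y_true = np.array([[1.0, 0.0, 0.0],   # correct, confident
                   [0.0, 0.0, 1.0]])  # wrong, confident

eps = 1e-15
y_pred = np.clip(y_pred, eps, 1 - eps)
y_pred /= np.sum(y_pred, axis=1, keepdims=True)
loss = -np.sum(y_true * np.log(y_pred), axis=1)
print(loss)  # ~[0.36, 2.30]: confident mistakes are punished hardest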
Example #8
 def fprop(self):
     ca.log(self.x.out, out=self.out)
Example #9
File: activation.py Project: obinsc/deeppy
 def fprop(self):
     ca.exp(self.x.out, self.out)
     # TODO: use log1p()
     self.out += 1
     ca.log(self.out, self.out)
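This computes softplus, log(1 + exp(x)), entirely in place. The TODO points at log1p, which is more accurate when exp(x) is small; sketched below in NumPy (the fully stable form also avoids overflow for large positive x):

import numpy as np

def softplus_log1p(x):
    # log1p(exp(x)): accurate near zero, but exp(x) can still
    # overflow for large x.
    return np.log1p(np.exp(x))

def softplus_stable(x):
    # max(x, 0) + log1p(exp(-|x|)) is safe in both tails.
    return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))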
Example #10
File: loss.py Project: luffyhwl/deeppy
 def loss(self, y, y_pred):
     # Mean over samples of the per-row binary cross-entropy;
     # eps keeps both log() arguments away from zero.
     return ca.mean(-ca.sum(y*ca.log(y_pred+self.eps) +
                            (1-y) * ca.log(1-y_pred+self.eps), axis=1))
Example #11
File: loss.py Project: vijayshgupta/deeppy
 def loss(self, pred, target):
     # Clamp from below so log(pred) is finite; note that log(1 - pred)
     # is still unprotected if pred reaches exactly 1.
     pred = ca.maximum(pred, _FLT_MIN)
     return -ca.sum(target*ca.log(pred) + (1 - target)*ca.log(1 - pred),
                    axis=1)
Example #12
 def fprop(self):
     ca.log(self.x.array, out=self.array)
Example #13
File: loss.py Project: numericx/deeppy
 def loss(self, y, y_pred):
     # Per-row mean (not sum) of the binary cross-entropy terms.
     y_pred = ca.maximum(y_pred, _FLT_MIN)
     return -ca.mean(y*ca.log(y_pred) + (1 - y)*ca.log(1 - y_pred), axis=1)
Example #14
 def fprop(self, x):
     # Softplus: log(1 + exp(x)); x is cached for the backward pass.
     self._tmp_x = x
     return ca.log(1.0 + ca.exp(x))