Пример #1
0
def test_binary():
    """Smoke-test cudarray's binary ufuncs against NumPy references.

    Prints True for each case when the cudarray result matches NumPy.
    """
    lhs_np = np.random.normal(size=(5, 5))
    rhs_np = np.random.normal(size=(5, 5))
    lhs_ca = ca.array(lhs_np)
    rhs_ca = ca.array(rhs_np)

    # Out-of-place addition.
    sum_np = np.add(lhs_np, rhs_np)
    sum_ca = ca.add(lhs_ca, rhs_ca)
    print(np.allclose(sum_np, np.array(sum_ca)))

    # In-place addition: result written back into the first operand.
    np.add(lhs_np, rhs_np, lhs_np)
    ca.add(lhs_ca, rhs_ca, lhs_ca)
    print(np.allclose(lhs_np, np.array(lhs_ca)))

    # In-place multiplication.
    np.multiply(lhs_np, rhs_np, lhs_np)
    ca.multiply(lhs_ca, rhs_ca, lhs_ca)
    print(np.allclose(lhs_np, np.array(lhs_ca)))

    # Multiplication with a boolean second operand (dtype promotion path).
    lhs_np = np.random.normal(size=(5, 5))
    rhs_np = np.random.normal(size=(5, 5)) > 0
    lhs_ca = ca.array(lhs_np)
    rhs_ca = ca.array(rhs_np)
    prod_np = np.multiply(lhs_np, rhs_np)
    prod_ca = ca.multiply(lhs_ca, rhs_ca)
    print(np.allclose(prod_np, np.array(prod_ca)))
Пример #2
0
def test_binary():
    """Compare cudarray binary operations with their NumPy counterparts.

    Each check prints True when the device result agrees with NumPy.
    """
    a_host = np.random.normal(size=(5, 5))
    b_host = np.random.normal(size=(5, 5))
    a_dev = ca.array(a_host)
    b_dev = ca.array(b_host)

    # Plain (out-of-place) add.
    added_host = np.add(a_host, b_host)
    added_dev = ca.add(a_dev, b_dev)
    print(np.allclose(added_host, np.array(added_dev)))

    # add with an explicit output buffer (in-place on the first operand).
    np.add(a_host, b_host, a_host)
    ca.add(a_dev, b_dev, a_dev)
    print(np.allclose(a_host, np.array(a_dev)))

    # multiply with an explicit output buffer.
    np.multiply(a_host, b_host, a_host)
    ca.multiply(a_dev, b_dev, a_dev)
    print(np.allclose(a_host, np.array(a_dev)))

    # multiply where one operand is boolean.
    a_host = np.random.normal(size=(5, 5))
    b_host = np.random.normal(size=(5, 5)) > 0
    a_dev = ca.array(a_host)
    b_dev = ca.array(b_host)
    product_host = np.multiply(a_host, b_host)
    product_dev = ca.multiply(a_dev, b_dev)
    print(np.allclose(product_host, np.array(product_dev)))
Пример #3
0
 def bprop(self):
     """Gradient of binary cross-entropy w.r.t. the predictions.

     Evaluates -(target/pred - (1 - target)/(1 - pred)) into
     self.pred.grad_array using two scratch buffers, then scales it by the
     incoming gradient (broadcast to this node's shape).
     """
     # -(target/pred - (1 - target)/(1 - pred))
     # tmp1 <- (1 - target) / (1 - pred + eps); eps guards the division.
     tmp1 = 1 - self.target.array
     tmp2 = 1 - self.pred.array
     tmp2 += self.eps
     ca.divide(tmp1, tmp2, tmp1)
     # tmp2 <- target / (pred + eps)
     ca.add(self.pred.array, self.eps, tmp2)
     ca.divide(self.target.array, tmp2, out=tmp2)
     # grad <- tmp1 - tmp2, i.e. the negated bracketed expression above.
     ca.subtract(tmp1, tmp2, self.pred.grad_array)
     # Chain rule: multiply by the upstream gradient, reshaped so that it
     # broadcasts against the prediction gradient.
     self.pred.grad_array *= ca.reshape(self.grad_array, self.bcast_shape)
Пример #4
0
 def bprop(self):
     """Backpropagate binary cross-entropy into the prediction gradient.

     Computes -(target/pred - (1 - target)/(1 - pred)) in-place via two
     scratch buffers, then applies the chain rule with the upstream
     gradient.
     """
     # -(target/pred - (1 - target)/(1 - pred))
     # tmp1 <- (1 - target) / (1 - pred + eps); eps avoids division by 0.
     tmp1 = 1 - self.target.array
     tmp2 = 1 - self.pred.array
     tmp2 += self.eps
     ca.divide(tmp1, tmp2, tmp1)
     # tmp2 <- target / (pred + eps)
     ca.add(self.pred.array, self.eps, tmp2)
     ca.divide(self.target.array, tmp2, out=tmp2)
     # grad <- tmp1 - tmp2 (the negation is absorbed by the subtraction order).
     ca.subtract(tmp1, tmp2, self.pred.grad_array)
     # Scale by upstream gradient reshaped for broadcasting.
     self.pred.grad_array *= ca.reshape(self.grad_array, self.bcast_shape)
Пример #5
0
 def bprop(self):
     """Gradient of binary cross-entropy w.r.t. predictions (.out variant).

     Writes -(target/pred - (1 - target)/(1 - pred)) into
     self.pred.out_grad, then multiplies by the upstream gradient.
     """
     # -(target/pred - (1 - target)/(1 - pred))
     # tmp1 <- (1 - target) / (1 - pred + eps); eps guards the division.
     tmp1 = 1 - self.target.out
     tmp2 = 1 - self.pred.out
     tmp2 += self.eps
     ca.divide(tmp1, tmp2, tmp1)
     # tmp2 <- target / (pred + eps)
     ca.add(self.pred.out, self.eps, tmp2)
     ca.divide(self.target.out, tmp2, out=tmp2)
     # grad <- tmp1 - tmp2, the negated bracketed term from the formula.
     ca.subtract(tmp1, tmp2, self.pred.out_grad)
     # Chain rule with the upstream gradient (no reshape needed here —
     # presumably shapes already match; differs from the .array variant).
     self.pred.out_grad *= self.out_grad
Пример #6
0
 def fprop(self):
     """Forward pass: binary cross-entropy loss, summed over axis 1.

     Computes -log(1 - pred)*(1 - target) - log(pred)*target elementwise
     using two scratch buffers (no extra allocations beyond them), then
     reduces over axis 1 into self.array.
     """
     # -log(1 - pred)*(1 - target) - log(pred)*target
     # tmp1 <- log(1 - pred + eps) * (1 - target); eps keeps log finite.
     tmp1 = 1 - self.pred.array
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.array
     ca.multiply(tmp1, tmp2, tmp1)
     # tmp2 <- log(pred + eps) * target
     ca.add(self.pred.array, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.array
     # tmp1 <- -(tmp1 + tmp2), then reduce over axis 1 into the output.
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, out=self.array)
Пример #7
0
 def fprop(self):
     """Forward pass computing the binary cross-entropy loss.

     Evaluates -log(1 - pred)*(1 - target) - log(pred)*target in-place via
     two scratch buffers and sums the result over axis 1 into self.array.
     """
     # -log(1 - pred)*(1 - target) - log(pred)*target
     # tmp1 <- log(1 - pred + eps) * (1 - target); eps avoids log(0).
     tmp1 = 1 - self.pred.array
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.array
     ca.multiply(tmp1, tmp2, tmp1)
     # tmp2 <- log(pred + eps) * target
     ca.add(self.pred.array, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.array
     # Negate the sum of both terms and reduce over axis 1.
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, out=self.array)
Пример #8
0
 def fprop(self):
     """Forward pass: binary cross-entropy loss (.out variant, keepdims).

     Computes -log(1 - pred)*(1 - target) - log(pred)*target elementwise
     and sums over axis 1 into self.out, keeping the reduced dimension.
     """
     # -log(1 - pred)*(1 - target) - log(pred)*target
     # tmp1 <- log(1 - pred + eps) * (1 - target); eps keeps log finite.
     tmp1 = 1 - self.pred.out
     tmp1 += self.eps
     ca.log(tmp1, tmp1)
     tmp2 = 1 - self.target.out
     ca.multiply(tmp1, tmp2, tmp1)
     # tmp2 <- log(pred + eps) * target
     ca.add(self.pred.out, self.eps, tmp2)
     ca.log(tmp2, tmp2)
     tmp2 *= self.target.out
     # tmp1 <- -(tmp1 + tmp2); keepdims preserves a broadcastable shape.
     ca.add(tmp1, tmp2, tmp1)
     tmp1 *= -1
     ca.sum(tmp1, axis=1, keepdims=True, out=self.out)
Пример #9
0
def matrix_factorization(R, P, Q, mask, steps=200000000, alpha=0.00005, beta=0.02):
    """Factorize R ~ dot(P, Q) by masked gradient descent with L2 decay.

    Fix: the original body mixed tabs and spaces for indentation, which is
    a TabError under Python 3 and fragile under Python 2; the print
    statement is now the parenthesized form, which prints identically in
    Python 2 and is valid Python 3 syntax.

    Parameters:
        R: observed ratings matrix.
        P, Q: factor matrices (Q is transposed internally).
        mask: 1 for observed entries of R, 0 elsewhere.
        steps: maximum number of gradient iterations.
        alpha: learning rate.
        beta: L2 regularization strength.

    Returns:
        (P, Q) with Q in its transposed orientation, after convergence
        (RMSE < 0.65) or `steps` iterations.
    """
    Q = ca.transpose(Q)
    # xrange: this file targets Python 2; range would materialize the list.
    for step in xrange(steps):
        # Residual on observed entries only (mask zeroes out the rest).
        E = ca.subtract(R, ca.multiply(ca.dot(P, Q), mask))

        # RMSE over observed entries; pull the scalar back to the host.
        rmse = ca.sqrt(ca.sum(ca.power(E, 2)) / ca.sum(mask))
        rmse = np.array(rmse)[0]

        print('step: %i RMSE: %f' % (step, rmse))
        if rmse < 0.65:
            break
        # Gradient step; weight decay is folded into the (1 - alpha*beta)
        # shrink factor applied to the current parameters.
        P = ca.add(ca.multiply(P, (1 - alpha * beta)),
                   ca.multiply(ca.dot(E, ca.transpose(Q)), 2 * alpha))
        Q = ca.add(ca.multiply(Q, (1 - alpha * beta)),
                   ca.multiply(ca.dot(ca.transpose(P), E), 2 * alpha))

    return P, Q
Пример #10
0
    def fprop(self):
        """Forward pass of spatial batch normalization.

        'train' phase: normalize by the current batch's mean and inverse
        std computed over axes (0, 2, 3), and update exponential running
        statistics with factor `momentum`. 'test' phase: apply the stored
        running statistics instead. Optional Gaussian noise injection
        (train only) and affine scale/shift follow.
        """
        if self.phase == 'train':
            # Calculate batch mean over the batch axis (0) and the spatial
            # axes (2, 3); keepdims so it broadcasts against the input.
            tmp = ca.mean(ca.mean(self.x.out, axis=0, keepdims=True),
                          axis=(2, 3),
                          keepdims=True)
            # Center input
            ca.subtract(self.x.out, tmp, self._tmp_batch_centered)
            # Update running mean: running = momentum*running + (1 - momentum)*batch
            tmp *= 1 - self.momentum
            self.running_mean *= self.momentum
            self.running_mean += tmp
            # Calculate batch variance. self.out is used as scratch space
            # here; it is overwritten with the normalized result below.
            ca.power(self._tmp_batch_centered, 2, self.out)
            ca.mean(ca.mean(self.out, axis=0, keepdims=True),
                    axis=(2, 3),
                    keepdims=True,
                    out=self._tmp_batch_inv_std)
            # Calculate 1 / sqrt(E([x - E(x)]^2) + eps), the inverse std.
            self._tmp_batch_inv_std += self.eps
            ca.sqrt(self._tmp_batch_inv_std, self._tmp_batch_inv_std)
            ca.power(self._tmp_batch_inv_std, -1, self._tmp_batch_inv_std)
            # Normalize input
            ca.multiply(self._tmp_batch_centered, self._tmp_batch_inv_std,
                        self.out)
            # Update running std. NOTE(review): what accumulates here is the
            # *inverse* std, which is why the test branch multiplies by it.
            self.running_std *= self.momentum
            ca.multiply(self._tmp_batch_inv_std, 1 - self.momentum, tmp)
            self.running_std += tmp

            # Optionally inject Gaussian noise into the normalized output.
            if self.noise_std > 0.0:
                noise = ca.random.normal(scale=self.noise_std,
                                         size=self.out_shape)
                ca.add(self.out, noise, self.out)

        elif self.phase == 'test':
            # Normalize with the running statistics; multiplying by
            # running_std divides by the std (see NOTE above).
            ca.subtract(self.x.out, self.running_mean, self.out)
            self.out *= self.running_std
        else:
            raise ValueError('Invalid phase: %s' % self.phase)
        # Learned affine scale (gamma) and shift (beta), if enabled.
        if self.affine:
            self.out *= self.gamma.array
            self.out += self.beta.array
Пример #11
0
    def fprop(self):
        """Forward pass of spatial batch normalization.

        In 'train' phase, normalizes by the batch mean and inverse std over
        axes (0, 2, 3) and updates the exponential running statistics. In
        'test' phase, applies the stored running statistics. Optional noise
        injection (train only) and affine scale/shift come last.
        """
        if self.phase == 'train':
            # Calculate batch mean over batch (0) and spatial (2, 3) axes;
            # keepdims keeps the result broadcastable against the input.
            tmp = ca.mean(ca.mean(self.x.out, axis=0, keepdims=True),
                          axis=(2, 3), keepdims=True)
            # Center input
            ca.subtract(self.x.out, tmp, self._tmp_batch_centered)
            # Update running mean: running = momentum*running + (1 - momentum)*batch
            tmp *= 1 - self.momentum
            self.running_mean *= self.momentum
            self.running_mean += tmp
            # Calculate batch variance. self.out serves as scratch space and
            # is overwritten with the normalized result further down.
            ca.power(self._tmp_batch_centered, 2, self.out)
            ca.mean(ca.mean(self.out, axis=0, keepdims=True), axis=(2, 3),
                    keepdims=True, out=self._tmp_batch_inv_std)
            # Calculate 1 / sqrt(E([x - E(x)]^2) + eps), the inverse std.
            self._tmp_batch_inv_std += self.eps
            ca.sqrt(self._tmp_batch_inv_std, self._tmp_batch_inv_std)
            ca.power(self._tmp_batch_inv_std, -1, self._tmp_batch_inv_std)
            # Normalize input
            ca.multiply(self._tmp_batch_centered, self._tmp_batch_inv_std,
                        self.out)
            # Update running std. NOTE(review): this accumulates the
            # *inverse* std; the test branch relies on that by multiplying.
            self.running_std *= self.momentum
            ca.multiply(self._tmp_batch_inv_std, 1-self.momentum, tmp)
            self.running_std += tmp

            # Optionally inject Gaussian noise into the normalized output.
            if self.noise_std > 0.0:
                noise = ca.random.normal(scale=self.noise_std,
                                         size=self.out_shape)
                ca.add(self.out, noise, self.out)

        elif self.phase == 'test':
            # Normalize with running statistics; multiplying by running_std
            # divides by the std (see NOTE above).
            ca.subtract(self.x.out, self.running_mean, self.out)
            self.out *= self.running_std
        else:
            raise ValueError('Invalid phase: %s' % self.phase)
        # Learned affine scale (gamma) and shift (beta), if enabled.
        if self.affine:
            self.out *= self.gamma.array
            self.out += self.beta.array
Пример #12
0
def matrix_factorization(R,
                         P,
                         Q,
                         mask,
                         steps=200000000,
                         alpha=0.00005,
                         beta=0.02):
    """Factorize R ~ dot(P, Q) by masked gradient descent with L2 decay.

    Fix: the Python-2-only `print` statement is a SyntaxError under
    Python 3; the parenthesized form used here prints identically under
    Python 2 and is valid in Python 3.

    Parameters:
        R: observed ratings matrix.
        P, Q: factor matrices (Q is transposed internally).
        mask: 1 for observed entries of R, 0 elsewhere.
        steps: maximum number of gradient iterations.
        alpha: learning rate.
        beta: L2 regularization strength.

    Returns:
        (P, Q) with Q in its transposed orientation, after convergence
        (RMSE < 0.65) or `steps` iterations.
    """
    Q = ca.transpose(Q)
    # xrange: this file targets Python 2; range would materialize the list.
    for step in xrange(steps):
        # Residual on observed entries only (mask zeroes out the rest).
        E = ca.subtract(R, ca.multiply(ca.dot(P, Q), mask))

        # RMSE over observed entries; pull the scalar back to the host.
        rmse = ca.sqrt(ca.sum(ca.power(E, 2)) / ca.sum(mask))
        rmse = np.array(rmse)[0]

        print('step: %i RMSE: %f' % (step, rmse))
        if rmse < 0.65:
            break
        # Gradient step; weight decay is folded into the (1 - alpha*beta)
        # shrink factor applied to the current parameters.
        P = ca.add(ca.multiply(P, (1 - alpha * beta)),
                   ca.multiply(ca.dot(E, ca.transpose(Q)), 2 * alpha))
        Q = ca.add(ca.multiply(Q, (1 - alpha * beta)),
                   ca.multiply(ca.dot(ca.transpose(P), E), 2 * alpha))

    return P, Q
Пример #13
0
 def fprop(self):
     """Write the elementwise sum of both parents' outputs into self.out."""
     left = self.lhs.out
     right = self.rhs.out
     ca.add(left, right, out=self.out)
Пример #14
0
 def fprop(self):
     """Elementwise addition of the two operand arrays into this node's buffer."""
     operands = (self.lhs.array, self.rhs.array)
     ca.add(operands[0], operands[1], out=self.array)
Пример #15
0
 def fprop(self):
     """Sum the left and right operand arrays elementwise into self.array."""
     lhs_arr = self.lhs.array
     rhs_arr = self.rhs.array
     ca.add(lhs_arr, rhs_arr, out=self.array)
Пример #16
0
 def fprop(self):
     """Broadcast the feature vector spatially and concatenate with images.

     The features are viewed with two trailing singleton axes and added to
     the zeroed scratch buffer, which broadcasts them over the buffer's
     spatial extent; the result is concatenated with the image tensor
     along axis 1. (Assumes feats.out is 2-D so the reshape yields a
     4-D view — TODO confirm against callers.)
     """
     self.tmp.fill(0.0)
     # Appending (1, 1) to the shape makes the features broadcastable.
     spatial_feats = ca.reshape(self.feats.out,
                                self.feats.out.shape + (1, 1))
     # Adding into the zero buffer materializes the broadcast.
     ca.add(spatial_feats, self.tmp, out=self.tmp)
     ca.extra.concatenate(self.imgs.out, self.tmp, axis=1, out=self.out)
Пример #17
0
 def fprop(self):
     """Add the two parent outputs elementwise, storing into self.out."""
     a, b = self.lhs.out, self.rhs.out
     ca.add(a, b, out=self.out)