Example #1
    def fprop(self, x, h):
        self._tmp_x = x
        self._tmp_h_tm1 = h

        # Project the input and previous state onto all three gates at once;
        # w_x and w_h stack the reset, update and candidate weights, so the
        # products have shape (3 * n_hidden, batch).
        x_stack = ca.dot(self.w_x.array.T, x.T)
        h_stack = ca.dot(self.w_h.array.T, h.T)

        n = self.n_hidden
        x_r = x_stack[:n, :]
        x_u = x_stack[n : n * 2, :]
        x_c = x_stack[n * 2 : n * 3, :]
        h_r = h_stack[:n, :]
        h_u = h_stack[n : n * 2, :]
        h_c = h_stack[n * 2 : n * 3, :]

        # Reset gate, update gate and candidate state.
        r = self.act_r.fprop(x_r + h_r + self.b_r.array)
        u = self.act_u.fprop(x_u + h_u + self.b_u.array)
        c = self.act_c.fprop(x_c + r * h_c + self.b_c.array)

        u = ca.ascontiguousarray(ca.transpose(u))
        c = ca.ascontiguousarray(ca.transpose(c))

        # h_tp1 = (1 - u) * h + u * c, computed in-place.
        h_tp1 = 1 - u
        h_tp1 *= h
        h_tp1 += u * c

        self._tmp_r = r
        self._tmp_u = u
        self._tmp_c = c
        self._tmp_h_c = h_c
        return {"y": h_tp1, "h": h_tp1}
Example #3
 def encode_bprop(self, y_grad):
     y_grad = self.activation.bprop(y_grad)
     # Because the weight gradient has already been updated by
     # decode_bprop() we must add the contribution.
     w_grad = self.weights.grad_array
     w_grad += ca.dot(self._tmp_x.T, y_grad)
     ca.sum(y_grad, axis=0, out=self.bias.grad_array)
     return ca.dot(y_grad, self.weights.array.T)
Example #5
 def encode_bprop(self, y_grad):
     y_grad = self.activation.bprop(y_grad)
     # Because W's gradient has already been updated by decode_bprop() at
     # this point, we should add its contribution from the encode step.
     W_grad = self.W.grad_array
     W_grad += ca.dot(self._tmp_last_x.T, y_grad)
     ca.sum(y_grad, axis=0, out=self.b.grad_array)
     return ca.dot(y_grad, self.W.array.T)
Example #6
    def fprop(self, x, h):
        self._tmp_x = x
        self._tmp_h_tm1 = h

        h = ca.dot(x, self.w_xh.array) + ca.dot(h, self.w_hh.array) + self.b_h.array
        h = self.activation.fprop(h)
        y = ca.dot(h, self.w_hy.array) + self.b_y.array

        self._tmp_h = h
        self._tmp_y = y
        return {"y": y, "h": h}
Example #9
    def bprop(self, y_grad, h_grad):
        ca.dot(self._tmp_h.T, y_grad, out=self.w_hy.grad_array)
        ca.sum(y_grad, axis=0, keepdims=True, out=self.b_y.grad_array)
        h_grad = h_grad + ca.dot(y_grad, self.w_hy.array.T)

        h_grad = self.activation.bprop(h_grad)
        ca.sum(h_grad, axis=0, keepdims=True, out=self.b_h.grad_array)
        ca.dot(self._tmp_h_tm1.T, h_grad, out=self.w_hh.grad_array)
        ca.dot(self._tmp_x.T, h_grad, out=self.w_xh.grad_array)

        x_grad = ca.dot(h_grad, self.w_xh.array.T)
        h_grad = ca.dot(h_grad, self.w_hh.array.T)

        return {"x_grad": x_grad, "h_grad": h_grad}
Example #11
 def setup(self):
     try:
         # XXX: don't be lazy
         self.out_shape = ca.dot(self.lhs.out, self.rhs.out).shape
     except ValueError:
         raise ValueError('Shape mismatch: %s and %s for %s. LHS: %s RHS: '
                          '%s.' % (self.lhs.out.shape, self.rhs.out.shape,
                                   self, self.lhs, self.rhs))
     self.out = ca.empty(self.out_shape)
     self.out_grad = ca.empty(self.out_shape)
Example #12
 def setup(self):
     try:
         # XXX: don't be lazy
         self.shape = ca.dot(self.lhs.array, self.rhs.array).shape
     except ValueError:
         raise ValueError('Shape mismatch: %s and %s for %s. LHS: %s RHS: '
                          '%s.' % (self.lhs.shape, self.rhs.shape,
                                   self, self.lhs, self.rhs))
     self.array = ca.zeros(self.shape)
     self.grad_array = ca.zeros(self.shape)
Example #13
def getSimpleDot(test, train, gpuFlag=False, normalize=True):
    if normalize:
        test = util.normalize(test, gpuFlag=gpuFlag)
        train = util.normalize(train, gpuFlag=gpuFlag)

    if gpuFlag:
        # Compute all pairwise dot products on the GPU, then copy to host.
        distances = ca.dot(test, ca.transpose(train))
        distances = np.array(distances)
    else:
        distances = np.dot(test, train.T)

    # With unit-normalized rows these are cosine similarities.
    return distances
Example #14
    def bprop(self, y_grad, h_grad):
        n = self.n_hidden
        # Both outputs of fprop() were h_tp1, so their gradients add up.
        h_grad = h_grad + y_grad

        # h_tp1 = (1 - u) * h_tm1 + u * c
        c_grad = h_grad * self._tmp_u
        u_grad = h_grad * (self._tmp_c - self._tmp_h_tm1)
        h_grad *= 1 - self._tmp_u

        c_grad = ca.ascontiguousarray(ca.transpose(c_grad))
        u_grad = ca.ascontiguousarray(ca.transpose(u_grad))

        c_grad = self.act_c.bprop(c_grad)
        ca.sum(c_grad, axis=1, keepdims=True, out=self.b_c.grad_array)

        u_grad = self.act_u.bprop(u_grad)
        ca.sum(u_grad, axis=1, keepdims=True, out=self.b_u.grad_array)

        # The reset gate only enters through the r * h_c candidate term.
        r_grad = c_grad * self._tmp_h_c
        r_grad = self.act_r.bprop(r_grad)
        ca.sum(r_grad, axis=1, keepdims=True, out=self.b_r.grad_array)

        stack_grad = ca.empty((self.n_hidden * 3, y_grad.shape[0]))
        stack_grad[:n, :] = r_grad
        stack_grad[n : n * 2, :] = u_grad
        stack_grad[n * 2 : n * 3, :] = c_grad

        ca.dot(self._tmp_x.T, stack_grad.T, out=self.w_x.grad_array)
        x_grad = ca.dot(stack_grad.T, self.w_x.array.T)

        # For the recurrent path, the candidate rows are gated by r.
        stack_grad[n * 2 : n * 3, :] *= self._tmp_r
        ca.dot(self._tmp_h_tm1.T, stack_grad.T, out=self.w_h.grad_array)
        h_grad += ca.dot(stack_grad.T, self.w_h.array.T)

        ca.clip(h_grad, -self.clip, self.clip, out=h_grad)
        return {"x_grad": x_grad, "h_grad": h_grad}
Example #16
def matrix_factorization(R,
                         P,
                         Q,
                         mask,
                         steps=200000000,
                         alpha=0.00005,
                         beta=0.02):
    Q = ca.transpose(Q)
    for step in range(steps):
        # Error on the observed entries only (mask is 1 where R is known).
        E = ca.subtract(R, ca.multiply(ca.dot(P, Q), mask))

        rmse = ca.sqrt(ca.sum(ca.power(E, 2)) / ca.sum(mask))
        rmse = np.array(rmse)[0]

        print('step: %i RMSE: %f' % (step, rmse))
        if rmse < 0.65:
            break
        # Gradient step with L2 weight decay (beta).
        P = ca.add(ca.multiply(P, (1 - alpha * beta)),
                   ca.multiply(ca.dot(E, ca.transpose(Q)), 2 * alpha))
        Q = ca.add(ca.multiply(Q, (1 - alpha * beta)),
                   ca.multiply(ca.dot(ca.transpose(P), E), 2 * alpha))

    return P, Q
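In equation form, the loop is plain gradient descent on the masked squared error with L2 weight decay. With M the mask, Q stored transposed (K x M) as in the code, alpha the learning rate and beta the decay:

\begin{aligned}
L &= \sum_{ij} M_{ij}\bigl(R_{ij} - (PQ)_{ij}\bigr)^{2}
     + \frac{\beta}{2}\bigl(\lVert P\rVert^{2} + \lVert Q\rVert^{2}\bigr), \\
E &= M \odot (R - PQ), \\
P &\leftarrow (1-\alpha\beta)\,P + 2\alpha\, E Q^{\top}, \qquad
Q \leftarrow (1-\alpha\beta)\,Q + 2\alpha\, P^{\top} E .
\end{aligned}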
Example #17
 def fit(self, x):
     x = self._flat(x)
     self._mean = np.mean(x, axis=0, dtype=np.float64).astype(x.dtype)
     self._std = np.std(x, axis=0, dtype=np.float64).astype(x.dtype)
     x = self._normalize(x)
     try:
         # Perform dot product on GPU
         import cudarray as ca
         x_ca = ca.array(x)
         cov = np.array(ca.dot(x_ca.T, x_ca)).astype(np.float_)
     except Exception:  # fall back to CPU if the GPU path fails
         cov = np.dot(x.T, x)
     cov = cov / x.shape[0] + self.bias * np.identity(x.shape[1])
     s, v = np.linalg.eigh(cov)
     s = np.diag(1.0 / np.sqrt(s))
     self.whitener = np.dot(np.dot(v, s), v.T)
     return self
Example #18
 def fit(self, x):
     x = self._flat(x)
     self._mean = np.mean(x, axis=0, dtype=np.float64).astype(x.dtype)
     self._std = np.std(x, axis=0, dtype=np.float64).astype(x.dtype)
     x = self._normalize(x)
     try:
         # Perform dot product on GPU
         import cudarray as ca
         x_ca = ca.array(x)
         cov = np.array(ca.dot(x_ca.T, x_ca)).astype(np.float_)
     except Exception:  # fall back to CPU if the GPU path fails
         cov = np.dot(x.T, x)
     cov = cov / x.shape[0] + self.bias * np.identity(x.shape[1])
     s, v = np.linalg.eigh(cov)
     s = np.diag(1.0 / np.sqrt(s + 0.0001))
     self.whitener = np.dot(np.dot(v, s), v.T)
     return self
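Once fitted, applying the whitener is a single matrix product. A sketch, assuming _normalize standardizes with the stored mean and std (zca_transform is a hypothetical helper; the actual transform method is not shown in these examples):

import numpy as np

def zca_transform(x, mean, std, whitener):
    # Standardize with the fitted statistics, then rotate into the
    # whitened basis; the covariance of the result is ~identity.
    x = (x - mean) / std
    return np.dot(x, whitener)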
Example #19
    def _update(self):
        # Forward propagation
        next_x = self.x.array
        x_feats = [None]*len(self.layers)
        x_grams = [None]*len(self.layers)
        for l, layer in enumerate(self.layers):
            next_x = layer.fprop(next_x)
            if self.subject_weights[l] > 0:
                x_feats[l] = next_x
            if self.style_weights[l] > 0:
                x_feats[l] = next_x
                x_grams[l] = gram_matrix(next_x)

        # Backward propagation
        grad = ca.zeros_like(next_x)
        loss = ca.zeros(1)
        for l, layer in reversed(list(enumerate(self.layers))):
            if self.subject_weights[l] > 0:
                diff = x_feats[l] - self.subject_feats[l]
                norm = ca.sum(ca.fabs(diff)) + 1e-8
                weight = float(self.subject_weights[l]) / norm
                grad += diff * weight
                loss += 0.5*weight*ca.sum(diff**2)
            if self.style_weights[l] > 0:
                diff = x_grams[l] - self.style_grams[l]
                n_channels = diff.shape[0]
                x_feat = ca.reshape(x_feats[l], (n_channels, -1))
                style_grad = ca.reshape(ca.dot(diff, x_feat), x_feats[l].shape)
                norm = ca.sum(ca.fabs(style_grad))
                weight = float(self.style_weights[l]) / norm
                style_grad *= weight
                grad += style_grad
                loss += 0.25*weight*ca.sum(diff**2)
            grad = layer.bprop(grad)

        if self.tv_weight > 0:
            x = ca.reshape(self.x.array, (3, 1) + grad.shape[2:])
            tv = self.tv_conv.fprop(x, self.tv_kernel)
            tv *= self.tv_weight
            grad -= ca.reshape(tv, grad.shape)

        ca.copyto(self.x.grad_array, grad)
        return loss
Example #20
    def _update(self):
        # Forward propagation
        next_x = self.x.array
        x_feats = [None] * len(self.layers)
        for l, layer in enumerate(self.layers):
            next_x = layer.fprop(next_x)
            if self.subject_weights[l] > 0 or self.style_weights[l] > 0:
                x_feats[l] = next_x

        # Backward propagation
        grad = ca.zeros_like(next_x)
        loss = ca.zeros(1)
        for l, layer in reversed(list(enumerate(self.layers))):
            if self.subject_weights[l] > 0:
                diff = x_feats[l] - self.subject_feats[l]
                norm = ca.sum(ca.fabs(diff)) + 1e-8
                weight = float(self.subject_weights[l]) / norm
                grad += diff * weight
                loss += 0.5 * weight * ca.sum(diff**2)
            if self.style_weights[l] > 0:
                diff = gram_matrix(x_feats[l]) - self.style_grams[l]
                n_channels = diff.shape[0]
                x_feat = ca.reshape(x_feats[l], (n_channels, -1))
                style_grad = ca.reshape(ca.dot(diff, x_feat), x_feats[l].shape)
                norm = ca.sum(ca.fabs(style_grad))
                weight = float(self.style_weights[l]) / norm
                style_grad *= weight
                grad += style_grad
                loss += 0.25 * weight * ca.sum(diff**2)
            grad = layer.bprop(grad)

        if self.tv_weight > 0:
            x = ca.reshape(self.x.array, (3, 1) + grad.shape[2:])
            tv = self.tv_conv.fprop(x, self.tv_kernel)
            tv *= self.tv_weight
            grad -= ca.reshape(tv, grad.shape)

        ca.copyto(self.x.grad_array, grad)
        return loss
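In both versions the style gradient follows directly from the Gram construction. With F the (channels, positions) feature matrix, G = F F^T its Gram matrix and A the target Gram, the loss and gradient used above are

L_{style} = \tfrac{1}{4}\,\lVert G - A \rVert_F^{2}
\quad\Longrightarrow\quad
\frac{\partial L_{style}}{\partial F} = (G - A)\,F ,

which is exactly the ca.dot(diff, x_feat) term reshaped back to the feature map (the code additionally rescales by the layer weight over the gradient's L1 norm).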
Example #21
 def bprop(self):
     ca.dot(self.x.array.T, self.grad_array, self.weights.grad_array)
     ca.dot(self.grad_array, self.weights.array.T, self.x.grad_array)
Example #22
def gram_matrix(img_bc01):
    n_channels = img_bc01.shape[1]
    feats = ca.reshape(img_bc01, (n_channels, -1))
    gram = ca.dot(feats, feats.T)
    return gram
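A quick NumPy cross-check of gram_matrix. Note that the reshape folds every axis except channels together, which only yields the intended (channels, positions) matrix when the batch size is 1:

import numpy as np

img = np.random.randn(1, 16, 8, 8)   # bc01: (batch, channels, height, width)
feats = img.reshape(16, -1)          # (channels, h*w), valid since batch == 1
gram = np.dot(feats, feats.T)        # (channels, channels), symmetric
print(gram.shape, np.allclose(gram, gram.T))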
Example #23
 def bprop(self, y_grad):
     ca.dot(self._last_x.T, y_grad, out=self.W.grad_array)
     ca.sum(y_grad, axis=0, out=self.b.grad_array)
     return ca.dot(y_grad, self.W.array.T)
Example #24
 def bprop(self, y_grad):
     ca.dot(self._tmp_x.T, y_grad, out=self.weights.grad_array)
     ca.sum(y_grad, axis=0, out=self.bias.grad_array)
     if self.bprop_to_x:
         return ca.dot(y_grad, self.weights.array.T)
Example #25
 def decode(self, y_prime):
     self._tmp_last_y_prime = y_prime
     x_prime = ca.dot(y_prime, self.W.array.T) + self.b_prime.array
     return self.activation_decode.fprop(x_prime, '')
Example #26
 def decode_bprop(self, x_prime_grad):
     x_prime_grad = self.activation_decode.bprop(x_prime_grad)
     ca.dot(x_prime_grad.T, self._tmp_last_y_prime, out=self.W.grad_array)
     ca.sum(x_prime_grad, axis=0, out=self.b_prime.grad_array)
     return ca.dot(x_prime_grad, self.W.array)
Example #27
 def encode(self, x):
     self._tmp_x = x
     y = ca.dot(x, self.weights.array) + self.bias.array
     return self.activation.fprop(y)
Example #29
 def fprop(self, x, phase):
     self._tmp_last_x = x
     return ca.dot(x, self.W.array) + self.b.array
Example #30
 def bprop(self):
     if self.lhs_bprop:
         ca.dot(self.out_grad, self.rhs.out.T, out=self.lhs.out_grad)
     if self.rhs_bprop:
         ca.dot(self.lhs.out.T, self.out_grad, out=self.rhs.out_grad)
Example #31
def test_dot():
    a = np.random.normal(size=(5, 5))
    b = np.random.normal(size=(5, 5))
    c_np = np.dot(a, b)

    a = ca.array(a)
    b = ca.array(b)

    c_ca = ca.dot(a, b)
    print(np.allclose(c_np, np.array(c_ca)))

    c_ca = ca.zeros_like(a)
    ca.dot(a, b, c_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(5))
    b_np = np.random.normal(size=(5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np, b_np)
    c_ca = ca.dot(a_ca, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(5, 5))
    b_np = np.random.normal(size=(5, 5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np.T, b_np)
    c_ca = ca.dot(a_ca.T, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(3, 4))
    b_np = np.random.normal(size=(5, 4))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np, b_np.T)
    c_ca = ca.dot(a_ca, b_ca.T)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(4, 3))
    b_np = np.random.normal(size=(4, 5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np.T, b_np)
    c_ca = ca.dot(a_ca.T, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(4, 3))
    b_np = np.random.normal(size=(5, 4))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np.T, b_np.T)
    c_ca = ca.dot(a_ca.T, b_ca.T)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(4))
    b_np = np.random.normal(size=(4, 5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np, b_np)
    c_ca = ca.dot(a_ca, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(4, 5))
    b_np = np.random.normal(size=(5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np, b_np)
    c_ca = ca.dot(a_ca, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(4))
    b_np = np.random.normal(size=(5, 4))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np, b_np.T)
    c_ca = ca.dot(a_ca, b_ca.T)
    print(np.allclose(c_np, np.array(c_ca)))

    a_np = np.random.normal(size=(5, 4))
    b_np = np.random.normal(size=(5))
    a_ca = ca.array(a_np)
    b_ca = ca.array(b_np)
    c_np = np.dot(a_np.T, b_np)
    c_ca = ca.dot(a_ca.T, b_ca)
    print(np.allclose(c_np, np.array(c_ca)))
Example #32
    # X is assumed to be loaded earlier (e.g. from a training CSV) as an
    # (n, 3) integer array of (row, col, rating) triples.
    Y = np.loadtxt('test.csv',
                   delimiter=',',
                   usecols=(0, 1),
                   skiprows=1,
                   dtype=int)
    shape = tuple(X.max(axis=0)[:2] + 1)
    R = sparse.coo_matrix((X[:, 2], (X[:, 0], X[:, 1])),
                          shape=shape,
                          dtype=X.dtype)
    R = R.todense()
    R = np.array(R)
    # mask is 1 where a rating is present, obtained as element-wise R / R.
    mask = np.divide(np.array(R, dtype=int), np.array(R, dtype=int))
    d_R = ca.array(R)
    d_M = ca.array(mask)
    N = len(R)
    M = len(R[0])
    K = 23

    P = np.random.rand(N, K)
    Q = np.random.rand(M, K)

    d_P = ca.array(P)
    d_Q = ca.array(Q)

    d_nP, d_nQ = matrix_factorization(d_R, d_P, d_Q, d_M)

    d_nR = ca.dot(d_nP, d_nQ)
    nR = np.around(np.array(d_nR))
    Z = nR[Y[:, 0], Y[:, 1]]
    np.savetxt('sgd_test.csv', Z, delimiter='\n')
Example #33
 def fprop(self):
     ca.dot(self.x.array, self.weights.array, self.array)
Example #34
 def fprop(self):
     ca.dot(self.lhs.array, self.rhs.array, out=self.array)
Example #35
 def decode(self, y):
     self._tmp_y = y
     x = ca.dot(y, self.weights.array.T) + self.bias_prime.array
     return self.activation_decode.fprop(x)
Example #36
 def fprop(self, x):
     self._tmp_x = x
     return ca.dot(x, self.weights.array) + self.bias.array
Example #37
 def decode_bprop(self, x_grad):
     x_grad = self.activation_decode.bprop(x_grad)
     ca.dot(x_grad.T, self._tmp_y, out=self.weights.grad_array)
     ca.sum(x_grad, axis=0, out=self.bias_prime.grad_array)
     return ca.dot(x_grad, self.weights.array)
Example #38
 def encode(self, x):
     self._tmp_last_x = x
     y = ca.dot(x, self.W.array) + self.b.array
     return self.activation.fprop(y, '')
Example #39
 def fprop(self):
     ca.dot(self.lhs.out, self.rhs.out, out=self.out)
Example #40
 def decode(self, y):
     self._tmp_last_y = y
     x = ca.dot(y, self.W.array.T) + self.b_prime.array
     return self.activation_decode.fprop(x, '')
Example #42
 def fprop(self, x, phase):
     self._last_x = x
     return ca.dot(x, self.W.array) + self.b.array
Example #45
 def bprop(self):
     ca.dot(self.x.out.T, self.out_grad, out=self.weights.grad_array)
     ca.dot(self.out_grad, self.weights.array.T, out=self.x.out_grad)
     if self.bias is not None:
         ca.sum(self.out_grad, axis=0, out=self.bias.grad_array)
Example #48
 def fprop(self):
     ca.dot(self.x.out, self.weights.array, out=self.out)
     if self.bias is not None:
         self.out += self.bias.array
Example #51
import numpy as np
import cudarray as ca
from time import time

n, p = int(2e3), int(40e3)
A = np.random.randn(n, p)
B = np.random.randn(p, n)
t0 = time()
np.dot(A, B)
t1 = time()
print("Numpy %f" % (t1 - t0))

A_ca = ca.random.normal(size=(n, p))
B_ca = ca.random.normal(size=(p, n))
t0 = time()
ca.dot(A_ca, B_ca)
t1 = time()
print("CUDArray %f" % (t1 - t0))
Example #52
 def bprop(self, y_grad, to_x=True):
     ca.dot(self._tmp_last_x.T, y_grad, out=self.W.grad_array)
     ca.sum(y_grad, axis=0, out=self.b.grad_array)
     if to_x:
         return ca.dot(y_grad, self.W.array.T)
Example #53
 def bprop(self):
     if self.lhs.bpropable:
         ca.dot(self.grad_array, self.rhs.array.T, out=self.lhs.grad_array)
     if self.rhs.bpropable:
         ca.dot(self.lhs.array.T, self.grad_array, out=self.rhs.grad_array)
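The pattern in Examples #30 and #53 is the standard backward rule for a matrix product: for Y = A B with upstream gradient G = \partial L / \partial Y,

\frac{\partial L}{\partial A} = G\,B^{\top}, \qquad
\frac{\partial L}{\partial B} = A^{\top} G ,

which is why each side's gradient is a single ca.dot against the other side's transposed value.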