Example #1
File: base.py Project: 0rchard/CUV
    def updateLayer(self, layernum, sample=True):
        L = self.layers[layernum]
        if layernum == 0:
            self.downPass(layernum + 1, sample=sample)
        if layernum == len(self.layers) - 1:
            self.upPass(layernum - 1, sample)
        if layernum < len(self.layers) - 1 and layernum > 0:
            hi = self.layers[layernum+1]
            lo = self.layers[layernum-1]
            wlo = self.weights[layernum-1]
            whi = self.weights[layernum]

            cp.prod(L.act, whi.mat, hi.act, 'n', 'n')
            cp.matrix_plus_col(L.act, whi.bias_lo)

            tmp = L.act.copy()
            cp.prod(L.act, wlo.mat, lo.act, 't', 'n')
            cp.matrix_plus_col(L.act, wlo.bias_hi)

            # add parts from above/below
            cp.apply_binary_functor(L.act, tmp, cp.binary_functor.AXPBY, 0.5, 0.5)
            tmp.dealloc()

            L.nonlinearity()
            if sample:
                L.sample()
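Here cp.prod(C, A, B, tA, tB) writes op(A)·op(B) into C ('n' for plain, 't' for transposed) and the AXPBY functor forms a*x + b*y, so the mid-layer branch averages top-down and bottom-up input. A rough NumPy sketch of that branch, assuming (units x batch) activation arrays, 1-D biases, and a sigmoid nonlinearity (all names illustrative, not CUV API):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def update_layer_np(lo_act, hi_act, wlo_mat, whi_mat, wlo_bias_hi, whi_bias_lo):
        from_above = whi_mat @ hi_act + whi_bias_lo[:, None]    # the 'n','n' prod
        from_below = wlo_mat.T @ lo_act + wlo_bias_hi[:, None]  # the 't','n' prod
        return sigmoid(0.5 * from_above + 0.5 * from_below)     # AXPBY with a = b = 0.5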
Example #2
File: mlp.py Project: 0rchard/CUV
    def forward(self, input, weight, bias, linear=False):
        result = cp.dev_tensor_float_cm([weight.shape[1], input.shape[1]])
        cp.fill(result, 0)
        cp.prod(result, weight, input, "t", "n")
        cp.matrix_plus_col(result, bias)
        if not linear:
            cp.apply_scalar_functor(result, cp.scalar_functor.SIGM)

        return result
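In NumPy terms this computes sigmoid(weight.T @ input + bias), or the plain affine map when linear is set. A minimal sketch for comparison, assuming (n_in x batch) input and a 1-D bias (illustrative, not the CUV API):

    import numpy as np

    def forward_np(input, weight, bias, linear=False):
        result = weight.T @ input + bias[:, None]
        return result if linear else 1.0 / (1.0 + np.exp(-result))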
Example #3
File: knn.py Project: kgl-prml/CUV
    def get_distance_matrix(self, test):
        t = cp.dev_tensor_float_cm(test)
        assert t.shape[1] == self.data.shape[1]
        tsq = cp.dev_tensor_float(t.shape[0])
        cp.reduce_to_col(tsq, t, cp.reduce_functor.ADD_SQUARED)
        p = cp.dev_tensor_float_cm([self.data.shape[0], t.shape[0]])
        cp.prod(p, self.data, t, 'n', 't', -2, 0)
        cp.matrix_plus_col(p, self.dsq)
        cp.matrix_plus_row(p, tsq)
        return p
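The three calls assemble the identity ||x - y||^2 = ||x||^2 - 2*x·y + ||y||^2: the prod contributes -2 * data @ test.T, and the column/row additions add the squared norms (self.dsq presumably holds the precomputed norms of self.data). A NumPy cross-check, assuming (samples x features) arrays:

    import numpy as np

    def distance_matrix_np(data, test):
        dsq = (data ** 2).sum(axis=1)[:, None]  # squared norms of the stored data
        tsq = (test ** 2).sum(axis=1)[None, :]  # squared norms of the test points
        return dsq - 2.0 * data @ test.T + tsq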
Example #4
class weight_layer:
    """ weight layer of the MLP represented by a matrix."""

    def __init__(self, source_layer, target_layer):
        """Constructor

        @param source_layer pointer to the previous neuron layer.
        @param target_layer pointer to the next neuron layer.

        """
        self.source = source_layer
        self.target = target_layer
        dim1 = self.target.activations.h
        dim2 = self.source.activations.h
        self.weight = cp.get_filled_matrix(dim1, dim2, 0.0)
        cp.fill_rnd_uniform(self.weight)
        cp.apply_scalar_functor(self.weight, cp.scalar_functor.SUBTRACT, 0.5)
        cp.apply_scalar_functor(self.weight, cp.scalar_functor.DIV, 10)
        self.bias = cp.get_filled_matrix(dim1, 1, 0)

    def forward(self):
        """Forward pass, calculates the activations of next neuron layer."""
        cp.prod(self.target.activations, self.weight,
                self.source.activations)
        cp.matrix_plus_col(self.target.activations, self.bias.vec)
        self.target.nonlinearity(self.target.activations)
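The constructor draws weights uniformly in [0, 1), then shifts by 0.5 and divides by 10, i.e. uniform in [-0.05, 0.05]. A NumPy equivalent of the initialization (sizes chosen for illustration):

    import numpy as np

    dim1, dim2 = 64, 32  # illustrative layer sizes
    weight = (np.random.uniform(size=(dim1, dim2)) - 0.5) / 10.0  # uniform in [-0.05, 0.05]
    bias = np.zeros((dim1, 1))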
Example #5
    def p_k(self, beta, tmp, tmp2, collect):
        cp.prod(tmp, self.v, self.baserate_bias, 't', 'n')
        cp.apply_scalar_functor(tmp, cp.scalar_functor.MULT, (1 - beta))
        collect(tmp)
        cp.prod(tmp2, self.w, self.v, 't', 'n')
        cp.matrix_plus_col(tmp2, self.bias_hi)

        cp.apply_scalar_functor(tmp2, cp.scalar_functor.MULT, beta)

        # RECT computes log(1+exp(x))
        cp.apply_scalar_functor(tmp2, cp.scalar_functor.RECT, 1)

        # tmp.T reinterprets tmp as row-major. For a vector this changes
        # nothing, but vectors are always assumed to be row-major.
        cp.reduce_to_row(tmp.T, tmp2, cp.reduce_functor.ADD)
        collect(tmp)
        cp.prod(tmp, self.v, self.bias_lo.T, 't', 'n')
        cp.apply_scalar_functor(tmp, cp.scalar_functor.MULT, beta)
        collect(tmp)
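To reproduce the RECT term on the host, a numerically stable NumPy stand-in for log(1+exp(x)) is:

    import numpy as np

    def softplus(x):
        # log(1 + exp(x)) without overflow for large x
        return np.logaddexp(0.0, x)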
Example #6
    def partialsumV(self, actv, acth, row):
        """
         sums out hidden variables for given v
          exp( log(exp(bh + actv*W)+1).sum(axis=0) + (v*bv).sum(axis=0) )
        """
        # acth = bh + actv*W
        cp.prod(acth, self.weight, actv, 't', 'n')
        cp.matrix_plus_col(acth, self.bh)

        # acth = log(exp(acth)+1)
        cp.apply_scalar_functor(acth, cp.scalar_functor.RECT, 1.0)

        # row = acth.sum(axis=0)
        cp.reduce_to_row(row, acth, cp.reduce_functor.ADD)

        # row += (v*bv).sum(axis=0)
        cp.matrix_times_col(actv, self.bv)
        cp.reduce_to_row(row, actv, cp.reduce_functor.ADD, 1.0, 1.0)

        # copy to host in double precision for a stable sum
        m = row.np.astype("float64")

        return math.fsum(m.flatten()) / actv.shape[1]
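A host-side NumPy rendition of the same computation, assuming actv is (n_visible x batch) and weight is (n_visible x n_hidden); like the code above it returns the per-sample average of the log-domain sums (illustrative, not the CUV API):

    import math
    import numpy as np

    def partialsum_v_np(actv, weight, bh, bv):
        acth = weight.T @ actv + bh[:, None]        # bh + W^T v
        row = np.logaddexp(0.0, acth).sum(axis=0)   # log(1+exp(.)), summed over hidden units
        row += (actv * bv[:, None]).sum(axis=0)     # (v * bv).sum(axis=0)
        return math.fsum(row.astype("float64")) / actv.shape[1]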
Example #7
    def partialsum(self, acth, actv, row):
        """
        sums out visible variables for given hidden variables
          exp( log(exp(bv + acth*W)+1).sum(axis=0) + (h*bh).sum(axis=0) )
        """
        # actv = bv + acth*W
        cp.prod(actv, self.weight, acth, "n", "n")
        cp.matrix_plus_col(actv, self.bv)

        # actv = log(exp(actv)+1)
        cp.apply_scalar_functor(actv, cp.scalar_functor.RECT, 1.0)

        # row = actv.sum(axis=0)
        cp.reduce_to_row(row, actv, cp.reduce_functor.ADD)

        # row += (h*bh).sum(axis=0)
        cp.matrix_times_col(acth, self.bh)
        cp.reduce_to_row(row, acth, cp.reduce_functor.ADD, 1.0, 1.0)
        # cp.prod(row,self.bv,actv,'t','n',1.0,1.0)

        # exp(row)
        m = row.np.astype("float64")

        return math.fsum(np.exp(m).flatten())
Example #8
    def sample_markov_chains(self, beta, step):
        # sample hidden units given the visible units
        cp.prod(self.h, self.w, self.v, 't', 'n')
        cp.matrix_plus_col(self.h, self.bias_hi)
        cp.apply_scalar_functor(self.h, cp.scalar_functor.MULT, beta)
        cp.apply_scalar_functor(self.h, cp.scalar_functor.SIGM)
        cp.rnd_binarize(self.h)
        # sample visible units given the hidden units
        cp.prod(self.v, self.w, self.h, 'n', 'n')
        cp.matrix_plus_col(self.v, self.bias_lo)
        cp.apply_scalar_functor(self.v, cp.scalar_functor.MULT, beta)
        # temporarily scale the base-rate bias by (1-beta), add it to the
        # visible activations, then undo the scaling
        cp.apply_scalar_functor(self.baserate_bias, cp.scalar_functor.MULT, 1 - beta)

        cp.matrix_plus_col(self.v, self.baserate_bias)
        cp.apply_scalar_functor(self.baserate_bias, cp.scalar_functor.MULT, 1.0 / (1 - beta))
        cp.apply_scalar_functor(self.v, cp.scalar_functor.SIGM)
        #if step % 100 == 0:
           #plt.figure(1)
           #self.v_=self.v.np
           #showthis = self.v_.copy()
           #plt.matshow(showthis[:,0].reshape((28,28)))
           #plt.draw()
           #if not os.path.exists("/tmp/%s"%os.getlogin()):
               #os.mkdir("/tmp/%s"%os.getlogin())
           #plt.savefig("/tmp/%s/chain_%05d.png"%(os.getlogin(),step))
        cp.rnd_binarize(self.v)
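At inverse temperature beta the visible drive mixes the RBM term (weight beta) with the base-rate bias (weight 1-beta); the MULT by (1-beta) and the later MULT by 1/(1-beta) just scale the bias in place and undo it. One step of this tempered Gibbs chain in NumPy, assuming (units x batch) arrays (illustrative names):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def tempered_gibbs_step(v, w, bias_hi, bias_lo, baserate_bias, beta, rng):
        h = sigmoid(beta * (w.T @ v + bias_hi[:, None]))
        h = (rng.random(h.shape) < h).astype(v.dtype)        # rnd_binarize
        v = sigmoid(beta * (w @ h + bias_lo[:, None])
                    + (1.0 - beta) * baserate_bias[:, None])
        v = (rng.random(v.shape) < v).astype(v.dtype)        # rnd_binarize
        return v, h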
Example #9
    def normalize_zmuv(self, batch):
        """ Zero Mean, Unit Variance based on recorded statistics """
        cp.matrix_plus_col(batch, self.negative_mean)
        cp.matrix_divide_col(batch, self.std)
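The class stores the mean negated (negative_mean) so that matrix_plus_col can implement the subtraction. In NumPy the same normalization, for a (features x samples) batch, is simply:

    import numpy as np

    def normalize_zmuv_np(batch, mean, std):
        return (batch - mean[:, None]) / std[:, None]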
Example #10
File: base.py Project: 0rchard/CUV
    def postUpdateFromBelow(self, sample, bias):
        cp.matrix_plus_col(self.act, bias)
        self.nonlinearity()
        if sample:
            self.sample()
Example #11
    def forward(self):
        """Forward pass, calculates the activations of next neuron layer."""
        cp.prod(self.target.activations, self.weight,
                self.source.activations)
        cp.matrix_plus_col(self.target.activations, self.bias)
        self.target.nonlinearity(self.target.activations)
Example #12
    def normalize_minmax(self, batch):
        """ normalize by subtracting min and dividing by range """
        cp.matrix_plus_col(batch, self.negative_min)
        cp.matrix_divide_col(batch, self.range)