Example #1
 def denominator(self, batchsize):
     acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
     actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
     row = cp.dev_tensor_float([batchsize])
     cp.fill(acth, 0.0)
     cp.fill(actv, 0.0)
     cp.fill(row, 0.0)
     n = acth.shape[0]
     nmax = 2**n
     if nmax % batchsize != 0:
         print "Error: 2**n=%d must be divisible by batchsize=%d!" % (
             nmax, batchsize)
         sys.exit(1)
     L = []
     widgets = [
         "Denominator: ",
         Percentage(), ' ',
         Bar(marker=RotatingMarker()), ' ',
         ETA()
     ]
     pbar = ProgressBar(widgets=widgets, maxval=nmax)
     for i in xrange(0, nmax, acth.shape[1]):
         cp.set_binary_sequence(acth, i)
         L.append(self.partialsum(acth, actv, row))
         if (i / acth.shape[1]) % 100 == 0:
             pbar.update(i)
     pbar.finish()
     for m in [actv, acth, row]:
         m.dealloc()
     return math.fsum(L)
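Example #1 enumerates all 2**n binary states of the hidden layer in blocks of batchsize columns; cp.set_binary_sequence(acth, i) presumably writes the n-bit binary codes of i, i+1, ... into successive columns of acth. A minimal host-side NumPy sketch of that assumption (bit order taken as least-significant-first), useful for checking small cases:

import numpy as np

def binary_sequence_np(n, start, batchsize):
    # Column j holds the n-bit binary code of start + j.
    codes = np.arange(start, start + batchsize)
    return ((codes[None, :] >> np.arange(n)[:, None]) & 1).astype('float32')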
Example #2
    def numerator(self, mbp, batchsize):
        sid = 0
        actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
        acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
        row = cp.dev_tensor_float([batchsize])
        cp.fill(acth, 0.0)
        cp.fill(actv, 0.0)
        cp.fill(row, 0)
        print "Numerator: ",
        L = []
        try:
            while True:
                mbp.getMiniBatch(batchsize, actv, sid)
                mbp.forgetOriginalData()
                sid += 1
                L.append(self.partialsumV(actv, acth, row))
                sys.stdout.write(".")
                sys.stdout.flush()

        except minibatch_provider.MiniBatchProviderEmpty:
            print "done."
        for m in [actv, acth, row]:
            m.dealloc()
        return math.fsum(L) / len(L)
Example #3
File: knn.py Project: stjordanis/CUV
 def get_distance_matrix(self, test):
     t = cp.dev_tensor_float_cm(test)
     assert t.shape[1] == self.data.shape[1]
     tsq = cp.dev_tensor_float(t.shape[0])
     cp.reduce_to_col(tsq, t, cp.reduce_functor.ADD_SQUARED)
     p = cp.dev_tensor_float_cm([self.data.shape[0], t.shape[0]])
     cp.prod(p, self.data, t, 'n', 't', -2, 0)
     cp.matrix_plus_col(p, self.dsq)
     cp.matrix_plus_row(p, tsq)
     return p
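The three CUV calls above assemble squared Euclidean distances without ever forming differences: p[i, j] = ||data_i||^2 - 2 * data_i . test_j + ||test_j||^2. A hedged usage sketch (the knn instance and numpy-style indexing of its labels are assumptions based on the kNN constructor in the final example on this page):

import numpy as np

p = knn.get_distance_matrix(test)      # p[i, j] = squared distance between stored sample i and test sample j
nearest = np.argmin(p.np, axis=0)      # closest stored sample for every test column
predicted = knn.data_l[nearest]        # assumes data_l supports numpy-style indexing
p.dealloc()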
Example #4
File: mlp.py Project: 0rchard/CUV
 def delta_output(self, calculated, correct):
   derivative = cp.dev_tensor_float_cm([calculated.shape[0], correct.shape[1]])
   h = cp.dev_tensor_float_cm(derivative.shape)
   cp.copy(derivative, calculated)
   cp.apply_scalar_functor(derivative, cp.scalar_functor.DSIGM)
   cp.copy(h, correct)
   cp.apply_binary_functor(h, calculated, cp.binary_functor.SUBTRACT)
   cp.apply_binary_functor(derivative, h, cp.binary_functor.MULT)
   h.dealloc()
   return derivative
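delta_output computes the usual sigmoid output delta: derivative = dsigm(calculated) * (correct - calculated). A host-side NumPy restatement of that formula, assuming DSIGM maps an activation a to a * (1 - a), i.e. the sigmoid derivative expressed through its output:

import numpy as np

def delta_output_np(calculated, correct):
    # delta = sigm'(net) * (target - output), with sigm' written as a * (1 - a)
    return calculated * (1.0 - calculated) * (correct - calculated)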
Example #5
def test_pairwise_euclidean_dist_cm():
    from scipy.spatial.distance import cdist
    x = np.random.uniform(0, 1, (20, 10))
    y = np.random.uniform(0, 1, (30, 10))
    x_ = cp.dev_tensor_float_cm(x.copy('F'))
    y_ = cp.dev_tensor_float_cm(y.copy('F'))
    dists = cp.dev_tensor_float_cm([x_.shape[0], y_.shape[0]])
    cp.pairwise_distance_l2(dists, x_, y_)
    numpy_dist = cdist(x, y)
    ok_(np.linalg.norm(numpy_dist - dists.np) < 1e-3)
Example #6
File: kernels.py Project: 0rchard/CUV
def test_pairwise_euclidean_dist_cm():
    from scipy.spatial.distance import cdist
    x = np.random.uniform(0,1,(20,10))
    y = np.random.uniform(0,1,(30,10))
    x_ = cp.dev_tensor_float_cm(x.copy('F'))
    y_ = cp.dev_tensor_float_cm(y.copy('F'))
    dists = cp.dev_tensor_float_cm([x_.shape[0],y_.shape[0]])
    cp.pairwise_distance_l2(dists,x_,y_)
    numpy_dist = cdist(x,y)
    ok_(np.linalg.norm(numpy_dist-dists.np)<1e-3)
Example #7
File: knn.py Project: kgl-prml/CUV
 def get_distance_matrix(self, test):
     t = cp.dev_tensor_float_cm(test)
     assert t.shape[1] == self.data.shape[1]
     tsq = cp.dev_tensor_float(t.shape[0])
     cp.reduce_to_col(tsq, t, cp.reduce_functor.ADD_SQUARED)
     p = cp.dev_tensor_float_cm([self.data.shape[0], t.shape[0]])
     cp.prod(p, self.data, t, "n", "t", -2, 0)
     cp.matrix_plus_col(p, self.dsq)
     cp.matrix_plus_row(p, tsq)
     return p
Example #8
File: mlp.py Project: stjordanis/CUV
 def delta_output(self, calculated, correct):
     derivative = cp.dev_tensor_float_cm(
         [calculated.shape[0], correct.shape[1]])
     h = cp.dev_tensor_float_cm(derivative.shape)
     cp.copy(derivative, calculated)
     cp.apply_scalar_functor(derivative, cp.scalar_functor.DSIGM)
     cp.copy(h, correct)
     cp.apply_binary_functor(h, calculated, cp.binary_functor.SUBTRACT)
     cp.apply_binary_functor(derivative, h, cp.binary_functor.MULT)
     h.dealloc()
     return derivative
Example #9
    def fit(self, input_matrix, teacher_matrix, n_epochs=100, learnrate=0.10):
        """
        Function to train the network

        @param input_matrix -- matrix consisting of input data
           to the network.
        @param teacher_matrix -- matrix consisting of labels
           of input data.
        @param n_epochs -- number of epochs the network
           is to be trained.
        @param learnrate -- learning rate used in the weight updates.

        """
        n_samples = input_matrix.shape[-1]
        squared_errors = cp.dev_tensor_float_cm(self.neuron_layers[-1].deltas.shape)
        for r in xrange(n_epochs):
            print "Epoch ", r + 1, "/", n_epochs
            mse = 0.0
            ce = 0.0
            for batch in xrange(n_samples / self.batch_size):
                index_begin = self.batch_size * batch
                index_end = self.batch_size + index_begin

                # Push input and teacher to GPU memory
                # .copy("F") is needed since memory is non-contiguous
                self.neuron_layers[0].activations = cp.dev_tensor_float_cm(
                    input_matrix[:, index_begin:index_end].copy('F'))
                teacher_batch_host = teacher_matrix[:, index_begin:index_end]
                teacher_batch = cp.dev_tensor_float_cm(teacher_batch_host.copy('F'))

                # Forward-Pass
                for i in xrange(self.n_layers):
                    self.weight_layers[i].forward()

                # calculate error at output layer
                cp.copy(self.neuron_layers[-1].deltas, teacher_batch)
                self.neuron_layers[-1].deltas -= self.neuron_layers[-1].activations
                cp.copy(squared_errors, self.neuron_layers[-1].deltas)
                cp.apply_scalar_functor(squared_errors, cp.scalar_functor.SQUARE)
                mse += cp.sum(squared_errors)
                ce += float(np.sum(np.argmax(teacher_batch_host, axis=0)
                        != np.argmax(self.neuron_layers[-1].activations.np, axis=0)))

                # Backward-Pass
                for i in xrange(self.n_layers):
                    self.weight_layers[self.n_layers - i - 1].backward(learnrate, decay=.01)

                # Don't wait for garbage collector
                teacher_batch.dealloc()
                self.neuron_layers[0].activations.dealloc()

            print "MSE: ",     (mse / n_samples)
            print "Classification Error Training: ", (ce / n_samples)
        squared_errors.dealloc()
Example #10
File: mlp.py Project: 0rchard/CUV
  def runMLP(self, mbatch_provider, batchSize, epoch=0):

    self.NumCorrect = 0
    numberPictures  = 0
    teachbatch = None

    batch_idx = 0
    while True:
        try:
            #print "Batch ", batch+1, "/", numberBatches
            output, indices = [], []
            output.append(cp.dev_tensor_float_cm([self.cfg.px*self.cfg.py*self.cfg.maps_bottom,batchSize]))
            teachbatch = mbatch_provider.getMiniBatch(batchSize, output[0], return_teacher=True, id=batch_idx)
            numberPictures += teachbatch.shape[1]
            batch_idx += 1

            # Forward Pass
            for i in xrange(self.NumberOfLayers-1):
                linear = self.cfg.finetune_softmax and i==self.NumberOfLayers-2 # set output layer to linear
                output.append(self.forward(output[i], self.Weights[i], self.Bias[i],linear=linear))
            self.NumCorrect += self.getCorrect(output[-1], teachbatch)


        except MiniBatchProviderEmpty: # mbatch_provider empty
            break
        finally:
            map(lambda x:x.dealloc(), output)
            if teachbatch: teachbatch.dealloc()
            mbatch_provider.forgetOriginalData()


    self.testError.append((numberPictures - self.NumCorrect)/float(numberPictures)) 
    print "Test Correctly Classified:             ", self.NumCorrect, "/", numberPictures
    print "Test Error-Rate:                             %2.3f"% (100*self.testError[-1])
Example #11
File: base.py Project: 0rchard/CUV
 def allocUpdateMatrix(self):
     self.w_tmp = cp.dev_tensor_float_cm(self.mat.shape)
     cp.fill(self.w_tmp, 0)
     self.blo_tmp = cp.dev_tensor_float(len(self.bias_lo))
     self.bhi_tmp = cp.dev_tensor_float(len(self.bias_hi))
     cp.fill(self.blo_tmp, 0)
     cp.fill(self.bhi_tmp, 0)
Example #12
File: base.py Project: stjordanis/CUV
 def allocUpdateMatrix(self):
     self.w_tmp = cp.dev_tensor_float_cm(self.mat.shape)
     cp.fill(self.w_tmp, 0)
     self.blo_tmp = cp.dev_tensor_float(len(self.bias_lo))
     self.bhi_tmp = cp.dev_tensor_float(len(self.bias_hi))
     cp.fill(self.blo_tmp, 0)
     cp.fill(self.bhi_tmp, 0)
Example #13
 def setMiniBatch(self, mb, dst_layer):
     self.sampleset_ = mb
     self.sampleset = cp.dev_tensor_float_cm(
         self.sampleset_.astype('float32').copy('F'))
     if hasattr(self, "norm"):
         self.norm(self.sampleset)
     cp.copy(dst_layer, self.sampleset)
Example #14
File: base.py Project: 0rchard/CUV
 def load(self, prefix, postfix):
     fn = os.path.join(prefix, "weights-%s.npy"%postfix)
     if os.path.exists(fn):
         self.mat.dealloc()
         self.mat = cp.dev_tensor_float_cm(np.load(fn))
         self.bias_lo.dealloc()
         self.bias_hi.dealloc()
         self.bias_lo = cp.dev_tensor_float(np.load(os.path.join(prefix, "bias-lo-%s.npy"%postfix)))
         self.bias_hi = cp.dev_tensor_float(np.load(os.path.join(prefix, "bias-hi-%s.npy"%postfix)))
Example #15
File: mlp.py Project: 0rchard/CUV
  def __init__(self, cfg, weights,biases):
    self.cfg=cfg
    self.NumCorrect = 0
    self.Errorrate=[]
    self.testError=[]
    self.NumberOfLayers = cfg.num_layers+1

    self.preEpochHook = lambda mlp,epoch: mlp

    self.Weights = weights

    self.DeltaWeightsOld = []
    self.WeightsLearnRate = []
    self.dWeights = []
    self.dBias = []

    self.Bias = biases
    self.DeltaBiasOld = []
    self.BiasLearnRate = []
    l = 0.001

    self.NumberOfNeuronsPerLayer = []
    for i in xrange(self.NumberOfLayers-2):
        #self.Weights.append(newWeights)
        dim1, dim2 = self.Weights[i].shape
        self.createCopyFilled(self.DeltaWeightsOld, self.Weights[i], 0)
        self.createCopyFilled(self.WeightsLearnRate, self.Weights[i], l)
        if not self.cfg.finetune_online_learning or (self.cfg.finetune_online_learning and self.cfg.finetune_rprop):
            self.createCopyFilled(self.dWeights, self.Weights[i], 0)
            self.createCopyFilled(self.dBias, self.Bias[i], 0)
        self.createFilled(self.DeltaBiasOld, dim2, 1, 0)
        self.createFilled(self.BiasLearnRate, dim2, 1, l)
        self.NumberOfNeuronsPerLayer.append(dim1)

    # create dense matrix for last layer
    dim1,dim2 = self.Weights[-1].shape[1], self.cfg.num_classes
    if self.cfg.load and self.loadLastLayer(dim1,dim2):
        pass
    else:
        self.Weights.append(cp.dev_tensor_float_cm([dim1,dim2]))
        cp.fill_rnd_uniform(self.Weights[-1])
        #print "Initializing weights with rnd(%2.5f)", 
        cp.apply_scalar_functor(self.Weights[-1],cp.scalar_functor.SUBTRACT, 0.5)
        #cp.apply_scalar_functor(self.Weights[-1],cp.scalar_functor.MULT, 1./math.sqrt(self.Weights[-2].w))
        cp.apply_scalar_functor(self.Weights[-1],cp.scalar_functor.MULT, 1./self.Weights[-2].shape[1])
        self.createFilled(self.Bias, dim2, 1, 0)
    self.createFilled(self.DeltaBiasOld, dim2, 1, 0)
    self.createFilled(self.BiasLearnRate, dim2, 1, l)
    self.createFilled(self.DeltaWeightsOld,dim1,dim2,0)
    self.createFilled(self.WeightsLearnRate,dim1,dim2,l)
    if not self.cfg.finetune_online_learning or (self.cfg.finetune_online_learning and self.cfg.finetune_rprop):
        self.createCopyFilled(self.dWeights, self.Weights[-1], 0)
        self.createCopyFilled(self.dBias, self.Bias[-1], 0)
    self.NumberOfNeuronsPerLayer.append(dim1)
    self.NumberOfNeuronsPerLayer.append(dim2)

    self.reconstruction_error = []
Example #16
File: mlp.py Project: stjordanis/CUV
    def forward(self, input, weight, bias, linear=False):

        result = cp.dev_tensor_float_cm([weight.shape[1], input.shape[1]])
        cp.fill(result, 0)
        cp.prod(result, weight, input, "t", "n")
        cp.matrix_plus_col(result, bias)
        if not linear: cp.apply_scalar_functor(result, cp.scalar_functor.SIGM)

        return result
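The forward step is a plain affine map followed by an optional logistic nonlinearity: result = sigm(W^T * input + bias). A host-side NumPy equivalent, as a hedged sketch assuming SIGM is the logistic function and matrix_plus_col adds the bias to every column:

import numpy as np

def forward_np(input, weight, bias, linear=False):
    # the "t", "n" product plus a per-column bias
    result = np.dot(weight.T, input) + bias[:, None]
    return result if linear else 1.0 / (1.0 + np.exp(-result))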
Example #17
File: mlp.py Project: 0rchard/CUV
  def forward(self, input, weight, bias,linear=False):

    result = cp.dev_tensor_float_cm([weight.shape[1], input.shape[1]])
    cp.fill(result,0)
    cp.prod(result, weight, input, "t", "n")
    cp.matrix_plus_col(result, bias)
    if not linear: cp.apply_scalar_functor(result, cp.scalar_functor.SIGM)

    return result
Example #18
File: mlp.py Project: stjordanis/CUV
    def delta_hidden(self, weight, knownDerivative, netInput):
        deltaLo = cp.dev_tensor_float_cm([weight.shape[0], netInput.shape[1]])

        cp.prod(deltaLo, weight, knownDerivative, 'n', 'n')
        help = netInput.copy()
        cp.apply_scalar_functor(help, cp.scalar_functor.DSIGM)
        cp.apply_binary_functor(deltaLo, help, cp.binary_functor.MULT)
        help.dealloc()

        return deltaLo
Example #19
File: mlp.py Project: 0rchard/CUV
  def delta_hidden(self, weight, knownDerivative, netInput):
    deltaLo = cp.dev_tensor_float_cm([weight.shape[0], netInput.shape[1]])

    cp.prod(deltaLo, weight, knownDerivative, 'n', 'n')
    help = netInput.copy()
    cp.apply_scalar_functor(help, cp.scalar_functor.DSIGM)
    cp.apply_binary_functor(deltaLo, help, cp.binary_functor.MULT)
    help.dealloc()

    return deltaLo
Example #20
File: base.py Project: 0rchard/CUV
    def __init__(self, layer1, layer2, cfg, layernum):
        self.mat = cp.dev_tensor_float_cm([layer1.size, layer2.size])
        cp.fill(self.mat, 0)
        cp.add_rnd_normal(self.mat)
        fact = 1.0
        if layer2.unit_type == UnitType.binary or layer1.unit_type == UnitType.binary:
            # the 0.5 stems from the fact that our upper layer has activation 0.5 on average, not 0, if we use binary hidden units.
            fact = 0.5

        self.mat *= fact / math.sqrt(max(layer1.size, layer2.size))
        self.allocBias(layer1, layer2)
Example #21
def _tmp(dim1, dim2, value):
    """Function to create a filled matrix.
       This demonstrates how CUV can be extended using python.

    @param dim1 -- number of rows.
    @param dim2 -- number of columns.
    @param value -- value the matrix is filled with.

    """
    mat = cp.dev_tensor_float_cm([dim1, dim2])
    cp.fill(mat, value)
    return mat
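A quick host-side check of the helper; it relies only on the .np conversion property shown in the tensor-conversion examples further down:

mat = _tmp(3, 4, 2.5)
assert mat.np.shape == (3, 4)
assert (mat.np == 2.5).all()
mat.dealloc()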
Example #22
File: base.py Project: stjordanis/CUV
 def load(self, prefix, postfix):
     fn = os.path.join(prefix, "weights-%s.npy" % postfix)
     if os.path.exists(fn):
         self.mat.dealloc()
         self.mat = cp.dev_tensor_float_cm(np.load(fn))
         self.bias_lo.dealloc()
         self.bias_hi.dealloc()
         self.bias_lo = cp.dev_tensor_float(
             np.load(os.path.join(prefix, "bias-lo-%s.npy" % postfix)))
         self.bias_hi = cp.dev_tensor_float(
             np.load(os.path.join(prefix, "bias-hi-%s.npy" % postfix)))
Example #23
File: base.py Project: stjordanis/CUV
    def __init__(self, layer1, layer2, cfg, layernum):
        self.mat = cp.dev_tensor_float_cm([layer1.size, layer2.size])
        cp.fill(self.mat, 0)
        cp.add_rnd_normal(self.mat)
        fact = 1.0
        if layer2.unit_type == UnitType.binary or layer1.unit_type == UnitType.binary:
            # the 0.5 stems from the fact that our upper layer has activation 0.5 on average, not 0, if we use binary hidden units.
            fact = 0.5

        self.mat *= fact / math.sqrt(max(layer1.size, layer2.size))
        self.allocBias(layer1, layer2)
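The constructor draws standard-normal weights and rescales them by fact / sqrt(max(layer1.size, layer2.size)). A host-side restatement of just the initialization, as a hedged sketch (function name hypothetical):

import math
import numpy as np

def init_weight_matrix(size1, size2, any_layer_binary):
    w = np.random.randn(size1, size2).astype('float32')  # fill(0) followed by add_rnd_normal
    fact = 0.5 if any_layer_binary else 1.0               # binary units have mean activation 0.5, not 0
    return w * (fact / math.sqrt(max(size1, size2)))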
Example #24
File: ais.py Project: stjordanis/CUV
 def load_weights(self, path):
     print "loading weights from ", path
     if not os.path.exists(os.path.join(path, "weights-0-%s.npy" % self.cfg.postfix)):
         print "Could not open weights."
         sys.exit(1)
     self.w_ = np.load(os.path.join(path, "weights-0-%s.npy" % self.cfg.postfix))
     self.bias_lo = cp.dev_tensor_float((np.load(os.path.join(path, "bias-lo-0-%s.npy" % self.cfg.postfix))).reshape(-1, 1))
     self.bias_hi = cp.dev_tensor_float((np.load(os.path.join(path, "bias-hi-0-%s.npy" % self.cfg.postfix))).reshape(-1, 1))
     self.w = cp.dev_tensor_float_cm(self.w_.copy("F"))
     self.num_vis = self.w_.shape[0]
     self.num_hids = self.w_.shape[1]
     print "Number of hidden units: ", self.num_hids
Example #25
File: mlp.py Project: 0rchard/CUV
  def train(self, mbatch_provider, numberRounds, batchSize, useRPROP = 0):
    self.useRPROP = useRPROP

    for r in xrange(numberRounds):
        numberPictures = 0

        print self.cfg.workdir + ": Epoch ", r+1, "/", numberRounds
        self.preEpochHook(self, r)

        self.NumCorrect = 0
        updateOnlyLast = r < self.cfg.finetune_onlylast

        teachbatch = None

        batch_idx = 0
        output, indices = [], []
        while True:
            try:
                output= []
                output.append(cp.dev_tensor_float_cm([self.cfg.px*self.cfg.py*self.cfg.maps_bottom,batchSize]))
                teachbatch = mbatch_provider.getMiniBatch(batchSize, output[0], return_teacher=True, id=batch_idx)

                numberPictures += teachbatch.shape[1]
                batch_idx += 1

                # forward pass through all layers
                for i in xrange(self.NumberOfLayers-1):
                   linear = self.cfg.finetune_softmax and i==self.NumberOfLayers-2 # set output layer to linear
                   output.append(self.forward(output[i], self.Weights[i], self.Bias[i], linear=linear))


                self.NumCorrect += self.getCorrect(output[-1], teachbatch)

                ## backward pass
                self.backward(output, teachbatch,indices, batchSize, updateOnlyLast, batch_idx)

            except MiniBatchProviderEmpty: # mbatch provider is empty
                break
            finally:
                map(lambda x:x.dealloc(), output)
                map(lambda x:x and x.dealloc(), indices)
                if teachbatch: teachbatch.dealloc()
                mbatch_provider.forgetOriginalData()

        if not self.cfg.finetune_online_learning:
            self.applyDeltaWeights(self.dWeights,self.dBias,updateOnlyLast, batchSize)
            map(lambda x: cp.fill(x,0),self.dWeights)
            map(lambda x: cp.fill(x,0),self.dBias)

        self.Errorrate.append((numberPictures - self.NumCorrect)/ float(numberPictures) )

        print "Train Correctly Classified: ", self.NumCorrect, "/", numberPictures
        print "Train Error-Rate:                 %2.3f"% (self.Errorrate[-1]*100)
Example #26
File: ais.py Project: stjordanis/CUV
    def initialize_everything(self):
        ### initialize matrices for cuda ###
        self.r_ = np.zeros(self.cfg.chains).astype('float32').copy('F')
        self.w = cp.dev_tensor_float_cm(self.w_.copy("F"))

        ### generate basemodel
        softened = (self.data.mean(axis=1) + 0.1)
        self.baserate_bias_ = (np.log(softened) - np.log(1 - softened)).astype('float32').copy('F')
        self.baserate_bias_.shape = (self.w.shape[0], 1)


        ## start chains
        self.v_ = np.tile(sigm(self.baserate_bias_),(1,self.cfg.chains))
        self.v_ = sample(self.v_,self.cfg['utype']).astype('float32').copy('F')
        self.v = cp.dev_tensor_float_cm(self.v_.copy("F"))
        self.h = cp.dev_tensor_float_cm([self.num_hids,self.cfg.chains])

        self.baserate_bias = cp.dev_tensor_float_cm(self.baserate_bias_.copy("F"))
        self.r = cp.dev_tensor_float_cm(np.vstack(self.r_).copy("F"))
        cp.initialize_mersenne_twister_seeds(int(time.time()*1000) % 100000)
Example #27
File: ais.py Project: stjordanis/CUV
    def get_partition_function(self):
        tmp = cp.dev_tensor_float_cm([self.cfg.chains, 1])
        tmp2 = cp.dev_tensor_float_cm([self.num_hids,self.cfg.chains])
        #steps = 14500
        #steps = 1000
        steps = self.cfg.steps
        #beta=0.001
        beta = 1.0 / steps
        beta_old = 0
        for step in xrange(steps):
            self.p_k(beta_old, tmp, tmp2, lambda x: cp.apply_binary_functor(self.r, x, cp.binary_functor.SUBTRACT))
            self.p_k(beta, tmp, tmp2, lambda x: cp.apply_binary_functor(self.r, x, cp.binary_functor.ADD))
            self.sample_markov_chains(beta, step)
            ### sample v_i
            ### increase beta
            beta_old = beta
            #if step<500:
                #beta += 0.001
            #elif step < 4500:
                #beta += 0.0001
            #else :
                #beta += 0.00001
            beta += 1.0/steps
            #if step % 100 == 0:
                #self.r_=self.r.np
                #v_=self.v.np
                #h_=self.h.np
                #print "v: %f"%v_.mean()
                #print "h: %f"%h_.mean()
                #print "r: %f"%self.r_.mean()
                #sys.stdout.write('.')
                #sys.stdout.flush()

        ### multiply r by partition function of baseline rbm
        self.r_ = self.r.np
        self.partition_baserate = (np.log(1 + np.exp(self.baserate_bias_))).sum() + self.num_hids * np.log(2)
        self.r_ += self.partition_baserate
        tmp.dealloc()
        tmp2.dealloc()
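After the annealing loop, self.r_ holds one log importance weight per chain, already shifted by the base-rate log partition function. AIS estimates log Z by averaging those weights; a hedged sketch of that final reduction (helper name hypothetical), computed stably:

import numpy as np

def log_partition_estimate(r):
    # log Z  ~=  log(mean(exp(r))), with the max factored out for numerical stability
    m = r.max()
    return m + np.log(np.mean(np.exp(r - m)))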
Example #28
 def denominator(self, batchsize):
     acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
     actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
     row = cp.dev_tensor_float([batchsize])
     cp.fill(acth, 0.0)
     cp.fill(actv, 0.0)
     cp.fill(row, 0.0)
     n = acth.shape[0]
     nmax = 2 ** n
     if nmax % batchsize != 0:
         print "Error: 2**n=%d must be dividable by batchsize=%d!" % (nmax, batchsize)
         sys.exit(1)
     L = []
     widgets = ["Denominator: ", Percentage(), " ", Bar(marker=RotatingMarker()), " ", ETA()]
     pbar = ProgressBar(widgets=widgets, maxval=nmax)
     for i in xrange(0, nmax, acth.shape[1]):
         cp.set_binary_sequence(acth, i)
         L.append(self.partialsum(acth, actv, row))
         if (i / acth.shape[1]) % 100 == 0:
             pbar.update(i)
     pbar.finish()
     for m in [actv, acth, row]:
         m.dealloc()
     return math.fsum(L)
Example #29
 def getMiniBatch(self, samplesize, dst_layer, id=None, return_teacher=False):
     if id is None:
         #id = np.random.randint(0,len(self.dataset)-samplesize)
         id = self.pos
         self.pos = self.pos + samplesize
         self.pos = self.pos % self.dataset.shape[1]
         if self.dataset.shape[1] < self.pos+samplesize-1:
             self.pos = 0
         id = self.pos
     else:
         id = id*samplesize
     if self.dataset.shape[1] < id+samplesize:
         raise MiniBatchProviderEmpty
     self.setMiniBatch(self.dataset[:,id:id+samplesize], dst_layer)
     if return_teacher:
         return cp.dev_tensor_float_cm(self.teacher[:,id:id+samplesize].astype('float32').copy('F'))
Example #30
def get_mbp(cfg):
    if cfg.dataset == Dataset.mnist:
        dataset = MNISTData(cfg, "/home/local/datasets/MNIST")
        mbp = minibatch_provider.MNISTMiniBatchProvider(dataset.data)
        act = cp.dev_tensor_float_cm([cfg.px * cfg.py, cfg.batchsize])
        mbs = minibatch_provider.MiniBatchStatistics(mbp, act)
        mbp.norm = lambda x: mbs.normalize_255(x)
        mbp.mbs = mbs  # allows visualization of mean, range, etc
    elif cfg.dataset == Dataset.shifter:
        dataset = ShifterData(cfg, "/home/local/datasets")
        mbp = minibatch_provider.MNISTMiniBatchProvider(dataset.data)
    elif cfg.dataset == Dataset.bars_and_stripes:
        dataset = BarsAndStripesData(cfg, "/home/local/datasets")
        mbp = minibatch_provider.MNISTMiniBatchProvider(dataset.data)
    else:
        raise NotImplementedError()
    return mbp
Example #31
File: mlp.py Project: stjordanis/CUV
    def runMLP(self, mbatch_provider, batchSize, epoch=0):

        self.NumCorrect = 0
        numberPictures = 0
        teachbatch = None

        batch_idx = 0
        while True:
            try:
                #print "Batch ", batch+1, "/", numberBatches
                output, indices = [], []
                output.append(
                    cp.dev_tensor_float_cm([
                        self.cfg.px * self.cfg.py * self.cfg.maps_bottom,
                        batchSize
                    ]))
                teachbatch = mbatch_provider.getMiniBatch(batchSize,
                                                          output[0],
                                                          return_teacher=True,
                                                          id=batch_idx)
                numberPictures += teachbatch.shape[1]
                batch_idx += 1

                # Forward Pass
                for i in xrange(self.NumberOfLayers - 1):
                    linear = self.cfg.finetune_softmax and i == self.NumberOfLayers - 2  # set output layer to linear
                    output.append(
                        self.forward(output[i],
                                     self.Weights[i],
                                     self.Bias[i],
                                     linear=linear))
                self.NumCorrect += self.getCorrect(output[-1], teachbatch)

            except MiniBatchProviderEmpty:  # mbatch_provider empty
                break
            finally:
                map(lambda x: x.dealloc(), output)
                if teachbatch: teachbatch.dealloc()
                mbatch_provider.forgetOriginalData()

        self.testError.append(
            (numberPictures - self.NumCorrect) / float(numberPictures))
        print "Test Correctly Classified:             ", self.NumCorrect, "/", numberPictures
        print "Test Error-Rate:                             %2.3f" % (
            100 * self.testError[-1])
Example #32
    def predict(self, input_matrix):
        """
        Predict label on unseen data

        @param input_matrix -- matrix consisting of input data to the network.
        """
        n_samples = input_matrix.shape[-1]
        predictions = []
        for batch in xrange(n_samples / self.batch_size):
            index_begin = self.batch_size * batch
            index_end = index_begin + self.batch_size
            self.neuron_layers[0].activations = cp.dev_tensor_float_cm(input_matrix[:,
                index_begin:index_end].copy('F'))
            for i in xrange(self.n_layers):
                self.weight_layers[i].forward()
            prediction_batch = np.argmax(self.neuron_layers[-1].activations.np, axis=0)
            predictions.append(prediction_batch)
        return np.hstack(predictions)
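A hedged usage sketch (net, test_matrix and test_teacher are hypothetical names; the teacher matrix is assumed one-hot per column, as in fit above). Since predict only processes whole batches, the teacher is sliced to the number of returned labels:

import numpy as np

labels = net.predict(test_matrix.astype('float32'))
accuracy = np.mean(labels == np.argmax(test_teacher[:, :len(labels)], axis=0))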
Example #33
 def getMiniBatch(self,
                  samplesize,
                  dst_layer,
                  id=None,
                  return_teacher=False):
      if id is None:
         #id = np.random.randint(0,len(self.dataset)-samplesize)
         id = self.pos
         self.pos = self.pos + samplesize
         self.pos = self.pos % self.dataset.shape[1]
         if self.dataset.shape[1] < self.pos + samplesize - 1:
             self.pos = 0
         id = self.pos
     else:
         id = id * samplesize
     if self.dataset.shape[1] < id + samplesize:
         raise MiniBatchProviderEmpty
     self.setMiniBatch(self.dataset[:, id:id + samplesize], dst_layer)
     if return_teacher:
         return cp.dev_tensor_float_cm(
             self.teacher[:,
                          id:id + samplesize].astype('float32').copy('F'))
Example #34
 def testTensorToNpyTrans(self):
     """ convert a tensor to a numpy matrix (transposed) """
     t = cp.dev_tensor_float_cm(self.shape)
     cp.sequence(t)
     n = t.np
     self.cmp3d(t, n)
Example #35
File: example2.py Project: stjordanis/CUV
import cuv_python as cp

C = cp.dev_tensor_float_cm([2048, 2048])  # column major tensor
A = cp.dev_tensor_float_cm([2048, 2048])
B = cp.dev_tensor_float_cm([2048, 2048])
cp.fill(C, 0)  # fill with some defined values, not really necessary here
cp.sequence(A)
cp.sequence(B)
cp.apply_binary_functor(B, A,
                        cp.binary_functor.MULT)  # elementwise multiplication
B *= A  # operators also work (elementwise)
cp.prod(C, A, B, 'n', 't')  # matrix multiplication
C = cp.prod(A, B.T)  # numpy-like form, allocates new matrix for result
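A round-trip check against NumPy can confirm the result; this hedged sketch uses only calls that already appear in the examples on this page (.np, copy('F'), and the numpy-like cp.prod form):

import numpy as np

a = np.random.rand(64, 32).astype('float32')
b = np.random.rand(64, 32).astype('float32')
A2 = cp.dev_tensor_float_cm(a.copy('F'))
B2 = cp.dev_tensor_float_cm(b.copy('F'))
C2 = cp.prod(A2, B2.T)                 # C2 = a . b^T, allocated on the device
assert np.allclose(C2.np, np.dot(a, b.T), atol=1e-4)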
Example #36
File: example2.py Project: 0rchard/CUV
import cuv_python as cp

C = cp.dev_tensor_float_cm([2048,2048])   # column major tensor
A = cp.dev_tensor_float_cm([2048,2048])
B = cp.dev_tensor_float_cm([2048,2048])
cp.fill(C,0)                       # fill with some defined values, not really necessary here
cp.sequence(A)
cp.sequence(B)
cp.apply_binary_functor(B,A,cp.binary_functor.MULT) # elementwise multiplication
B *= A                                              # operators also work (elementwise)
cp.prod(C,A,B,'n','t')                              # matrix multiplication
C = cp.prod(A, B.T)                                 # numpy-like form, allocates new matrix for result
Example #37
File: base.py Project: 0rchard/CUV
 def allocPChain(self):
     self.pchain = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.pchain, 0)
Example #38
File: mlp.py Project: 0rchard/CUV
 def createCopyFilled(self, matList, someMat, value):
     if len(someMat.shape) < 2 or someMat.shape[1] == 1:
         matList.append(cp.dev_tensor_float(someMat.shape))
     else:
         matList.append(cp.dev_tensor_float_cm(someMat.shape))
     cp.fill(matList[-1], value)
Example #39
File: base.py Project: stjordanis/CUV
 def allocPChain(self):
     self.pchain = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.pchain, 0)
Example #40
 def __init__(self, weight, bv, bh):
     self.weight = cp.dev_tensor_float_cm(weight)
     self.bv = cp.dev_tensor_float(bv)
     self.bh = cp.dev_tensor_float(bh)
Example #41
 def testTensorToNpyCm(self):
     """ convert a tensor to a numpy matrix (column major) """
     t = cp.dev_tensor_float_cm(self.shape)
     cp.sequence(t)
     n = t.np
     self.cmp3d(t, n)
Example #42
 def testNpyToTensorCmTrans(self):
     """ convert a numpy matrix to a tensor (column major, transposed)"""
     n = np.arange(np.prod(self.shape)).reshape(self.shape)
     t = cp.dev_tensor_float_cm(n.astype("float32"))
     self.cmp3d_inv(t, n)
Example #43
File: mlp.py Project: 0rchard/CUV
 def calculateDeltaWeights(self, derivative, input, oldWeights):
     result = cp.dev_tensor_float_cm(oldWeights.shape)
     cp.prod(result, input, derivative, 'n', 't')
     return result
Example #44
 def setMiniBatch(self, mb, dst_layer):
     self.sampleset_ = mb
     self.sampleset = cp.dev_tensor_float_cm(self.sampleset_.astype('float32').copy('F'))
     if hasattr(self, "norm"):
         self.norm(self.sampleset)
     cp.copy(dst_layer, self.sampleset)
Example #45
File: base.py Project: stjordanis/CUV
 def alloc(self):
     self.act = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.act, 0)
     return self
Example #46
File: base.py Project: 0rchard/CUV
 def alloc(self):
     self.act = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.act, 0)
     return self
Example #47
File: mlp.py Project: 0rchard/CUV
 def createFilled(self, matList, dim1, dim2, value):
     if dim2 == 1:
         matList.append(cp.dev_tensor_float([dim1]))
     else:
         matList.append(cp.dev_tensor_float_cm([dim1, dim2]))
     cp.fill(matList[-1], value)
Example #48
File: mlp.py Project: stjordanis/CUV
 def createCopyFilled(self, matList, someMat, value):
     if len(someMat.shape) < 2 or someMat.shape[1] == 1:
         matList.append(cp.dev_tensor_float(someMat.shape))
     else:
         matList.append(cp.dev_tensor_float_cm(someMat.shape))
     cp.fill(matList[-1], value)
Example #49
 def testNpyToTensorCm(self):
     """ convert a numpy matrix to a tensor (column major)"""
     n = np.arange(np.prod(self.shape)).reshape(self.shape).copy("F")
     t = cp.dev_tensor_float_cm(n.astype("float32"))
     self.cmp3d(t, n)
Example #50
File: mlp.py Project: stjordanis/CUV
    def train(self, mbatch_provider, numberRounds, batchSize, useRPROP=0):
        self.useRPROP = useRPROP

        for r in xrange(numberRounds):
            numberPictures = 0

            print self.cfg.workdir + ": Epoch ", r + 1, "/", numberRounds
            self.preEpochHook(self, r)

            self.NumCorrect = 0
            updateOnlyLast = r < self.cfg.finetune_onlylast

            teachbatch = None

            batch_idx = 0
            output, indices = [], []
            while True:
                try:
                    output = []
                    output.append(
                        cp.dev_tensor_float_cm([
                            self.cfg.px * self.cfg.py * self.cfg.maps_bottom,
                            batchSize
                        ]))
                    teachbatch = mbatch_provider.getMiniBatch(
                        batchSize,
                        output[0],
                        return_teacher=True,
                        id=batch_idx)

                    numberPictures += teachbatch.shape[1]
                    batch_idx += 1

                    # forward pass through all layers
                    for i in xrange(self.NumberOfLayers - 1):
                        linear = self.cfg.finetune_softmax and i == self.NumberOfLayers - 2  # set output layer to linear
                        output.append(
                            self.forward(output[i],
                                         self.Weights[i],
                                         self.Bias[i],
                                         linear=linear))

                    self.NumCorrect += self.getCorrect(output[-1], teachbatch)

                    ## backward pass
                    self.backward(output, teachbatch, indices, batchSize,
                                  updateOnlyLast, batch_idx)

                except MiniBatchProviderEmpty:  # mbatch provider is empty
                    break
                finally:
                    map(lambda x: x.dealloc(), output)
                    map(lambda x: x and x.dealloc(), indices)
                    if teachbatch: teachbatch.dealloc()
                    mbatch_provider.forgetOriginalData()

            if not self.cfg.finetune_online_learning:
                self.applyDeltaWeights(self.dWeights, self.dBias,
                                       updateOnlyLast, batchSize)
                map(lambda x: cp.fill(x, 0), self.dWeights)
                map(lambda x: cp.fill(x, 0), self.dBias)

            self.Errorrate.append(
                (numberPictures - self.NumCorrect) / float(numberPictures))

            print "Train Correctly Classified: ", self.NumCorrect, "/", numberPictures
            print "Train Error-Rate:                 %2.3f" % (
                self.Errorrate[-1] * 100)
Example #51
File: knn.py Project: kgl-prml/CUV
 def __init__(self, data, data_l, k):
     self.k = k
     self.data = cp.dev_tensor_float_cm(data)
     self.data_l = data_l
     self.dsq = cp.dev_tensor_float(self.data.shape[0])
     cp.reduce_to_col(self.dsq, self.data, cp.reduce_functor.ADD_SQUARED)
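Together with get_distance_matrix from the earlier knn.py examples, this class supports a straightforward k-nearest-neighbour vote. A hedged end-to-end sketch (the class name KNN and numpy-indexable labels are assumptions):

import numpy as np

knn = KNN(train_data, train_labels, k=5)
dists = knn.get_distance_matrix(test_data)      # squared Euclidean distances, computed on the GPU
idx = np.argsort(dists.np, axis=0)[:knn.k]      # k closest stored samples per test column
votes = knn.data_l[idx]                         # shape (k, n_test); take a majority vote per column
dists.dealloc()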