Example #1
File: base.py Project: 0rchard/CUV
 def updateGradientNeg(self, layer1, layer2, batchsize):
     cp.prod(self.w_tmp, layer1.act, layer2.act, 'n', 't', -1. / batchsize,
             1. / batchsize)
     cp.reduce_to_col(self.blo_tmp, layer1.act, cp.reduce_functor.ADD,
                      -1. / batchsize, 1. / batchsize)
     cp.reduce_to_col(self.bhi_tmp, layer2.act, cp.reduce_functor.ADD,
                      -1. / batchsize, 1. / batchsize)
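
The two trailing scalars here are presumably scale factors for the fresh result and for the existing buffer contents, i.e. dst = factOld*dst + factNew*result (an assumption; the listing does not show the signature). Under that reading, the calls fold previously stored positive-phase statistics together with the negative phase (see the updateGradientPos example below). A NumPy sketch of the same arithmetic:

    import numpy as np

    def update_gradient_neg_np(w_tmp, blo_tmp, bhi_tmp, act1, act2, batchsize):
        # Each buffer ends up as (positive_phase - negative_phase) / batchsize.
        w_tmp[:] = (w_tmp - act1.dot(act2.T)) / batchsize
        blo_tmp[:] = (blo_tmp - act1.sum(axis=1)) / batchsize
        bhi_tmp[:] = (bhi_tmp - act2.sum(axis=1)) / batchsize
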
Example #2
File: knn.py Project: kgl-prml/CUV
 def get_distance_matrix(self, test):
     t = cp.dev_tensor_float_cm(test)
     assert t.shape[1] == self.data.shape[1]
     # Squared norms of the test rows.
     tsq = cp.dev_tensor_float(t.shape[0])
     cp.reduce_to_col(tsq, t, cp.reduce_functor.ADD_SQUARED)
     # Squared distances via ||d - t||^2 = ||d||^2 - 2*d.t + ||t||^2:
     # start from -2 * data * t^T, add the cached row norms self.dsq down
     # each column and the test norms tsq across each row.
     p = cp.dev_tensor_float_cm([self.data.shape[0], t.shape[0]])
     cp.prod(p, self.data, t, "n", "t", -2, 0)
     cp.matrix_plus_col(p, self.dsq)
     cp.matrix_plus_row(p, tsq)
     return p
Example #3
 def update_stats(self,batch):
     # Accumulate per-dimension statistics; each column of `batch` is one
     # sample, so reduce_to_col reduces over samples.
     vmin  = cp.dev_tensor_float(batch.shape[0])
     vmax  = cp.dev_tensor_float(batch.shape[0])
     mean  = cp.dev_tensor_float(batch.shape[0])
     mean2 = cp.dev_tensor_float(batch.shape[0])
     map(lambda x: cp.fill(x,0), [mean,mean2])
     cp.reduce_to_col(mean,batch)
     cp.reduce_to_col(mean2,batch,cp.reduce_functor.ADD_SQUARED)
     cp.reduce_to_col(vmin,batch,cp.reduce_functor.MIN)
     cp.reduce_to_col(vmax,batch,cp.reduce_functor.MAX)
     if "N" in self.__dict__:
         self.N += batch.shape[1]
         cp.apply_binary_functor(self.mean, mean, cp.binary_functor.ADD)
         cp.apply_binary_functor(self.mean2,mean2,cp.binary_functor.ADD)
         cp.apply_binary_functor(self.min,vmin,cp.binary_functor.MIN)
         cp.apply_binary_functor(self.max,vmax,cp.binary_functor.MAX)
         mean.dealloc()
         mean2.dealloc()
         vmin.dealloc()
         vmax.dealloc()
     else:
         self.N     = batch.shape[1]
         self.mean  = mean
         self.mean2 = mean2
         self.min   = vmin
         self.max   = vmax
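
The accumulated sums are easy to turn into final statistics on the host. A hypothetical finalize helper (the .np host-copy property is the one used in the kmeans example below):

    import numpy as np

    def finalize_stats(self):
        # Convert the running sums into per-dimension mean/std on the host.
        mean = self.mean.np / self.N
        var = self.mean2.np / self.N - mean ** 2
        return mean, np.sqrt(np.maximum(var, 0)), self.min.np, self.max.np
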
Example #4
    def weight_update(self, learnrate=0.01, decay=0.0):
        """Updates the weights and the bias
           using source activations and target deltas.

           @param learnrate  how strongly the gradient influences the weights
           @param decay      large values result in a stronger regularization
                             of the squared weight values"""
        batch_size = self.source.activations.w
        h = cp.dev_matrix_cmf(self.weight.h, self.weight.w)
        cp.prod(h, self.target.deltas, self.source.activations, 'n', 't')
        cp.learn_step_weight_decay(self.weight, h, learnrate/batch_size, decay)
        h.dealloc()
        h = cp.get_filled_matrix(self.target.activations.h, 1, 0)
        cp.reduce_to_col(h.vec, self.target.deltas)
        cp.learn_step_weight_decay(self.bias, h, learnrate/batch_size, decay)
        h.dealloc()
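
The exact update rule implemented by cp.learn_step_weight_decay is not shown in this listing; a plausible reading (an assumption, not a confirmed API contract) is a gradient step with an L2 weight-decay term, in NumPy terms:

    def learn_step_weight_decay_np(W, dW, learnrate, decay):
        # Assumed semantics: step along dW while shrinking W (L2 penalty).
        W += learnrate * (dW - decay * W)
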
Example #5
File: mlp.py Project: 0rchard/CUV
    def backward(self, output, teacher, indices, batchSize, updateOnlyLast,
                 batch_idx):
        deltaWeights = []
        deltaBias = []
        derivative = []
        if self.cfg.finetune_softmax:
            derivative.append(self.delta_outputSoftMax(output[-1], teacher))
        else:
            derivative.append(self.delta_output(output[-1], teacher))

        for i in reversed(xrange(1, self.NumberOfLayers - 1)):
            derivative.append(
                self.delta_hidden(self.Weights[i], derivative[-1], output[i]))
        derivative.reverse()

        #DeltaWeights
        for i in reversed(xrange(self.NumberOfLayers - 1)):
            deltaWeights.append(
                self.calculateDeltaWeights(derivative[i], output[i],
                                           self.Weights[i]))
        deltaWeights.reverse()

        #DeltaBias
        for i in xrange(self.NumberOfLayers - 1):
            self.createFilled(deltaBias, self.Bias[i].size, 1, 0)
            cp.reduce_to_col(deltaBias[-1], derivative[i])

        # Weight Update
        if self.cfg.finetune_online_learning and not self.useRPROP:
            self.applyDeltaWeights(deltaWeights, deltaBias, updateOnlyLast,
                                   batchSize)
        elif self.cfg.finetune_online_learning and self.useRPROP and batch_idx % 16 == 0:
            self.applyDeltaWeights(self.dWeights, self.dBias, updateOnlyLast,
                                   batchSize)
            map(lambda x: cp.fill(x, 0), self.dWeights)
            map(lambda x: cp.fill(x, 0), self.dBias)
        else:
            for i in xrange(self.NumberOfLayers - 1):
                cp.apply_binary_functor(self.dWeights[i], deltaWeights[i],
                                        cp.binary_functor.ADD)
                cp.apply_binary_functor(self.dBias[i], deltaBias[i],
                                        cp.binary_functor.ADD)

        da = lambda x: x.dealloc()
        map(da, deltaWeights)
        map(da, deltaBias)
        map(da, derivative)
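
createFilled is not part of this listing; presumably it allocates a filled matrix and appends it to the given list. A hypothetical reconstruction, reusing cp.get_filled_matrix from the weight_update example above:

    def createFilled(self, lst, h, w, value):
        # Allocate an h-by-w matrix filled with `value` and collect it,
        # so deltaBias[-1] above refers to the newly created tensor.
        lst.append(cp.get_filled_matrix(h, w, value))
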
Example #6
def kmeans(dataset, num_clusters, iters):
    # initialize clusters randomly
    rand_indices = np.random.randint(0, dataset.shape[0], num_clusters)
    clusters = dataset[rand_indices,:]

    # push initial clusters and dataset to device
    dataset_dev = cp.dev_tensor_float(dataset)
    clusters_dev = cp.dev_tensor_float(clusters)

    # allocate matrices for calculations (so we don't need to allocate in loop)
    dists = cp.dev_tensor_float([dataset_dev.shape[0], num_clusters])
    nearest = cp.dev_tensor_uint(dataset_dev.shape[0])

    # main loop
    for i in xrange(iters):
        # compute pairwise distances
        cp.pdist2(dists, dataset_dev, clusters_dev)
        # find closest cluster
        cp.reduce_to_col(nearest, dists, cp.reduce_functor.ARGMIN)
        # update cluster centers
        # (this is a special purpose function for kmeans)
        cp.compute_clusters(clusters_dev, dataset_dev, nearest)
    return [clusters_dev.np, nearest.np]
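
A minimal usage sketch for the function above, assuming the cp alias stands for import cuv_python as cp and a CUDA device is available:

    import numpy as np

    # Cluster 1000 random 2-D points into 5 groups.
    data = np.random.randn(1000, 2).astype(np.float32)
    centers, assignment = kmeans(data, 5, 10)
    print(centers.shape)      # (5, 2): one row per cluster center
    print(assignment[:10])    # cluster index of the first ten points
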
Example #7
File: base.py Project: 0rchard/CUV
 def updateGradientPos(self, layer1, layer2):
     # Positive-phase statistics: w_tmp <- act1 * act2^T, and the bias
     # gradients as row sums of each layer's activations.
     cp.prod(self.w_tmp, layer1.act, layer2.act, 'n', 't')
     cp.reduce_to_col(self.blo_tmp, layer1.act)
     cp.reduce_to_col(self.bhi_tmp, layer2.act)
Example #8
File: knn.py Project: kgl-prml/CUV
 def __init__(self, data, data_l, k):
     self.k = k
     self.data = cp.dev_tensor_float_cm(data)
     self.data_l = data_l
     # Cache the squared norms of the training rows; get_distance_matrix
     # reuses them for every query batch.
     self.dsq = cp.dev_tensor_float(self.data.shape[0])
     cp.reduce_to_col(self.dsq, self.data, cp.reduce_functor.ADD_SQUARED)
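
Combining this constructor with get_distance_matrix from Example #2 gives a simple nearest-neighbour classifier. A hypothetical driver (the class name KNN is assumed; the listing only shows its methods):

    import numpy as np
    import cuv_python as cp  # assumed import behind the cp alias

    train_x = np.random.randn(100, 8).astype(np.float32)
    train_y = np.random.randint(0, 3, 100)
    test_x = np.random.randn(10, 8).astype(np.float32)

    knn = KNN(train_x, train_y, k=5)
    dists = knn.get_distance_matrix(test_x)  # (100, 10) squared distances
    nearest = np.argmin(dists.np, axis=0)    # closest training row per query
    print(train_y[nearest])                  # 1-NN labels for the ten queries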