Code example #1
File: base.py  Project: stjordanis/CUV
    def project_down(self):
        """ Project filters down to first (visible) layer """
        self.projection_results = dict()
        self.projection_results_lateral = dict()
        print "projecting down..."
        seqs = dict()

        for layernum in xrange(1, len(self.layers)):
            # project down from layer ``layernum''
            layer = self.layers[layernum]

            # turn on exactly one unit per layer
            cp.fill(layer.act, 0)
            if self.cfg.batchsize > layer.size:
                seqs[layernum] = np.array(xrange(0, layer.size))
            else:
                #seqs[layernum] = np.array(random.sample(xrange(layer.size), self.cfg.batchsize))
                seqs[layernum] = np.array(xrange(self.cfg.batchsize))

            for i in xrange(len(seqs[layernum])):
                layer.act.set(int(seqs[layernum][i]), i, 1)

            for i in reversed(xrange(1, layernum + 1)):
                lower_layer = self.layers[i - 1]
                self.weights[i - 1].downPass(lower_layer,
                                             self.layers[i],
                                             sample=False)

            img = self.layers[0].act.np
            img -= np.tile(img.mean(axis=1), (img.shape[1], 1)).T
            img -= np.tile(img.mean(axis=0), (img.shape[0], 1))
            self.projection_results[layernum] = img
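Note: the last three lines of project_down double-center the visible activations, subtracting row means and then column means. A minimal NumPy sketch of just that normalization (illustrative, independent of CUV):

import numpy as np

img = np.arange(12, dtype=float).reshape(3, 4)
# subtract each row's mean, then each column's mean, as above
img -= np.tile(img.mean(axis=1), (img.shape[1], 1)).T
img -= np.tile(img.mean(axis=0), (img.shape[0], 1))
assert abs(img.mean()) < 1e-12  # the result is centered on zero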
Code example #2
File: base.py  Project: 0rchard/CUV
    def project_down(self):
        """ Project filters down to first (visible) layer """
        self.projection_results = dict()
        self.projection_results_lateral = dict()
        print "projecting down..."
        seqs = dict()

        for layernum in xrange(1, len(self.layers)):
            # project down from layer ``layernum''
            layer = self.layers[layernum]

            # turn on exactly one unit per layer
            cp.fill(layer.act, 0)
            if self.cfg.batchsize > layer.size:
                seqs[layernum] = np.array(xrange(0, layer.size))
            else:
                #seqs[layernum] = np.array(random.sample(xrange(layer.size), self.cfg.batchsize))
                seqs[layernum] = np.array(xrange(self.cfg.batchsize))

            for i in xrange(len(seqs[layernum])):
                layer.act.set(int(seqs[layernum][i]), i, 1)

            for i in reversed(xrange(1, layernum + 1)):
                lower_layer = self.layers[i - 1]
                self.weights[i - 1].downPass(lower_layer, self.layers[i], sample=False)

            img = self.layers[0].act.np
            img -= np.tile(img.mean(axis=1), (img.shape[1], 1)).T
            img -= np.tile(img.mean(axis=0), (img.shape[0], 1))
            self.projection_results[layernum] = img
Code example #3
File: mlp.py  Project: 0rchard/CUV
  def forward(self, input, weight, bias, linear=False):

    result = cp.dev_tensor_float_cm([weight.shape[1], input.shape[1]])
    cp.fill(result, 0)
    cp.prod(result, weight, input, "t", "n")
    cp.matrix_plus_col(result, bias)
    if not linear: cp.apply_scalar_functor(result, cp.scalar_functor.SIGM)

    return result
Code example #4
File: mlp.py  Project: stjordanis/CUV
    def forward(self, input, weight, bias, linear=False):

        result = cp.dev_tensor_float_cm([weight.shape[1], input.shape[1]])
        cp.fill(result, 0)
        cp.prod(result, weight, input, "t", "n")
        cp.matrix_plus_col(result, bias)
        if not linear: cp.apply_scalar_functor(result, cp.scalar_functor.SIGM)

        return result
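Note: in forward above, cp.prod(result, weight, input, "t", "n") multiplies the transposed weight matrix with the input (the 'n'/'t' transpose flags also appear in example #33), cp.matrix_plus_col adds the bias to every column, and SIGM applies a logistic sigmoid. A NumPy sketch of the same computation, for reference:

import numpy as np

def forward_np(input, weight, bias, linear=False):
    # result = weight^T * input, with the bias broadcast across batch columns
    result = weight.T.dot(input) + bias[:, None]
    return result if linear else 1.0 / (1.0 + np.exp(-result))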
Code example #5
 def update_func():
     try:
         cp.fill(node.delta, 0.0)
     except Exception:
         glog.warn("Could not reset node.delta!")
     gd = cn.gradient_descent(deriv_op, 0, [node])
     gd.swiper.fprop()
     gd.swiper.bprop()
     return node.delta.np
Code example #6
File: image_ops.py  Project: 0rchard/CUV
def gray_test(ni):
    src = cp.push(to_cmuc(np.tile(ni, (1, 4))))
    dst = cp.dev_matrix_cmf(src.h, src.w)
    cp.fill(dst, 0)
    cp.image_move(dst, src, 128, 128, 1, -10, -4)
    res = cp.pull(dst)
    #set_trace()
    plt.matshow(res[0:128**2, 0].reshape(128, 128))
    plt.colorbar()
    plt.show()
Code example #7
def gray_test(ni):
    src = cp.push(to_cmuc(np.tile(ni, (1, 4))))
    dst = cp.dev_matrix_cmf(src.h, src.w)
    cp.fill(dst, 0)
    cp.image_move(dst, src, 128, 128, 1, -10, -4)
    res = cp.pull(dst)
    #set_trace()
    plt.matshow(res[0:128**2, 0].reshape(128, 128))
    plt.colorbar()
    plt.show()
Code example #8
def _tmp(dim1, dim2, value):
    """Function to create a filled matrix.
       This demonstrates how CUV can be extended using python.

    @param dim1 -- number of rows.
    @param dim2 -- number of columns.
    @param value -- value to fill each entry with.

    """
    mat = cp.dev_tensor_float_cm([dim1, dim2])
    cp.fill(mat, value)
    return mat
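Note: a hypothetical usage sketch for the helper above, following the push/pull conventions of example #32 (the values here are illustrative):

mat = _tmp(3, 4, 2.0)  # 3x4 column-major device matrix, every entry 2.0
h = mat.np             # pull back to numpy, as in example #32
assert h.sum() == 3 * 4 * 2.0
mat.dealloc()          # explicitly free device memory (optional)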
Code example #9
File: image_ops.py  Project: 0rchard/CUV
def color_test(ni):
    ts = 128
    src = cp.push(to_cmuc(np.tile(ni, (1, 4))))
    dst = cp.dev_matrix_cmf(ts**2 * 3, src.w)
    cp.fill(dst, 0)
    cp.image_move(dst, src, 128, ts, 4, -10, -4)
    res = cp.pull(dst)
    plt.matshow(res[0:ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.matshow(res[ts**2:2*ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.matshow(res[2*ts**2:3*ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.show()
Code example #10
File: base.py  Project: 0rchard/CUV
    def __init__(self, layer1, layer2, cfg, layernum):
        self.mat = cp.dev_tensor_float_cm([layer1.size, layer2.size])
        cp.fill(self.mat, 0)
        cp.add_rnd_normal(self.mat)
        fact = 1.0
        if layer2.unit_type == UnitType.binary or layer1.unit_type == UnitType.binary:
            # the 0.5 stems from the fact that our upper layer has activation 0.5 on average, not 0, if we use binary hidden units.
            fact = 0.5

        self.mat *= fact / math.sqrt(max(layer1.size, layer2.size))
        self.allocBias(layer1, layer2)
Code example #11
File: base.py  Project: stjordanis/CUV
    def __init__(self, layer1, layer2, cfg, layernum):
        self.mat = cp.dev_tensor_float_cm([layer1.size, layer2.size])
        cp.fill(self.mat, 0)
        cp.add_rnd_normal(self.mat)
        fact = 1.0
        if layer2.unit_type == UnitType.binary or layer1.unit_type == UnitType.binary:
            # the 0.5 stems from the fact that our upper layer has activation 0.5 on average, not 0, if we use binary hidden units.
            fact = 0.5

        self.mat *= fact / math.sqrt(max(layer1.size, layer2.size))
        self.allocBias(layer1, layer2)
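Note: in NumPy terms, the constructor above draws standard-normal weights and rescales them by fact / sqrt(max(layer1.size, layer2.size)), with fact halved when either layer uses binary units. An equivalent initialization sketch (layer sizes are illustrative):

import math
import numpy as np

size1, size2 = 784, 256
fact = 0.5  # binary units: average activation 0.5 rather than 0
mat = np.random.randn(size1, size2)
mat *= fact / math.sqrt(max(size1, size2))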
Code example #12
File: mlp.py  Project: 0rchard/CUV
  def train(self, mbatch_provider, numberRounds, batchSize, useRPROP=0):
    self.useRPROP = useRPROP

    for r in xrange(numberRounds):
        numberPictures = 0

        print self.cfg.workdir + ": Epoch ", r+1, "/", numberRounds
        self.preEpochHook(self, r)

        self.NumCorrect = 0
        updateOnlyLast = r < self.cfg.finetune_onlylast

        teachbatch = None

        batch_idx = 0
        output, indices = [], []
        while True:
            try:
                output = []
                output.append(cp.dev_tensor_float_cm([self.cfg.px * self.cfg.py * self.cfg.maps_bottom, batchSize]))
                teachbatch = mbatch_provider.getMiniBatch(batchSize, output[0], return_teacher=True, id=batch_idx)

                numberPictures += teachbatch.shape[1]
                batch_idx += 1

                # forward pass through all layers
                for i in xrange(self.NumberOfLayers-1):
                   linear = self.cfg.finetune_softmax and i == self.NumberOfLayers-2 # set output layer to linear
                   output.append(self.forward(output[i], self.Weights[i], self.Bias[i], linear=linear))

                self.NumCorrect += self.getCorrect(output[-1], teachbatch)

                ## backward pass
                self.backward(output, teachbatch, indices, batchSize, updateOnlyLast, batch_idx)

            except MiniBatchProviderEmpty: # mbatch provider is empty
                break
            finally:
                map(lambda x: x.dealloc(), output)
                map(lambda x: x and x.dealloc(), indices)
                if teachbatch: teachbatch.dealloc()
                mbatch_provider.forgetOriginalData()

        if not self.cfg.finetune_online_learning:
            self.applyDeltaWeights(self.dWeights, self.dBias, updateOnlyLast, batchSize)
            map(lambda x: cp.fill(x, 0), self.dWeights)
            map(lambda x: cp.fill(x, 0), self.dBias)

        self.Errorrate.append((numberPictures - self.NumCorrect) / float(numberPictures))

        print "Train Correctly Classified: ", self.NumCorrect, "/", numberPictures
        print "Train Error-Rate:                 %2.3f"% (self.Errorrate[-1]*100)
Code example #13
def color_test(ni):
    ts = 128
    src = cp.push(to_cmuc(np.tile(ni, (1, 4))))
    dst = cp.dev_matrix_cmf(ts**2 * 3, src.w)
    cp.fill(dst, 0)
    cp.image_move(dst, src, 128, ts, 4, -10, -4)
    res = cp.pull(dst)
    plt.matshow(res[0:ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.matshow(res[ts**2:2 * ts**2, 0].reshape(ts, ts), cmap=plt.cm.bone_r)
    plt.matshow(res[2 * ts**2:3 * ts**2, 0].reshape(ts, ts),
                cmap=plt.cm.bone_r)
    plt.show()
Code example #14
File: mlp.py  Project: 0rchard/CUV
  def delta_outputSoftMax(self, calculated, correct):
    derivative = calculated.copy()
    cp.apply_scalar_functor(derivative, cp.scalar_functor.EXP)
    sums = cp.dev_tensor_float(calculated.shape[1])
    cp.fill(sums, 0)
    cp.reduce_to_row(sums, derivative, cp.reduce_functor.ADD)
    cp.apply_scalar_functor(sums, cp.scalar_functor.ADD, 0.1 / derivative.shape[0])
    rv = cp.transposed_view(derivative)
    cp.matrix_divide_col(rv, sums)

    cp.apply_binary_functor(derivative, correct, cp.binary_functor.AXPBY, -1., 1.)
    sums.dealloc()

    return derivative
Code example #15
File: minibatch_provider.py  Project: stjordanis/CUV
 def update_stats(self, batch):
     vmin = cp.dev_tensor_float(batch.shape[0])
     vmax = cp.dev_tensor_float(batch.shape[0])
     mean = cp.dev_tensor_float(batch.shape[0])
     mean2 = cp.dev_tensor_float(batch.shape[0])
     map(lambda x: cp.fill(x, 0), [mean, mean2])
     cp.reduce_to_col(mean, batch)
     cp.reduce_to_col(mean2, batch, cp.reduce_functor.ADD_SQUARED)
     cp.reduce_to_col(vmin, batch, cp.reduce_functor.MIN)
     cp.reduce_to_col(vmax, batch, cp.reduce_functor.MAX)
     if "N" in self.__dict__:
         self.N += batch.shape[1]
         cp.apply_binary_functor(self.mean, mean, cp.binary_functor.ADD)
         cp.apply_binary_functor(self.mean2, mean2, cp.binary_functor.ADD)
         cp.apply_binary_functor(self.min, vmin, cp.binary_functor.MIN)
         cp.apply_binary_functor(self.max, vmax, cp.binary_functor.MAX)
         mean.dealloc()
         mean2.dealloc()
         vmin.dealloc()
         vmax.dealloc()
     else:
         self.N = batch.shape[1]
         self.mean = mean
         self.mean2 = mean2
         self.min = vmin
         self.max = vmax
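Note: update_stats only accumulates running per-feature sums (mean), sums of squares (mean2, via ADD_SQUARED), and minima/maxima across batches; turning the sums into statistics is assumed to happen elsewhere. A hedged NumPy sketch of that finalization step (not shown in this snippet):

import numpy as np

def finalize_stats(sum_, sum_sq, N):
    # per-feature mean and variance from the running sums accumulated above
    mean = sum_ / N
    var = sum_sq / N - mean ** 2
    return mean, var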
Code example #16
File: mlp.py  Project: stjordanis/CUV
    def delta_outputSoftMax(self, calculated, correct):
        derivative = calculated.copy()
        cp.apply_scalar_functor(derivative, cp.scalar_functor.EXP)
        sums = cp.dev_tensor_float(calculated.shape[1])
        cp.fill(sums, 0)
        cp.reduce_to_row(sums, derivative, cp.reduce_functor.ADD)
        cp.apply_scalar_functor(sums, cp.scalar_functor.ADD,
                                0.1 / derivative.shape[0])
        rv = cp.transposed_view(derivative)
        cp.matrix_divide_col(rv, sums)

        cp.apply_binary_functor(derivative, correct, cp.binary_functor.AXPBY,
                                -1., 1.)
        sums.dealloc()

        return derivative
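Note: taken together, the calls above exponentiate the activations, sum each column (reduce_to_row), add a small smoothing term 0.1/n, and divide every column by its sum, i.e. a per-column softmax that is then combined with the target matrix via AXPBY. A NumPy sketch of the column-wise softmax part (smoothing term omitted):

import numpy as np

def softmax_columns(x):
    # mirrors EXP -> reduce_to_row(ADD) -> matrix_divide_col above
    e = np.exp(x)
    return e / e.sum(axis=0)

s = softmax_columns(np.array([[1.0, 0.0], [2.0, 0.0], [3.0, 0.0]]))
assert np.allclose(s.sum(axis=0), 1.0)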
Code example #17
File: minibatch_provider.py  Project: 0rchard/CUV
 def update_stats(self, batch):
     vmin  = cp.dev_tensor_float(batch.shape[0])
     vmax  = cp.dev_tensor_float(batch.shape[0])
     mean  = cp.dev_tensor_float(batch.shape[0])
     mean2 = cp.dev_tensor_float(batch.shape[0])
     map(lambda x: cp.fill(x, 0), [mean, mean2])
     cp.reduce_to_col(mean, batch)
     cp.reduce_to_col(mean2, batch, cp.reduce_functor.ADD_SQUARED)
     cp.reduce_to_col(vmin, batch, cp.reduce_functor.MIN)
     cp.reduce_to_col(vmax, batch, cp.reduce_functor.MAX)
     if "N" in self.__dict__:
         self.N += batch.shape[1]
         cp.apply_binary_functor(self.mean, mean, cp.binary_functor.ADD)
         cp.apply_binary_functor(self.mean2, mean2, cp.binary_functor.ADD)
         cp.apply_binary_functor(self.min, vmin, cp.binary_functor.MIN)
         cp.apply_binary_functor(self.max, vmax, cp.binary_functor.MAX)
         mean.dealloc()
         mean2.dealloc()
         vmin.dealloc()
         vmax.dealloc()
     else:
         self.N     = batch.shape[1]
         self.mean  = mean
         self.mean2 = mean2
         self.min   = vmin
         self.max   = vmax
Code example #18
File: mlp.py  Project: stjordanis/CUV
    def backward(self, output, teacher, indices, batchSize, updateOnlyLast,
                 batch_idx):
        deltaWeights = []
        deltaBias = []
        derivative = []
        if self.cfg.finetune_softmax:
            derivative.append(self.delta_outputSoftMax(output[-1], teacher))
        else:
            derivative.append(self.delta_output(output[-1], teacher))

        for i in reversed(xrange(1, self.NumberOfLayers - 1)):
            derivative.append(
                self.delta_hidden(self.Weights[i], derivative[-1], output[i]))
        derivative.reverse()

        #DeltaWeights
        for i in reversed(xrange(self.NumberOfLayers - 1)):
            deltaWeights.append(
                self.calculateDeltaWeights(derivative[i], output[i],
                                           self.Weights[i]))
        deltaWeights.reverse()

        #DeltaBias
        for i in xrange(self.NumberOfLayers - 1):
            self.createFilled(deltaBias, self.Bias[i].size, 1, 0)
            cp.reduce_to_col(deltaBias[-1], derivative[i])

        # Weight Update
        if self.cfg.finetune_online_learning and not self.useRPROP:
            self.applyDeltaWeights(deltaWeights, deltaBias, updateOnlyLast,
                                   batchSize)
        elif self.cfg.finetune_online_learning and self.useRPROP and batch_idx % 16 == 0:
            self.applyDeltaWeights(self.dWeights, self.dBias, updateOnlyLast,
                                   batchSize)
            map(lambda x: cp.fill(x, 0), self.dWeights)
            map(lambda x: cp.fill(x, 0), self.dBias)
        else:
            for i in xrange(self.NumberOfLayers - 1):
                cp.apply_binary_functor(self.dWeights[i], deltaWeights[i],
                                        cp.binary_functor.ADD)
                cp.apply_binary_functor(self.dBias[i], deltaBias[i],
                                        cp.binary_functor.ADD)

        da = lambda x: x.dealloc()
        map(da, deltaWeights)
        map(da, deltaBias)
        map(da, derivative)
Code example #19
    def __init__(self, source_layer, target_layer):
        """Constructor

        @param source_layer reference to previous neuron layer.
        @param target_layer reference to next neuron layer.
        """

        self.source = source_layer
        self.target = target_layer
        dim1 = self.target.activations.shape[0]
        dim2 = self.source.activations.shape[0]
        self.weight = cp.get_filled_matrix(dim1, dim2, 0.0)
        cp.fill_rnd_uniform(self.weight)
        self.weight -= 0.5
        self.weight /= 10.0
        self.bias = cp.dev_tensor_float(dim1)
        cp.fill(self.bias, 0)
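Note: the three lines after fill_rnd_uniform shift and scale the uniform draw, leaving the weights uniform in [-0.05, 0.05] and the bias at zero. An equivalent NumPy sketch (dimensions are illustrative):

import numpy as np

dim1, dim2 = 8, 4
weight = np.random.uniform(size=(dim1, dim2))
weight -= 0.5   # center on zero: now in [-0.5, 0.5]
weight /= 10.0  # scale down: now in [-0.05, 0.05]
bias = np.zeros(dim1)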
Code example #20
 def denominator(self, batchsize):
     acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
     actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
     row = cp.dev_tensor_float([batchsize])
     cp.fill(acth, 0.0)
     cp.fill(actv, 0.0)
     cp.fill(row, 0.0)
     n = acth.shape[0]
     nmax = 2**n
     if nmax % batchsize != 0:
         print "Error: 2**n=%d must be dividable by batchsize=%d!" % (
             nmax, batchsize)
         sys.exit(1)
     L = []
     widgets = [
         "Denominator: ",
         Percentage(), ' ',
         Bar(marker=RotatingMarker()), ' ',
         ETA()
     ]
     pbar = ProgressBar(widgets=widgets, maxval=nmax)
     for i in xrange(0, nmax, acth.shape[1]):
         cp.set_binary_sequence(acth, i)
         L.append(self.partialsum(acth, actv, row))
         if (i / acth.shape[1]) % 100 == 0:
             pbar.update(i)
     pbar.finish()
     for m in [actv, acth, row]:
         m.dealloc()
     return math.fsum(L)
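Note: the loop above visits all 2**n binary hidden configurations in batches of batchsize columns; cp.set_binary_sequence(acth, i) presumably writes the binary encodings of i .. i+batchsize-1 into the columns of acth. A small NumPy sketch of such an enumeration (hypothetical helper, not the CUV call):

import numpy as np

def binary_sequence(n_bits, start, count):
    # column j holds the binary encoding of start + j
    ints = np.arange(start, start + count)
    return (ints[None, :] >> np.arange(n_bits)[:, None]) & 1

batch = binary_sequence(3, 0, 8)  # all 2**3 hidden states at once
assert batch.shape == (3, 8)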
Code example #21
File: base.py  Project: stjordanis/CUV
 def allocUpdateMatrix(self):
     self.w_tmp = cp.dev_tensor_float_cm(self.mat.shape)
     cp.fill(self.w_tmp, 0)
     self.blo_tmp = cp.dev_tensor_float(len(self.bias_lo))
     self.bhi_tmp = cp.dev_tensor_float(len(self.bias_hi))
     cp.fill(self.blo_tmp, 0)
     cp.fill(self.bhi_tmp, 0)
Code example #22
File: calc_part_func.py  Project: kgl-prml/CUV
    def numerator(self, mbp, batchsize):
        sid = 0
        actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
        acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
        row = cp.dev_tensor_float([batchsize])
        cp.fill(acth, 0.0)
        cp.fill(actv, 0.0)
        cp.fill(row, 0)
        print "Numerator: ",
        L = []
        try:
            while True:
                mbp.getMiniBatch(batchsize, actv, sid)
                mbp.forgetOriginalData()
                sid += 1
                L.append(self.partialsumV(actv, acth, row))
                sys.stdout.write(".")
                sys.stdout.flush()

        except minibatch_provider.MiniBatchProviderEmpty:
            print "done."
            pass
        for m in [actv, acth, row]:
            m.dealloc()
        return math.fsum(L) / len(L)
Code example #23
    def numerator(self, mbp, batchsize):
        sid = 0
        actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
        acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
        row = cp.dev_tensor_float([batchsize])
        cp.fill(acth, 0.0)
        cp.fill(actv, 0.0)
        cp.fill(row, 0)
        print "Numerator: ",
        L = []
        try:
            while True:
                mbp.getMiniBatch(batchsize, actv, sid)
                mbp.forgetOriginalData()
                sid += 1
                L.append(self.partialsumV(actv, acth, row))
                sys.stdout.write('.')
                sys.stdout.flush()

        except minibatch_provider.MiniBatchProviderEmpty:
            print "done."
            pass
        for m in [actv, acth, row]:
            m.dealloc()
        return math.fsum(L) / len(L)
Code example #24
File: base.py  Project: 0rchard/CUV
 def allocUpdateMatrix(self):
     self.w_tmp = cp.dev_tensor_float_cm(self.mat.shape)
     cp.fill(self.w_tmp, 0)
     self.blo_tmp = cp.dev_tensor_float(len(self.bias_lo))
     self.bhi_tmp = cp.dev_tensor_float(len(self.bias_hi))
     cp.fill(self.blo_tmp, 0)
     cp.fill(self.bhi_tmp, 0)
Code example #25
File: mlp.py  Project: 0rchard/CUV
  def backward(self, output, teacher, indices, batchSize, updateOnlyLast, batch_idx):
    deltaWeights = []
    deltaBias = []
    derivative = []
    if self.cfg.finetune_softmax:
        derivative.append(self.delta_outputSoftMax(output[-1], teacher))
    else:
        derivative.append(self.delta_output(output[-1], teacher))

    for i in reversed(xrange(1, self.NumberOfLayers-1)):
        derivative.append(self.delta_hidden(self.Weights[i], derivative[-1], output[i]))
    derivative.reverse()

    # DeltaWeights
    for i in reversed(xrange(self.NumberOfLayers-1)):
        deltaWeights.append(self.calculateDeltaWeights(derivative[i], output[i], self.Weights[i]))
    deltaWeights.reverse()

    # DeltaBias
    for i in xrange(self.NumberOfLayers-1):
        self.createFilled(deltaBias, self.Bias[i].size, 1, 0)
        cp.reduce_to_col(deltaBias[-1], derivative[i])

    # Weight Update
    if self.cfg.finetune_online_learning and not self.useRPROP:
        self.applyDeltaWeights(deltaWeights, deltaBias, updateOnlyLast, batchSize)
    elif self.cfg.finetune_online_learning and self.useRPROP and batch_idx % 16 == 0:
        self.applyDeltaWeights(self.dWeights, self.dBias, updateOnlyLast, batchSize)
        map(lambda x: cp.fill(x, 0), self.dWeights)
        map(lambda x: cp.fill(x, 0), self.dBias)
    else:
        for i in xrange(self.NumberOfLayers-1):
            cp.apply_binary_functor(self.dWeights[i], deltaWeights[i], cp.binary_functor.ADD)
            cp.apply_binary_functor(self.dBias[i], deltaBias[i], cp.binary_functor.ADD)

    da = lambda x: x.dealloc()
    map(da, deltaWeights)
    map(da, deltaBias)
    map(da, derivative)
Code example #26
File: calc_part_func.py  Project: kgl-prml/CUV
 def denominator(self, batchsize):
     acth = cp.dev_tensor_float_cm([self.weight.shape[1], batchsize])
     actv = cp.dev_tensor_float_cm([self.weight.shape[0], batchsize])
     row = cp.dev_tensor_float([batchsize])
     cp.fill(acth, 0.0)
     cp.fill(actv, 0.0)
     cp.fill(row, 0.0)
     n = acth.shape[0]
     nmax = 2 ** n
     if nmax % batchsize != 0:
         print "Error: 2**n=%d must be dividable by batchsize=%d!" % (nmax, batchsize)
         sys.exit(1)
     L = []
     widgets = ["Denominator: ", Percentage(), " ", Bar(marker=RotatingMarker()), " ", ETA()]
     pbar = ProgressBar(widgets=widgets, maxval=nmax)
     for i in xrange(0, nmax, acth.shape[1]):
         cp.set_binary_sequence(acth, i)
         L.append(self.partialsum(acth, actv, row))
         if (i / acth.shape[1]) % 100 == 0:
             pbar.update(i)
     pbar.finish()
     for m in [actv, acth, row]:
         m.dealloc()
     return math.fsum(L)
Code example #27
File: base.py  Project: 0rchard/CUV
 def alloc(self):
     self.act = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.act, 0)
     return self
Code example #28
File: mlp.py  Project: stjordanis/CUV
 def createFilled(self, matList, dim1, dim2, value):
     if dim2 == 1:
         matList.append(cp.dev_tensor_float([dim1]))
     else:
         matList.append(cp.dev_tensor_float_cm([dim1, dim2]))
     cp.fill(matList[-1], value)
Code example #29
File: base.py  Project: 0rchard/CUV
 def allocBias(self, layer1, layer2):
     self.bias_lo = cp.dev_tensor_float(layer1.size)
     self.bias_hi = cp.dev_tensor_float(layer2.size)
     cp.fill(self.bias_lo, 0)
     cp.fill(self.bias_hi, 0)
Code example #30
File: mlp.py  Project: stjordanis/CUV
 def createCopyFilled(self, matList, someMat, value):
     if len(someMat.shape) < 2 or someMat.shape[1] == 1:
         matList.append(cp.dev_tensor_float(someMat.shape))
     else:
         matList.append(cp.dev_tensor_float_cm(someMat.shape))
     cp.fill(matList[-1], value)
Code example #31
File: base.py  Project: 0rchard/CUV
 def allocPChain(self):
     self.pchain = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.pchain, 0)
Code example #32
File: example1.py  Project: 0rchard/CUV
import cuv_python as cp
import numpy as np

h = np.zeros((1, 256))                                  # create numpy matrix
d = cp.dev_tensor_float(h)                              # constructed by copying the numpy array

h2 = np.zeros((1, 256)).copy("F")                       # create column-major numpy matrix
d2 = cp.dev_tensor_float_cm(h2)                         # creates a dev_tensor_float_cm (column-major float) object

cp.fill(d, 1)                                           # terse form
cp.apply_nullary_functor(d, cp.nullary_functor.FILL, 1) # verbose form

h = d.np                                                # pull and convert to numpy
assert np.sum(h) == 256
assert cp.sum(d) == 256
d.dealloc()                                             # explicitly deallocate memory (optional)
Code example #33
File: example2.py  Project: 0rchard/CUV
import cuv_python as cp

C = cp.dev_tensor_float_cm([2048, 2048])  # column major tensor
A = cp.dev_tensor_float_cm([2048, 2048])
B = cp.dev_tensor_float_cm([2048, 2048])
cp.fill(C, 0)                       # fill with some defined values, not really necessary here
cp.sequence(A)
cp.sequence(B)
cp.apply_binary_functor(B, A, cp.binary_functor.MULT)  # elementwise multiplication
B *= A                                                 # operators also work (elementwise)
cp.prod(C, A, B, 'n', 't')                             # matrix multiplication: C = A * B^T
C = cp.prod(A, B.T)                                    # numpy-like form, allocates a new matrix for the result
Code example #34
File: base.py  Project: stjordanis/CUV
 def alloc(self):
     self.act = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.act, 0)
     return self
Code example #35
File: base.py  Project: stjordanis/CUV
 def allocPChain(self):
     self.pchain = cp.dev_tensor_float_cm([self.size, self.bsize])
     cp.fill(self.pchain, 0)
Code example #36
File: mlp.py  Project: 0rchard/CUV
 def createCopyFilled(self, matList, someMat, value):
     if len(someMat.shape) < 2 or someMat.shape[1] == 1:
         matList.append(cp.dev_tensor_float(someMat.shape))
     else:
         matList.append(cp.dev_tensor_float_cm(someMat.shape))
     cp.fill(matList[-1], value)
Code example #37
File: mlp.py  Project: 0rchard/CUV
 def createFilled(self, matList, dim1, dim2, value):
   if dim2 == 1:
       matList.append(cp.dev_tensor_float([dim1]))
   else:
       matList.append(cp.dev_tensor_float_cm([dim1, dim2]))
   cp.fill(matList[-1], value)
Code example #38
File: base.py  Project: stjordanis/CUV
 def allocBias(self, layer1, layer2):
     self.bias_lo = cp.dev_tensor_float(layer1.size)
     self.bias_hi = cp.dev_tensor_float(layer2.size)
     cp.fill(self.bias_lo, 0)
     cp.fill(self.bias_hi, 0)
Code example #39
File: mlp.py  Project: stjordanis/CUV
    def train(self, mbatch_provider, numberRounds, batchSize, useRPROP=0):
        self.useRPROP = useRPROP

        for r in xrange(numberRounds):
            numberPictures = 0

            print self.cfg.workdir + ": Epoch ", r + 1, "/", numberRounds
            self.preEpochHook(self, r)

            self.NumCorrect = 0
            updateOnlyLast = r < self.cfg.finetune_onlylast

            teachbatch = None

            batch_idx = 0
            output, indices = [], []
            while True:
                try:
                    output = []
                    output.append(
                        cp.dev_tensor_float_cm([
                            self.cfg.px * self.cfg.py * self.cfg.maps_bottom,
                            batchSize
                        ]))
                    teachbatch = mbatch_provider.getMiniBatch(
                        batchSize,
                        output[0],
                        return_teacher=True,
                        id=batch_idx)

                    numberPictures += teachbatch.shape[1]
                    batch_idx += 1

                    # forward pass through all layers
                    for i in xrange(self.NumberOfLayers - 1):
                        linear = self.cfg.finetune_softmax and i == self.NumberOfLayers - 2  # set output layer to linear
                        output.append(
                            self.forward(output[i],
                                         self.Weights[i],
                                         self.Bias[i],
                                         linear=linear))

                    self.NumCorrect += self.getCorrect(output[-1], teachbatch)

                    ## backward pass
                    self.backward(output, teachbatch, indices, batchSize,
                                  updateOnlyLast, batch_idx)

                except MiniBatchProviderEmpty:  # mbatch provider is empty
                    break
                finally:
                    map(lambda x: x.dealloc(), output)
                    map(lambda x: x and x.dealloc(), indices)
                    if teachbatch: teachbatch.dealloc()
                    mbatch_provider.forgetOriginalData()

            if not self.cfg.finetune_online_learning:
                self.applyDeltaWeights(self.dWeights, self.dBias,
                                       updateOnlyLast, batchSize)
                map(lambda x: cp.fill(x, 0), self.dWeights)
                map(lambda x: cp.fill(x, 0), self.dBias)

            self.Errorrate.append(
                (numberPictures - self.NumCorrect) / float(numberPictures))

            print "Train Correctly Classified: ", self.NumCorrect, "/", numberPictures
            print "Train Error-Rate:                 %2.3f" % (
                self.Errorrate[-1] * 100)
Code example #40
File: example2.py  Project: stjordanis/CUV
import cuv_python as cp

C = cp.dev_tensor_float_cm([2048, 2048])  # column major tensor
A = cp.dev_tensor_float_cm([2048, 2048])
B = cp.dev_tensor_float_cm([2048, 2048])
cp.fill(C, 0)  # fill with some defined values, not really necessary here
cp.sequence(A)
cp.sequence(B)
cp.apply_binary_functor(B, A,
                        cp.binary_functor.MULT)  # elementwise multiplication
B *= A  # operators also work (elementwise)
cp.prod(C, A, B, 'n', 't')  # matrix multiplication: C = A * B^T
C = cp.prod(A, B.T)  # numpy-like form, allocates new matrix for result