Example #1
File: base.py Project: 0rchard/CUV
    def save(self, layer, iter, mbatch_provider):

        mbatch_provider.forgetOriginalData()
        mbatch_provider.getMiniBatch(self.cfg.batchsize, self.layers[layer].act)
        self.upPass(layer, sample=False)
        self.downPass(layer + 1, sample=False)
        for l in reversed(xrange(1, layer + 1)):
            self.downPass(l, sample=False)


        timestr = ""
        if "last_time_stamp" in self.__dict__:
            ts   = time.clock()
            td = ts - self.last_time_stamp
            if td > 0 and iter != self.last_time_stamp_iter:
                timestr = " %2.4es/img ; %2.4e img/s"% ( td / (self.cfg.batchsize*(iter - self.last_time_stamp_iter)), (self.cfg.batchsize*(iter - self.last_time_stamp_iter))/td)
        err = self.getErr(layer, mbatch_provider.sampleset)
        n   = cp.norm2(self.weights[layer].mat)
        print "Iter: ", iter, "Err: %02.06f |W| = %2.2f"%(err, n), timestr
        if self.cfg.save_every != 0 and iter % self.cfg.save_every == 0 and iter > 0:
            self.saveLayer(layer, self.cfg.workdir, "-%010d"%iter)
            self.saveOptions({"iter":iter}, layer)
            self.saveOptions({"reconstruction":err}, layer, "-%010d"%iter)

        self.err.append(err)
        self.last_time_stamp = time.clock()
        self.last_time_stamp_iter = iter
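
For context, a minimal sketch of how a monitoring/checkpoint hook like this might be driven from a training loop. The trainer instance `stack` and its `trainStep` method are illustrative assumptions, not part of CUV's API:

    # Hypothetical driver loop: `stack.trainStep` stands in for whatever
    # performs one CD update; `save` then prints timing/error stats and,
    # every cfg.save_every iterations, checkpoints the layer to cfg.workdir.
    for it in xrange(1, itermax):
        stack.trainStep(layer, it, mbatch_provider)  # assumed update routine
        stack.save(layer, it, mbatch_provider)
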
Example #2
File: base.py Project: stjordanis/CUV
    def save(self, layer, iter, mbatch_provider):

        mbatch_provider.forgetOriginalData()
        mbatch_provider.getMiniBatch(self.cfg.batchsize,
                                     self.layers[layer].act)
        self.upPass(layer, sample=False)
        self.downPass(layer + 1, sample=False)
        for l in reversed(xrange(1, layer + 1)):
            self.downPass(l, sample=False)

        timestr = ""
        if "last_time_stamp" in self.__dict__:
            ts = time.clock()
            td = ts - self.last_time_stamp
            if td > 0 and iter != self.last_time_stamp_iter:
                timestr = " %2.4es/img ; %2.4e img/s" % (
                    td / (self.cfg.batchsize *
                          (iter - self.last_time_stamp_iter)),
                    (self.cfg.batchsize *
                     (iter - self.last_time_stamp_iter)) / td)
        err = self.getErr(layer, mbatch_provider.sampleset)
        n = cp.norm2(self.weights[layer].mat)
        print "Iter: ", iter, "Err: %02.06f |W| = %2.2f" % (err, n), timestr
        if self.cfg.save_every != 0 and iter % self.cfg.save_every == 0 and iter > 0:
            self.saveLayer(layer, self.cfg.workdir, "-%010d" % iter)
            self.saveOptions({"iter": iter}, layer)
            self.saveOptions({"reconstruction": err}, layer, "-%010d" % iter)

        self.err.append(err)
        self.last_time_stamp = time.clock()
        self.last_time_stamp_iter = iter
Example #3
File: base.py Project: 0rchard/CUV
 def getErr(self, layernum, orig_data):
     cp.apply_binary_functor(self.layers[layernum].act, orig_data,
                             cp.binary_functor.SUBTRACT)
     sqerr = cp.norm2(self.layers[layernum].act)**2
     return sqerr / (self.layers[layernum].size * self.cfg.batchsize)
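
What `getErr` computes is the mean squared reconstruction error per unit: `cp.apply_binary_functor(..., SUBTRACT)` overwrites the activations with the difference in place, and `cp.norm2` is the Euclidean norm. A minimal NumPy sketch of the same quantity (the array layout is an assumption):

    import numpy as np

    def get_err(act, orig_data):
        # act, orig_data: (layer_size, batchsize) arrays (assumed layout)
        diff = act - orig_data              # CUV does this in place via SUBTRACT
        sqerr = np.linalg.norm(diff) ** 2   # squared Frobenius norm, like cp.norm2(...)**2
        return sqerr / diff.size            # == sqerr / (layer_size * batchsize)
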
Example #4
File: dbm.py Project: 0rchard/CUV
    def trainDBM(self, mbatch_provider, itermax):
        """ Train all layers of an RBM-Stack as a DBM using minibatches provided by mbatch_provider for itermax minibatches """
        try:
            ### if pcd get starting point for fantasies 
            if self.cfg.cd_type == CDType.pcd:
                self.resetPChain(mbatch_provider)
                mbatch_provider.forgetOriginalData()

            ### temporary matrix to save update
            for weightlayer in self.weights:
                weightlayer.allocUpdateMatrix()

            ### iterate over updates
            for iter in xrange(1, itermax):
                ### new learnrate if schedule
                learnrate = self.getLearnrate(iter, itermax) / 100
                sys.stdout.write('.')
                sys.stdout.flush()
                ### positive phase
                mbatch_provider.getMiniBatch(self.cfg.batchsize,self.layers[0].act)
                for layernum in xrange(len(self.weights)):
                    self.upPass(layernum,sample=False)
                uq = UpdateQ(len(self.layers))
                uq.push([1]) # must start w/ 0-th layer
                while uq.minupdates([0]) < self.cfg.dbm_minupdates:
                    layernum = uq.pop(firstlayer=1)
                    self.updateLayer(layernum,sample=False)
                for layernum in xrange(len(self.weights)):
                    self.weights[layernum].updateGradientPos(self.layers[layernum],self.layers[layernum+1])

                ### output stuff
                if iter != 0 and (iter%100) == 0:
                    self.downPass(1,sample=False)
                    err=self.getErr(0,mbatch_provider.sampleset)
                    print "Iter: ",iter, "Err: %02.06f"%err, "|W|: %02.06f"%cp.norm2(self.weights[0].mat)
                    print self.cfg.workdir,
                    if self.cfg.save_every != 0 and iter % self.cfg.save_every == 0 and iter > 0:
                        for layernum in xrange(len(self.weights)):
                            self.saveLayer(layernum,self.cfg.workdir,"-%010d"%iter)

                ### negative phase
                ### replace layer nodes with pcd-chain or do initial uppass 
                if self.cfg.cd_type == CDType.cdn:
                    for layernum in xrange(len(self.weights)):
                        self.upPass(layernum,sample=True)
                else:
                    for layer in self.layers:
                        layer.switchToPChain()

                uq = UpdateQ(len(self.layers))
                uq.push([1])
                while uq.minupdates() < self.cfg.dbm_minupdates:
                    layernum = uq.pop(firstlayer=0)
                    self.updateLayer(layernum,sample=True)

                ### update gradients
                for layernum in xrange(len(self.weights)):
                    self.weights[layernum].updateGradientNeg(self.layers[layernum],self.layers[layernum+1],self.cfg.batchsize)
                ### update weights and biases
                for weightlayer in self.weights:
                    weightlayer.updateStep(learnrate,self.cfg.cost)

                ### put original layer back in place
                if self.cfg.cd_type == CDType.pcd:
                    for layer in self.layers:
                        layer.switchToOrg()
                mbatch_provider.forgetOriginalData()

        finally:
            for weightlayer in self.weights:
                if "w_tmp" in weightlayer.__dict__:
                    weightlayer.deallocUpdateMatrix()
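
The gradient bookkeeping above (`updateGradientPos`/`updateGradientNeg`, then `updateStep`) implements the usual contrastive-divergence weight update. A minimal NumPy sketch of one such step; all names here are illustrative, and reading `cfg.cost` as an L2 weight cost is an assumption:

    import numpy as np

    def cd_weight_step(W, v_pos, h_pos, v_neg, h_neg, learnrate, cost, batchsize):
        # <v h>+ from the data-driven (positive) phase,
        # <v h>- from the fantasy-driven (negative) phase.
        grad = (v_pos.dot(h_pos.T) - v_neg.dot(h_neg.T)) / batchsize
        return W + learnrate * (grad - cost * W)  # weight decay via `cost` (assumed L2)
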
Example #5
File: base.py Project: stjordanis/CUV
 def getErr(self, layernum, orig_data):
     cp.apply_binary_functor(self.layers[layernum].act, orig_data,
                             cp.binary_functor.SUBTRACT)
     sqerr = cp.norm2(self.layers[layernum].act)**2
     return sqerr / ((self.layers[layernum].size) * self.cfg.batchsize)
Example #6
File: dbm.py Project: stjordanis/CUV
    def trainDBM(self, mbatch_provider, itermax):
        """ Train all layers of an RBM-Stack as a DBM using minibatches provided by mbatch_provider for itermax minibatches """
        try:
            ### if pcd get starting point for fantasies
            if self.cfg.cd_type == CDType.pcd:
                self.resetPChain(mbatch_provider)
                mbatch_provider.forgetOriginalData()

            ### temporary matrix to save update
            for weightlayer in self.weights:
                weightlayer.allocUpdateMatrix()

            ### iterate over updates
            for iter in xrange(1, itermax):
                ### new learnrate if schedule
                learnrate = self.getLearnrate(iter, itermax) / 100
                sys.stdout.write('.')
                sys.stdout.flush()
                ### positive phase
                mbatch_provider.getMiniBatch(self.cfg.batchsize,
                                             self.layers[0].act)
                for layernum in xrange(len(self.weights)):
                    self.upPass(layernum, sample=False)
                uq = UpdateQ(len(self.layers))
                uq.push([1])  # must start w/ 0-th layer
                while uq.minupdates([0]) < self.cfg.dbm_minupdates:
                    layernum = uq.pop(firstlayer=1)
                    self.updateLayer(layernum, sample=False)
                for layernum in xrange(len(self.weights)):
                    self.weights[layernum].updateGradientPos(
                        self.layers[layernum], self.layers[layernum + 1])

                ### output stuff
                if iter != 0 and (iter % 100) == 0:
                    self.downPass(1, sample=False)
                    err = self.getErr(0, mbatch_provider.sampleset)
                    print "Iter: ", iter, "Err: %02.06f" % err, "|W|: %02.06f" % cp.norm2(
                        self.weights[0].mat)
                    print self.cfg.workdir,
                    if self.cfg.save_every != 0 and iter % self.cfg.save_every == 0 and iter > 0:
                        for layernum in xrange(len(self.weights)):
                            self.saveLayer(layernum, self.cfg.workdir,
                                           "-%010d" % iter)

                ### negative phase
                ### replace layer nodes with pcd-chain or do initial uppass
                if self.cfg.cd_type == CDType.cdn:
                    for layernum in xrange(len(self.weights)):
                        self.upPass(layernum, sample=True)
                else:
                    for layer in self.layers:
                        layer.switchToPChain()

                uq = UpdateQ(len(self.layers))
                uq.push([1])
                while uq.minupdates() < self.cfg.dbm_minupdates:
                    layernum = uq.pop(firstlayer=0)
                    self.updateLayer(layernum, sample=True)

                ### update gradients
                for layernum in xrange(len(self.weights)):
                    self.weights[layernum].updateGradientNeg(
                        self.layers[layernum], self.layers[layernum + 1],
                        self.cfg.batchsize)
                ### update weights and biases
                for weightlayer in self.weights:
                    weightlayer.updateStep(learnrate, self.cfg.cost)

                ### put original layer back in place
                if self.cfg.cd_type == CDType.pcd:
                    for layer in self.layers:
                        layer.switchToOrg()
                mbatch_provider.forgetOriginalData()

        finally:
            for weightlayer in self.weights:
                if "w_tmp" in weightlayer.__dict__:
                    weightlayer.deallocUpdateMatrix()
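
The `UpdateQ` loop schedules layer refreshes until every layer has been updated at least `dbm_minupdates` times; since a DBM's layers only connect to their immediate neighbours, this can proceed by parity, sweeping odd-indexed layers and then even-indexed ones. A rough sketch of that scheduling idea (a simplification; CUV's actual `UpdateQ` is queue-based):

    def alternating_updates(n_layers, min_updates, update_layer, firstlayer=0):
        # Sweep odd-indexed layers, then even-indexed ones, until every
        # layer from `firstlayer` on has been refreshed `min_updates` times.
        counts = [0] * n_layers
        while min(counts[firstlayer:]) < min_updates:
            for parity in (1, 0):  # odd layers first, then even
                for l in range(firstlayer, n_layers):
                    if l % 2 == parity:
                        update_layer(l)
                        counts[l] += 1
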