Exemplo n.º 1
0
Arquivo: dbm.py Projeto: 0rchard/CUV
    def prepare_dbg(self,mbatch_provider,Npoint,nsteps,eval_start,save_callback):
        """Prepare the DBM's state for visualization.

        Loads a minibatch into the visible layer, optionally restarts the
        chain from noise, runs Gibbs updates scheduled by an UpdateQ, and
        finally copies layer activations and (small) weight matrices to
        host memory for display.

        @param mbatch_provider supplies minibatches (getMiniBatch, sampleset_)
        @param Npoint          number of sample points kept for display
        @param nsteps          minimum number of updates per layer while sampling
        @param eval_start      EvalStartType selecting the chain's start state
        @param save_callback   forwarded to save_fantasy for each saved frame
        """
        print "Preparing data for visualization..."
        mbatch_provider.getMiniBatch(self.cfg.batchsize, self.layers[0].act)
        if  eval_start == EvalStartType.trainingset:
            # chain starts directly from the loaded training minibatch
            print "Starting Eval from Trainingset"
            pass
        elif eval_start == EvalStartType.vnoise:
            # overwrite the visible layer with uniform noise scaled to [0, 0.3)
            print "Starting Eval from VNoise"
            cp.fill_rnd_uniform(self.layers[0].act)
            cp.apply_scalar_functor(self.layers[0].act,cp.scalar_functor.MULT,0.3)
        elif eval_start == EvalStartType.h1noise:
            # put noise on the first hidden layer and sample it down once
            print "Starting Eval from H1Noise"
            cp.fill_rnd_uniform(self.layers[1].act)
            cp.apply_scalar_functor(self.layers[1].act,cp.scalar_functor.MULT,0.3)
            self.downPass(1,sample=True)
        self.dbg_datout    = []
        video              = self.cfg.video
        # initial bottom-up pass so every layer has a consistent state
        for layer_num,layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample=False)

        uq = UpdateQ(len(self.layers))
        uq.push([1]) # start with some layer in between
        step = 0
        # stochastic phase: pop layers off the queue until every layer
        # has received at least nsteps updates
        while uq.minupdates([]) < nsteps:
            layernum = uq.pop(firstlayer=0)
            if video and layernum == 0:
                # record a video frame from the mean-field visible layer
                self.updateLayer(layernum,sample=False)
                self.save_fantasy(step, Npoint,save_callback, self.layers[0].act)
            self.updateLayer(layernum,sample=True)
            step+=1
        # settling phase: two extra rounds of deterministic (mean-field) updates
        while uq.minupdates([]) < nsteps+2:
            layernum = uq.pop(firstlayer=0)
            self.updateLayer(layernum,sample=False)
        self.updateLayer(0,sample=False)
        # pass up again before we save fantasies -- assures that we see bottom-up activities!
        for layer_num,layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample=False)
        self.save_fantasy(nsteps+1,Npoint,save_callback, self.layers[0].act)
        self.dbg_sampleset = mbatch_provider.sampleset_[:, 0:Npoint].T
        print "Pulling Layer-Activations..."
        # copy hidden-layer activations to host, keyed "<layer>-subs"
        self.act = {}
        self.act_info = {}
        for l in xrange(1, self.cfg.num_layers):
            L = self.layers[l]
            if l<self.cfg.num_layers-1:
                # px/py give the display grid; assumes square layers -- TODO confirm
                self.act_info["%d-subs"%l]   = dict(px=np.sqrt(L.size), py=np.sqrt(L.size))
                self.act["%d-subs"%l]   = L.act.np
        # pulling weights to host can exhaust memory; only try small matrices
        if self.weights[0].mat.shape[0] < 800*6:
            print "Trying to pull W0..."
            try:
                self.W=self.weights[0].mat.np
                if len(self.weights)>1:
                    self.W1=self.weights[1].mat.np
            except MemoryError:
                print("weights too big!")
        print "done."
Exemplo n.º 2
0
Arquivo: mlp.py Projeto: 0rchard/CUV
  def __init__(self, cfg, weights, biases):
    """Set up an MLP from pre-trained weights and biases.

    Allocates per-layer learning-rate and update buffers and, unless a
    saved last layer can be loaded, appends a randomly initialized dense
    output layer mapping the top hidden layer onto the classes.
    """
    self.cfg = cfg
    self.NumCorrect = 0
    self.Errorrate = []
    self.testError = []
    self.NumberOfLayers = cfg.num_layers + 1

    # identity hook, invoked before every training epoch
    self.preEpochHook = lambda mlp, epoch: mlp

    self.Weights = weights
    self.Bias = biases

    self.DeltaWeightsOld = []
    self.WeightsLearnRate = []
    self.dWeights = []
    self.dBias = []
    self.DeltaBiasOld = []
    self.BiasLearnRate = []

    initial_lr = 0.001
    # gradient accumulators are needed for batch learning and for online rprop
    need_grad_buffers = (not self.cfg.finetune_online_learning
                         or self.cfg.finetune_rprop)

    self.NumberOfNeuronsPerLayer = []
    for layer_idx in xrange(self.NumberOfLayers - 2):
        n_in, n_out = self.Weights[layer_idx].shape
        self.createCopyFilled(self.DeltaWeightsOld, self.Weights[layer_idx], 0)
        self.createCopyFilled(self.WeightsLearnRate, self.Weights[layer_idx], initial_lr)
        if need_grad_buffers:
            self.createCopyFilled(self.dWeights, self.Weights[layer_idx], 0)
            self.createCopyFilled(self.dBias, self.Bias[layer_idx], 0)
        self.createFilled(self.DeltaBiasOld, n_out, 1, 0)
        self.createFilled(self.BiasLearnRate, n_out, 1, initial_lr)
        self.NumberOfNeuronsPerLayer.append(n_in)

    # dense output layer: top hidden units -> class scores
    n_in, n_out = self.Weights[-1].shape[1], self.cfg.num_classes
    if not (self.cfg.load and self.loadLastLayer(n_in, n_out)):
        self.Weights.append(cp.dev_tensor_float_cm([n_in, n_out]))
        # uniform init shifted to [-0.5, 0.5), then scaled by the fan-in
        cp.fill_rnd_uniform(self.Weights[-1])
        cp.apply_scalar_functor(self.Weights[-1], cp.scalar_functor.SUBTRACT, 0.5)
        cp.apply_scalar_functor(self.Weights[-1], cp.scalar_functor.MULT, 1. / self.Weights[-2].shape[1])
        self.createFilled(self.Bias, n_out, 1, 0)
    self.createFilled(self.DeltaBiasOld, n_out, 1, 0)
    self.createFilled(self.BiasLearnRate, n_out, 1, initial_lr)
    self.createFilled(self.DeltaWeightsOld, n_in, n_out, 0)
    self.createFilled(self.WeightsLearnRate, n_in, n_out, initial_lr)
    if need_grad_buffers:
        self.createCopyFilled(self.dWeights, self.Weights[-1], 0)
        self.createCopyFilled(self.dBias, self.Bias[-1], 0)
    self.NumberOfNeuronsPerLayer.append(n_in)
    self.NumberOfNeuronsPerLayer.append(n_out)

    self.reconstruction_error = []
Exemplo n.º 3
0
    def __init__(self, source_layer, target_layer):
        """Create a weighted connection between two neuron layers.

        Weights are drawn uniformly from [0, 1), then shifted and scaled
        into [-0.05, 0.05); the bias column starts at zero.

        @param source_layer pointer to the previous neuron layer.
        @param target_layer pointer to the next neuron layer.
        """
        self.source = source_layer
        self.target = target_layer
        n_target = self.target.activations.h
        n_source = self.source.activations.h
        # weight matrix has one row per target unit, one column per source unit
        self.weight = cp.get_filled_matrix(n_target, n_source, 0.0)
        cp.fill_rnd_uniform(self.weight)
        cp.apply_scalar_functor(self.weight, cp.scalar_functor.SUBTRACT, 0.5)
        cp.apply_scalar_functor(self.weight, cp.scalar_functor.DIV, 10)
        self.bias = cp.get_filled_matrix(n_target, 1, 0)
Exemplo n.º 4
0
    def __init__(self, source_layer, target_layer):
        """Create a weighted connection between two neuron layers.

        Weights are initialized uniformly in [-0.05, 0.05); the bias
        vector starts at zero.

        @param source_layer reference to previous neuron layer.
        @param target_layer reference to next neuron layer.
        """
        self.source = source_layer
        self.target = target_layer
        rows = self.target.activations.shape[0]
        cols = self.source.activations.shape[0]
        self.weight = cp.get_filled_matrix(rows, cols, 0.0)
        cp.fill_rnd_uniform(self.weight)
        # shift/scale the uniform [0, 1) noise into [-0.05, 0.05) in place
        self.weight -= 0.5
        self.weight /= 10.0
        self.bias = cp.dev_tensor_float(rows)
        cp.fill(self.bias, 0)
Exemplo n.º 5
0
Arquivo: base.py Projeto: 0rchard/CUV
    def prepare_dbg(self, mbatch_provider, Npoint, nsteps, eval_start, save_callback):
        """Prepare network state for visualization.

        Loads a minibatch into the visible layer, optionally restarts the
        chain from noise, runs alternating up/down passes (stochastic for
        nsteps steps, then mean-field), and finally copies layer
        activations and (small) weight matrices to host memory.

        @param mbatch_provider supplies minibatches (getMiniBatch, sampleset_)
        @param Npoint          number of sample points kept for display
        @param nsteps          number of stochastic sampling steps
        @param eval_start      EvalStartType selecting the chain's start state
        @param save_callback   forwarded to save_fantasy for each saved frame
        """
        print "Preparing data for visualization..."
        mbatch_provider.getMiniBatch(self.cfg.batchsize, self.layers[0].act)
        if  eval_start ==  EvalStartType.trainingset:
            # chain starts directly from the loaded training minibatch
            print "Starting Eval from Trainingset"
            pass
        elif eval_start ==  EvalStartType.vnoise:
            # overwrite the visible layer with uniform noise scaled to [0, 0.3)
            print "Starting Eval from VNoise"
            cp.fill_rnd_uniform(self.layers[0].act)
            cp.apply_scalar_functor(self.layers[0].act, cp.scalar_functor.MULT, 0.3)
        elif eval_start ==  EvalStartType.h1noise:
            # put noise on the first hidden layer and sample it down once
            print "Starting Eval from H1Noise"
            cp.fill_rnd_uniform(self.layers[1].act)
            cp.apply_scalar_functor(self.layers[1].act, cp.scalar_functor.MULT, 0.3)
            self.downPass(1, sample = True)
        self.dbg_datout = []
        video = self.cfg.video
        # initial bottom-up pass so every layer has a consistent state
        for layer_num, layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample = False)
            #if layer_num+2 < len(self.layers):
                #assert(False)

        # nsteps stochastic steps followed by num_meanfield deterministic steps
        num_meanfield = 100
        for step in xrange(nsteps+num_meanfield):
            sample = not step>nsteps  # sample while step <= nsteps, mean-field after
            self.upPass(self.cfg.num_layers-2, sample = sample)
            if video:
                # record a frame: project the current state down to the visible layer
                for lay_num in reversed(xrange(1, self.cfg.num_layers)):
                      self.downPass(lay_num, sample = False)
                self.save_fantasy(step, Npoint, save_callback, self.layers[0].act)

            self.downPass(self.cfg.num_layers-1, sample = sample)
        # final deterministic top-down pass through all layers
        for layer_num in reversed(xrange(1, self.cfg.num_layers)):
          self.downPass(layer_num, sample = False)
          layer = self.layers[layer_num-1]
        #for bla in xrange(1):
        #    self.downPass(1, sample = False)
        #    self.upPass(0, sample = False)
        # pass up again before we save fantasies -- assures that we see bottom-up activities!
        for layer_num, layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample = False)
            #if layer_num+2 < len(self.layers):
                #assert(False)
        self.save_fantasy(nsteps+1, Npoint, save_callback, self.layers[0].act)
        self.dbg_sampleset = mbatch_provider.sampleset_[:, 0:Npoint].T
        print "Pulling Layer-Activations..."
        # copy hidden-layer activations to host, keyed "<layer>-subs"
        self.act = {}
        self.act_info = {}
        for l in xrange(1, self.cfg.num_layers):
            L = self.layers[l]
            if l<self.cfg.num_layers-1:
                # px/py give the display grid; assumes square layers -- TODO confirm
                self.act_info["%d-subs"%l]   = dict(px = np.sqrt(L.size), py = np.sqrt(L.size))
                self.act["%d-subs"%l]   = L.act.np
        # pulling weights to host can exhaust memory; only try small matrices
        if self.weights[0].mat.shape[0] < 800*6:
            print "Trying to pull W0..."
            try:
                self.W = self.weights[0].mat.np
                if len(self.weights)>1:
                    self.W1 = self.weights[1].mat.np
            except MemoryError:
                print("weights too big!")
        print "done."
Exemplo n.º 6
0
    def prepare_dbg(self, mbatch_provider, Npoint, nsteps, eval_start,
                    save_callback):
        """Prepare network state for visualization.

        Loads a minibatch into the visible layer, optionally restarts the
        chain from noise, runs alternating up/down passes (stochastic for
        nsteps steps, then mean-field), and finally copies layer
        activations and (small) weight matrices to host memory.

        @param mbatch_provider supplies minibatches (getMiniBatch, sampleset_)
        @param Npoint          number of sample points kept for display
        @param nsteps          number of stochastic sampling steps
        @param eval_start      EvalStartType selecting the chain's start state
        @param save_callback   forwarded to save_fantasy for each saved frame
        """
        print "Preparing data for visualization..."
        mbatch_provider.getMiniBatch(self.cfg.batchsize, self.layers[0].act)
        if eval_start == EvalStartType.trainingset:
            # chain starts directly from the loaded training minibatch
            print "Starting Eval from Trainingset"
            pass
        elif eval_start == EvalStartType.vnoise:
            # overwrite the visible layer with uniform noise scaled to [0, 0.3)
            print "Starting Eval from VNoise"
            cp.fill_rnd_uniform(self.layers[0].act)
            cp.apply_scalar_functor(self.layers[0].act, cp.scalar_functor.MULT,
                                    0.3)
        elif eval_start == EvalStartType.h1noise:
            # put noise on the first hidden layer and sample it down once
            print "Starting Eval from H1Noise"
            cp.fill_rnd_uniform(self.layers[1].act)
            cp.apply_scalar_functor(self.layers[1].act, cp.scalar_functor.MULT,
                                    0.3)
            self.downPass(1, sample=True)
        self.dbg_datout = []
        video = self.cfg.video
        # initial bottom-up pass so every layer has a consistent state
        for layer_num, layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample=False)
            #if layer_num+2 < len(self.layers):
            #assert(False)

        # nsteps stochastic steps followed by num_meanfield deterministic steps
        num_meanfield = 100
        for step in xrange(nsteps + num_meanfield):
            sample = not step > nsteps  # sample while step <= nsteps, mean-field after
            self.upPass(self.cfg.num_layers - 2, sample=sample)
            if video:
                # record a frame: project the current state down to the visible layer
                for lay_num in reversed(xrange(1, self.cfg.num_layers)):
                    self.downPass(lay_num, sample=False)
                self.save_fantasy(step, Npoint, save_callback,
                                  self.layers[0].act)

            self.downPass(self.cfg.num_layers - 1, sample=sample)
        # final deterministic top-down pass through all layers
        for layer_num in reversed(xrange(1, self.cfg.num_layers)):
            self.downPass(layer_num, sample=False)
            layer = self.layers[layer_num - 1]
        #for bla in xrange(1):
        #    self.downPass(1, sample = False)
        #    self.upPass(0, sample = False)
        # pass up again before we save fantasies -- assures that we see bottom-up activities!
        for layer_num, layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample=False)
            #if layer_num+2 < len(self.layers):
            #assert(False)
        self.save_fantasy(nsteps + 1, Npoint, save_callback,
                          self.layers[0].act)
        self.dbg_sampleset = mbatch_provider.sampleset_[:, 0:Npoint].T
        print "Pulling Layer-Activations..."
        # copy hidden-layer activations to host, keyed "<layer>-subs"
        self.act = {}
        self.act_info = {}
        for l in xrange(1, self.cfg.num_layers):
            L = self.layers[l]
            if l < self.cfg.num_layers - 1:
                # px/py give the display grid; assumes square layers -- TODO confirm
                self.act_info["%d-subs" % l] = dict(px=np.sqrt(L.size),
                                                    py=np.sqrt(L.size))
                self.act["%d-subs" % l] = L.act.np
        # pulling weights to host can exhaust memory; only try small matrices
        if self.weights[0].mat.shape[0] < 800 * 6:
            print "Trying to pull W0..."
            try:
                self.W = self.weights[0].mat.np
                if len(self.weights) > 1:
                    self.W1 = self.weights[1].mat.np
            except MemoryError:
                print("weights too big!")
        print "done."
Exemplo n.º 7
0
    def __init__(self, cfg, weights, biases):
        """Build an MLP from pre-trained weights and biases.

        Creates per-layer learning-rate and update buffers and, unless a
        saved last layer can be loaded, appends a randomly initialized
        dense output layer mapping the top hidden layer onto the classes.
        """
        self.cfg = cfg
        self.NumCorrect = 0
        self.Errorrate = []
        self.testError = []
        self.NumberOfLayers = cfg.num_layers + 1

        # identity hook, called before every training epoch
        self.preEpochHook = lambda mlp, epoch: mlp

        self.Weights = weights
        self.Bias = biases

        self.DeltaWeightsOld = []
        self.WeightsLearnRate = []
        self.dWeights = []
        self.dBias = []
        self.DeltaBiasOld = []
        self.BiasLearnRate = []

        learn_rate = 0.001
        self.NumberOfNeuronsPerLayer = []
        for idx in xrange(self.NumberOfLayers - 2):
            rows, cols = self.Weights[idx].shape
            self.createCopyFilled(self.DeltaWeightsOld, self.Weights[idx], 0)
            self.createCopyFilled(self.WeightsLearnRate, self.Weights[idx],
                                  learn_rate)
            # gradient accumulators are needed for batch learning and for
            # online rprop
            if (not self.cfg.finetune_online_learning
                    or self.cfg.finetune_rprop):
                self.createCopyFilled(self.dWeights, self.Weights[idx], 0)
                self.createCopyFilled(self.dBias, self.Bias[idx], 0)
            self.createFilled(self.DeltaBiasOld, cols, 1, 0)
            self.createFilled(self.BiasLearnRate, cols, 1, learn_rate)
            self.NumberOfNeuronsPerLayer.append(rows)

        # dense output layer: top hidden units -> class scores
        rows, cols = self.Weights[-1].shape[1], self.cfg.num_classes
        if not (self.cfg.load and self.loadLastLayer(rows, cols)):
            self.Weights.append(cp.dev_tensor_float_cm([rows, cols]))
            # uniform init shifted to [-0.5, 0.5), then scaled by the fan-in
            cp.fill_rnd_uniform(self.Weights[-1])
            cp.apply_scalar_functor(self.Weights[-1],
                                    cp.scalar_functor.SUBTRACT, 0.5)
            cp.apply_scalar_functor(self.Weights[-1], cp.scalar_functor.MULT,
                                    1. / self.Weights[-2].shape[1])
            self.createFilled(self.Bias, cols, 1, 0)
        self.createFilled(self.DeltaBiasOld, cols, 1, 0)
        self.createFilled(self.BiasLearnRate, cols, 1, learn_rate)
        self.createFilled(self.DeltaWeightsOld, rows, cols, 0)
        self.createFilled(self.WeightsLearnRate, rows, cols, learn_rate)
        if (not self.cfg.finetune_online_learning
                or self.cfg.finetune_rprop):
            self.createCopyFilled(self.dWeights, self.Weights[-1], 0)
            self.createCopyFilled(self.dBias, self.Bias[-1], 0)
        self.NumberOfNeuronsPerLayer.append(rows)
        self.NumberOfNeuronsPerLayer.append(cols)

        self.reconstruction_error = []
Exemplo n.º 8
0
    def prepare_dbg(self, mbatch_provider, Npoint, nsteps, eval_start,
                    save_callback):
        """Prepare the DBM's state for visualization.

        Loads a minibatch into the visible layer, optionally restarts the
        chain from noise, runs Gibbs updates scheduled by an UpdateQ, and
        finally copies layer activations and (small) weight matrices to
        host memory for display.

        @param mbatch_provider supplies minibatches (getMiniBatch, sampleset_)
        @param Npoint          number of sample points kept for display
        @param nsteps          minimum number of updates per layer while sampling
        @param eval_start      EvalStartType selecting the chain's start state
        @param save_callback   forwarded to save_fantasy for each saved frame
        """
        print "Preparing data for visualization..."
        mbatch_provider.getMiniBatch(self.cfg.batchsize, self.layers[0].act)
        if eval_start == EvalStartType.trainingset:
            # chain starts directly from the loaded training minibatch
            print "Starting Eval from Trainingset"
            pass
        elif eval_start == EvalStartType.vnoise:
            # overwrite the visible layer with uniform noise scaled to [0, 0.3)
            print "Starting Eval from VNoise"
            cp.fill_rnd_uniform(self.layers[0].act)
            cp.apply_scalar_functor(self.layers[0].act, cp.scalar_functor.MULT,
                                    0.3)
        elif eval_start == EvalStartType.h1noise:
            # put noise on the first hidden layer and sample it down once
            print "Starting Eval from H1Noise"
            cp.fill_rnd_uniform(self.layers[1].act)
            cp.apply_scalar_functor(self.layers[1].act, cp.scalar_functor.MULT,
                                    0.3)
            self.downPass(1, sample=True)
        self.dbg_datout = []
        video = self.cfg.video
        # initial bottom-up pass so every layer has a consistent state
        for layer_num, layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample=False)

        uq = UpdateQ(len(self.layers))
        uq.push([1])  # start with some layer in between
        step = 0
        # stochastic phase: pop layers off the queue until every layer
        # has received at least nsteps updates
        while uq.minupdates([]) < nsteps:
            layernum = uq.pop(firstlayer=0)
            if video and layernum == 0:
                # record a video frame from the mean-field visible layer
                self.updateLayer(layernum, sample=False)
                self.save_fantasy(step, Npoint, save_callback,
                                  self.layers[0].act)
            self.updateLayer(layernum, sample=True)
            step += 1
        # settling phase: two extra rounds of deterministic (mean-field) updates
        while uq.minupdates([]) < nsteps + 2:
            layernum = uq.pop(firstlayer=0)
            self.updateLayer(layernum, sample=False)
        self.updateLayer(0, sample=False)
        # pass up again before we save fantasies -- assures that we see bottom-up activities!
        for layer_num, layer in enumerate(self.layers[0:-1]):
            self.upPass(layer_num, sample=False)
        self.save_fantasy(nsteps + 1, Npoint, save_callback,
                          self.layers[0].act)
        self.dbg_sampleset = mbatch_provider.sampleset_[:, 0:Npoint].T
        print "Pulling Layer-Activations..."
        # copy hidden-layer activations to host, keyed "<layer>-subs"
        self.act = {}
        self.act_info = {}
        for l in xrange(1, self.cfg.num_layers):
            L = self.layers[l]
            if l < self.cfg.num_layers - 1:
                # px/py give the display grid; assumes square layers -- TODO confirm
                self.act_info["%d-subs" % l] = dict(px=np.sqrt(L.size),
                                                    py=np.sqrt(L.size))
                self.act["%d-subs" % l] = L.act.np
        # pulling weights to host can exhaust memory; only try small matrices
        if self.weights[0].mat.shape[0] < 800 * 6:
            print "Trying to pull W0..."
            try:
                self.W = self.weights[0].mat.np
                if len(self.weights) > 1:
                    self.W1 = self.weights[1].mat.np
            except MemoryError:
                print("weights too big!")
        print "done."