def _ulogprob_hid(self, Y, num_is_samples=100):
		"""
		Estimates the unnormalized marginal log-probabilities of hidden states.
		
		Use this method only if you know what you are doing.
		"""

		# approximate this SRBM with an RBM
		rbm = RBM(self.X.shape[0], self.Y.shape[0])
		rbm.W = self.W
		rbm.b = self.b
		rbm.c = self.c

		# allocate memory
		Q = np.asmatrix(np.zeros([num_is_samples, Y.shape[1]]))

		for k in range(num_is_samples):
			# draw importance samples
			X = rbm.backward(Y)

			# store importance weights
			Q[k, :] = self._ulogprob(X, Y) - rbm._clogprob_vis_hid(X, Y)

		# average importance weights to get estimates
		return utils.logmeanexp(Q, 0)
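A rough usage sketch for the estimator above, assuming `srbm` is a trained SRBM instance exposing this method; the batch of hidden states below is just an illustrative set of random binary column vectors, not part of the source:

# a minimal sketch, assuming `srbm` is a trained SRBM instance with the
# _ulogprob_hid method above; Y is an illustrative batch of binary hidden states
import numpy as np

Y = np.asmatrix((np.random.rand(srbm.Y.shape[0], 10) < 0.5).astype(float))
log_q = srbm._ulogprob_hid(Y, num_is_samples=200)
# log_q holds one unnormalized marginal log-probability estimate per column of Y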
Example #3
def load_dbn_param(self, dbnpath, softmaxpath):
    # load the pickled list of per-layer weight matrices
    weights = cPickle.load(open(dbnpath, 'rb'))
    self.nlayers = len(weights)
    for i in range(self.nlayers):
        weight = weights[i]
        vlen, hlen = weight.shape[0], weight.shape[1]
        # rebuild each RBM layer from its stored weight matrix
        rbm = RBM(vlen, hlen)
        rbm.W = weight
        self.rbm_layers.append(rbm)
        print "RBM layer%d shape:%s" % (i, str(rbm.W.shape))
    # restore the softmax classifier stacked on top of the DBN
    self.softmax = SoftMax()
    self.softmax.load_theta(softmaxpath)
    print "softmax parameter: " + str(self.softmax.theta.shape)
        # propagate the top-level representation back down the stack to
        # reconstruct the visible input
        input = rbm3.reconstruct_from_output(input)
        input = rbm2.reconstruct_from_output(input)
        input = rbm1.reconstruct_from_output(input)

        for i in xrange(input.shape[0]):
            # per-sample reconstruction error: original input minus reconstruction
            first_input = rbm1.input[i]
            last_input = input[i]

            delta = [x - y for (x, y) in zip(first_input, last_input)]
            delta = numpy.array(delta)

            # RBM1 finetune
            W = rbm1.W
            for j in xrange(W.shape[0]):
                W[j] = W[j] + finetuning_lr * delta
            rbm1.W = W
            delta = rbm1.output_from_input(delta)

            # RBM2 finetune
            W = rbm2.W
            for j in xrange(W.shape[0]):
                W[j] = W[j] + finetuning_lr * delta
            rbm2.W = W
            delta = rbm2.output_from_input(delta)

            # RBM3 finetune
            W = rbm3.W
            for j in xrange(W.shape[0]):
                W[j] = W[j] + finetuning_lr * delta
            rbm3.W = W
            delta = rbm3.output_from_input(delta)
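Each of the three finetuning loops above adds the same delta row to every row of W, so each loop is equivalent to a single broadcast update. A compact sketch of that equivalence (assuming, as the loop itself does, that delta is a 1-D numpy array whose length matches W.shape[1]):

# a minimal sketch: the per-row loop above written as one broadcast update;
# requires len(delta) == W.shape[1], the same condition the loop relies on
W = rbm1.W
W = W + finetuning_lr * delta   # numpy broadcasts delta across every row of W
rbm1.W = W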