Example #1
0
    def train(self, X):
        """Train the RBM with one-step contrastive divergence (CD-1).

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Training data, one sample per row (expected in [0, 1]).

        Updates ``self.W``, ``self.b`` and ``self.c`` in place via
        momentum-smoothed gradient steps and prints the average
        reconstruction error after each epoch.
        """
        n_samples, n_features = X.shape

        for epoch in range(self.numepochs):
            err = 0.0
            n_batches = 0  # `numbatches` was undefined; count slices instead

            for sl in gen_even_slices(n_samples, self.batchsize):
                batch = X[sl, :]  # was `x[l, :]`: `x` was undefined, data is `X`
                v1 = batch  # batch X n_visible
                # Positive phase: sample hidden units from the data.
                h1 = sigmrnd(np.dot(v1, self.W.T) + self.c)  # batch X n_hidden
                # Negative phase: one reconstruction step.
                v2 = sigmrnd(np.dot(h1, self.W) + self.b)  # batch X n_visible
                h2 = sigm(np.dot(v2, self.W.T) + self.c)  # batch X n_hidden

                # <h, v> correlations for the positive and negative phases.
                c1 = np.dot(h1.T, v1)  # n_hidden X n_visible
                c2 = np.dot(h2.T, v2)  # n_hidden X n_visible

                # Momentum-smoothed velocities for each parameter.
                self.vW = self.momentum * self.vW + self.alpha * (c1 - c2) / self.batchsize
                self.vb = self.momentum * self.vb + self.alpha * np.sum(v1 - v2, axis=0) / self.batchsize
                self.vc = self.momentum * self.vc + self.alpha * np.sum(h1 - h2, axis=0) / self.batchsize

                self.W = self.W + self.vW  # n_hidden X n_visible
                self.b = self.b + self.vb  # n_visible
                self.c = self.c + self.vc  # n_hidden

                # Fixed misplaced parenthesis: was `np.power(v1 - v2), 2)`.
                err += np.sum(np.power(v1 - v2, 2)) / self.batchsize
                n_batches += 1

            # print() call works on both Python 2 and 3 (was a py2 statement).
            # Dropped the original `kk = np.random.permutation(m)`: `m` was
            # undefined and `kk` was never used.
            print('epoch ' + str(epoch) + '/' + str(self.numepochs) +
                  '. Average reconstruction error is: ' + str(err / n_batches))
Example #2
0
 def get_cost_monitor(self):
     """Return a compiled Theano function estimating the pseudo-likelihood.

     Built lazily on the first call and cached on the instance. Each
     invocation of the returned function flips one visible bit (cycling
     through all ``n_vis`` bits via a shared index) and uses the free-energy
     difference as a stochastic estimate of the pseudo-likelihood cost.
     """
     if hasattr(self, 'cost_monitor'):
         return self.cost_monitor

     # Index of the bit to flip; advanced by one (mod n_vis) on every call.
     bit_i_idx = theano.shared(value=0, name='bit_i_idx')

     rounded = T.round(self.input)
     fe_original = self.free_energy(rounded)
     flipped = T.set_subtensor(rounded[:, bit_i_idx],
                               1 - rounded[:, bit_i_idx])
     fe_flipped = self.free_energy(flipped)

     # Stochastic pseudo-likelihood approximation over the whole batch.
     cost = T.mean(self.n_vis * T.log(sigm(fe_flipped - fe_original)))

     self.cost_monitor = theano.function(
         inputs=[self.input],
         outputs=cost,
         updates={bit_i_idx: (bit_i_idx + 1) % self.n_vis},
         name='monitoring_function')
     return self.cost_monitor
Example #3
0
    def predict(self, input):
        """Run a forward pass of the GRU over an input sequence.

        Parameters
        ----------
        input : ndarray, shape (L, n_inputs)
            The input sequence, one timestep per row.

        Returns
        -------
        list
            ``[a1, az, ar, ahhat, ah, a2]`` — input projections, update-gate
            and reset-gate activations, candidate states, hidden states and
            output activations, each of shape (L, Nhidden) (a2 per ``w2``).
        """
        L = np.shape(input)[0]
        az = np.zeros((L, self.Nhidden))     # update-gate activations
        ar = np.zeros((L, self.Nhidden))     # reset-gate activations
        ahhat = np.zeros((L, self.Nhidden))  # candidate hidden states
        ah = np.zeros((L, self.Nhidden))     # hidden states

        # Input projection for every timestep at once.
        a1 = tanh(np.dot(input, self.w1) + self.b1)

        # First timestep: the previous hidden state is all zeros.
        # BUG FIX: the original wrote row 1 here (which the loop below then
        # recomputed identically) and never filled row 0, so the first
        # timestep of the sequence was silently skipped.
        x = np.concatenate((np.zeros((self.Nhidden)), a1[0, :]))
        az[0, :] = sigm(np.dot(x, self.wz) + self.bz)
        ar[0, :] = sigm(np.dot(x, self.wr) + self.br)
        ahhat[0, :] = tanh(np.dot(x, self.wh) + self.bh)
        ah[0, :] = az[0, :] * ahhat[0, :]  # (1 - az) * 0 term vanishes

        for i in range(1, L):
            x = np.concatenate((ah[i - 1, :], a1[i, :]))
            az[i, :] = sigm(np.dot(x, self.wz) + self.bz)
            ar[i, :] = sigm(np.dot(x, self.wr) + self.br)
            # Reset gate modulates the previous state for the candidate.
            x = np.concatenate((ar[i, :] * ah[i - 1, :], a1[i, :]))
            ahhat[i, :] = tanh(np.dot(x, self.wh) + self.bh)
            ah[i, :] = (1 - az[i, :]) * ah[i - 1, :] + az[i, :] * ahhat[i, :]

        a2 = tanh(np.dot(ah, self.w2) + self.b2)
        return [a1, az, ar, ahhat, ah, a2]
Example #4
0
 def gibbs_step(v, binomial=False):
     """One Gibbs step v -> h -> v' for a binary RBM.

     Samples the hidden layer from its conditional Bernoulli distribution,
     then propagates back down. Returns ``(mean_v, v)`` where ``mean_v`` is
     the visible mean-field activation and ``v`` is either that mean or a
     binary sample from it, depending on ``binomial``.
     """
     # Up: hidden means, then a Bernoulli sample of the hidden layer.
     mean_h = sigm(T.dot(v, W) + bh)
     h = rng.binomial(size=mean_h.shape, n=1, p=mean_h, dtype=dtype)
     # Down: visible means from the sampled hidden layer.
     mean_v = sigm(T.dot(h, W.T) + bv)
     if binomial:
         v = rng.binomial(size=mean_v.shape, n=1, p=mean_v,
                          dtype=theano.config.floatX)
     else:
         v = mean_v
     return mean_v, v
Example #5
0
 def hidden_to_visible(self, h_sample):
     """Visible-unit activation probabilities given a hidden sample."""
     # Contract the relation tensor R with the weights W first, then
     # contract the hidden sample against the combined tensor.
     combined = T.tensordot(self.R, self.W, axes=[2, 0])
     pre_activation = T.tensordot(h_sample, combined, axes=[[1, 2], [1, 2]])
     return sigm(pre_activation + self.vbias)
Example #6
0
 def hidden_to_visible(self, h_sample):
     """Mean activation of the visible units given a hidden sample."""
     return sigm(T.dot(h_sample, self.W.T) + self.vbias)
Example #7
0
 def visible_to_hidden(self, v_sample):
     """Sample the hidden units given a visible sample."""
     pre_activation = T.dot(v_sample, self.W) + self.hbias
     return binomial(sigm(pre_activation))
Example #8
0
 def rbmup(self, X):
     """Propagate data up through the RBM: visible -> hidden probabilities."""
     return sigm(np.dot(X, self.W.T) + self.c)
Example #9
0
 def rbmdown(self, X):
     """Propagate activations down: hidden -> visible probabilities."""
     return sigm(np.dot(X, self.W) + self.b)