Example #1
    def sample(self, nSamples, nCycles):

        print('----------------------------------')
        print('Sampling dbn...')

        # initialize state of sample dgm
        sampleDgm = [
            np.random.randint(0, 2, (nSamples, self.net[i]))
            for i in range(self.nL)
        ]

        # run Markov chain to generate equilibrium samples in top rbm
        for i in range(nCycles):
            sampleDgm[-1] = neuron(
                np.dot(sampleDgm[-2], self.w[-1]) + self.b[-1])
            sampleDgm[-2] = neuron(
                np.dot(sampleDgm[-1], self.w[-1].transpose()) + self.b[-2])

        # propagate signal down to visibles
        for i in range(self.nHL - 2, -1, -1):
            sampleDgm[i] = neuron(
                np.dot(sampleDgm[i + 1], self.w[i].transpose()) + self.b[i])

        # return equilibrium samples
        return sampleDgm[0]
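
All of these examples call a neuron helper that the snippets never define. Since the layer states are binary (they are initialized with np.random.randint(0, 2, ...)) and the methods perform Gibbs sampling, a logistic unit that samples a binary state is the natural reading. A minimal sketch under that assumption (the actual definition is not in the source):

    import numpy as np

    def neuron(x):
        # logistic activation, then sample a binary 0/1 state from the
        # resulting Bernoulli probability (assumed stochastic binary units)
        p = 1.0 / (1.0 + np.exp(-x))
        return (np.random.rand(*p.shape) < p).astype(float)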
Example #2
    def initializeHiddens(self, state):

        # run over hidden layers except top one
        for i in range(1, self.nHL):
            # double the bottom-up input to compensate for the top-down
            # input from the layer above, which has not been initialized yet
            state[i] = neuron(2 * np.dot(state[i - 1], self.w[i - 1]) +
                              self.b[i])

        # initialize top layer
        state[self.nHL] = neuron(
            np.dot(state[self.nHL - 1], self.w[self.nHL - 1]) +
            self.b[self.nHL])
Example #3
    def inferEvenHiddens(self, state):

        # run over all even hidden layers
        for i in range(2, self.nL, 2):
            # do differently if top layer
            if i == self.nL - 1:
                state[i] = neuron(
                    np.dot(state[i - 1], self.w[i - 1]) + self.b[i])
            else:
                state[i] = neuron(
                    np.dot(state[i - 1], self.w[i - 1]) +
                    np.dot(state[i + 1], self.w[i].transpose()) + self.b[i])
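
The even/odd split exploits the bipartite structure of a deep Boltzmann machine: given the odd layers, all even layers are conditionally independent and can be sampled in parallel, and vice versa. The source does not show the companion method for the odd layers; a hypothetical sketch, by symmetry with the code above:

    def inferOddHiddens(self, state):
        # hypothetical companion: sample all odd layers given the even ones
        for i in range(1, self.nL, 2):
            # the top layer receives no top-down input
            if i == self.nL - 1:
                state[i] = neuron(
                    np.dot(state[i - 1], self.w[i - 1]) + self.b[i])
            else:
                state[i] = neuron(
                    np.dot(state[i - 1], self.w[i - 1]) +
                    np.dot(state[i + 1], self.w[i].transpose()) + self.b[i])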
Example #4
    def sleepUpdate(self, wakeState, sleepState):

        # run downwards over layers in the network
        for i in range(self.nHL, 1, -1):

            # downwards inference
            sleepState[i - 1] = neuron(
                np.dot(sleepState[i], self.w[i - 1].transpose()) +
                self.b[i - 1])
            # reconstruction
            wakeState[i] = neuron(
                np.dot(sleepState[i - 1], self.wR[i - 1]) + self.bR[i])

            # update recognition weights
            self.wR[i - 1] += (self.lR / self.bS) * np.dot(
                sleepState[i - 1].transpose(), (sleepState[i] - wakeState[i]))

            # update recognition biases
            self.bR[i] += (self.lR / self.bS) * np.sum(
                sleepState[i] - wakeState[i], axis=0)
Example #5
    def wakeUpdate(self, wakeState, sleepState):

        # run upwards over all but the last layer in the network
        for i in range(1, self.nHL):

            # upwards inference
            wakeState[i] = neuron(
                np.dot(wakeState[i - 1], self.wR[i - 1]) + self.bR[i])
            # reconstruction
            sleepState[i - 1] = neuron(
                np.dot(wakeState[i], self.w[i - 1].transpose()) +
                self.b[i - 1])

            # update generative weights
            self.w[i - 1] += (self.lR / self.bS) * np.dot(
                (wakeState[i - 1] - sleepState[i - 1]).transpose(),
                wakeState[i])

            # update generative biases
            self.b[i - 1] += (self.lR / self.bS) * np.sum(
                wakeState[i - 1] - sleepState[i - 1], axis=0)
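
Examples #4 and #5 are the two halves of the wake-sleep algorithm: wakeUpdate drives the network bottom-up with the recognition weights wR and adjusts the generative weights w so the top-down pass reproduces the states below, while sleepUpdate drives the network top-down with w and adjusts wR so the bottom-up pass recovers the states that generated the fantasy. Both use the same delta rule with learning rate lR scaled by the batch size bS.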
Example #6
    def downPass(self, state):

        # run downwards over layers in the network
        for i in range(self.nL - 2, -1, -1):
            state[i] = neuron(
                np.dot(state[i + 1], self.w[i].transpose()) + self.b[i])
Example #7
    def upPass(self, state):

        # run upwards over layers in the network
        for i in range(1, self.nL):
            state[i] = neuron(np.dot(state[i - 1], self.w[i - 1]) + self.bR[i])
Example #8
    def inferVisibles(self, state):
        # sample the visible layer given the hidden layer
        state[0] = neuron(np.dot(state[1], self.w[0].transpose()) + self.b[0])
Example #9
    def inferHiddens(self, state):
        # sample the hidden layer given the visible layer
        state[1] = neuron(np.dot(state[0], self.w[0]) + self.b[1])
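
Examples #8 and #9 are the two conditionals of a two-layer RBM; alternating them performs block Gibbs sampling, which is how contrastive divergence obtains its negative statistics. A hypothetical usage, where rbm is an instance and state its list of layer states:

    # hypothetical: one reconstruction step of block Gibbs in the RBM
    rbm.inferHiddens(state)    # h ~ p(h | v), data-driven
    rbm.inferVisibles(state)   # v ~ p(v | h), reconstruction
    rbm.inferHiddens(state)    # h ~ p(h | v), reconstruction-driven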
Example #10
    def train(self):

        print('----------------------------------')
        print('Fine tuning...')

        # data container for wake phase
        wakeState = [
            np.random.randint(0, 2, (self.bS, self.net[i]))
            for i in range(self.nL)
        ]
        # data container for sleep phase
        sleepState = [
            np.random.randint(0, 2, (self.bS, self.net[i]))
            for i in range(self.nL)
        ]

        for e in range(self.nE):

            if (e + 1) % 10 == 0:
                print('----------------------------------')
                print("epoch ", e + 1, " of ", self.nE)

            for b in range(self.nB):

                # set new batch of data
                self.newBatch()
                wakeState[0] = self.batch
                sleepState[0] = self.batch

                ### WAKE PHASE

                # upwards wake update
                self.wakeUpdate(wakeState, sleepState)

                # CDk on top layer RBM
                # data dependent statistics
                wakeState[-1] = neuron(
                    np.dot(wakeState[-2], self.w[-1]) + self.b[-1])
                # model dependent statistics: start the CDk Gibbs chain from
                # a fresh, data-driven sample of the top layer
                sleepState[-1] = neuron(
                    np.dot(wakeState[-2], self.w[-1]) + self.b[-1])
                for i in range(self.k):
                    sleepState[-2] = neuron(
                        np.dot(sleepState[-1], self.w[-1].transpose()) +
                        self.b[-2])
                    sleepState[-1] = neuron(
                        np.dot(sleepState[-2], self.w[-1]) + self.b[-1])
                # update top layer RBM weights
                self.w[-1] += (self.lR / self.bS) * (
                    np.dot(wakeState[-2].transpose(), wakeState[-1]) -
                    np.dot(sleepState[-2].transpose(), sleepState[-1]))
                # update top layer RBM biases
                self.b[-1] += (self.lR / self.bS) * np.sum(
                    wakeState[-1] - sleepState[-1], axis=0)
                self.b[-2] += (self.lR / self.bS) * np.sum(
                    wakeState[-2] - sleepState[-2], axis=0)

                # downwards sleep update
                sleepState[-2] = neuron(
                    np.dot(sleepState[-1], self.w[-1].transpose()) +
                    self.b[-2])
                self.sleepUpdate(wakeState, sleepState)
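
A driver for the fine-tuning and sampling methods might look as follows. The class name, constructor signature, and all hyperparameter values are assumptions for illustration; only the attribute names (net, lR, bS, nE, nB, k) and the two method signatures appear in the examples above:

    # hypothetical driver -- class name and constructor are assumptions
    dbn = DBN(net=[784, 500, 500, 2000],  # layer sizes, visibles first
              lR=0.01, bS=100,            # learning rate, batch size
              nE=100, nB=600, k=1)        # epochs, batches per epoch, CD-k steps
    dbn.train()                           # wake-sleep fine-tuning (Example #10)
    fantasies = dbn.sample(nSamples=25, nCycles=200)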