Example #1
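All snippets in this listing are test methods from MDP's DBN (deep belief network) extension and assume that mdp, DBNFlow, DBNLayerNode, DBNFlowException, and assert_array_almost_equal are already in scope. A plausible set of imports is sketched below; the module paths for the DBN classes are an assumption, not confirmed MDP API.

    import mdp  # mdp.numx_rand is MDP's alias for numpy.random
    from numpy.testing import assert_array_almost_equal
    # DBNFlow, DBNLayerNode and DBNFlowException ship with the DBN
    # extension; their exact import path depends on the checkout, e.g.:
    # from mdp.nodes import DBNLayerNode
    # from mdp.hinet import DBNFlow, DBNFlowException

This first example is a smoke test of the whole pipeline: greedy layer-wise training, up-down fine-tuning, and a recognition/generation round trip.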
    def test_updown_phase_functionality(self):
        N, I, J = 100, 5, 2
        flow = DBNFlow([DBNLayerNode(I), DBNLayerNode(J)])

        v = mdp.numx_rand.randint(0, 2, size=(N, I)).astype('d')
        flow.train(v)
        flow.updown_phase(v, max_iter=10)
        h = flow.execute(v)
        v_rec = flow.inverse(h)
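Here execute runs the bottom-up recognition pass through the trained stack, while inverse runs the top-down generative pass, so v_rec is a reconstruction of the input from its hidden representation.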
Example #2
    def _test_updown_stability(self):
        """Test that the updown phase does not change optimal greedy weights.
        """

        # number of visible and hidden units
        I, J = 8, 2

        # create DBNLayer node
        node = DBNLayerNode(J, I)
        node._rbm._init_weights()
        # init to random model
        node._rbm.w = mdp.utils.random_rot(max(I, J), dtype='d')[:I, :J]
        node._rbm.bv = mdp.numx_rand.randn(I)
        node._rbm.bh = mdp.numx_rand.randn(J)

        # Gibbs sample to reach the equilibrium distribution
        N = 10000  # 1e4 is a float; randint() needs an integer sample count
        v = mdp.numx_rand.randint(0, 2, (N, I)).astype('d')
        for k in range(100):
            p, h = node._rbm._sample_h(v)
            p, v = node._rbm._sample_v(h)

        # greedy learning phase (it shouldn't change the weights much,
        # since the input is already drawn from the equilibrium distribution)
        for k in range(100):
            node.train(v)
        node.stop_training()

        # save original weights
        real_w = node._rbm.w.copy()
        real_bv = node._rbm.bv.copy()
        real_bh = node._rbm.bh.copy()

        # up-down training
        node._init_updown()
        for k in range(100):
            h, ph, deltah = node._up_pass(v)
            _, _, deltav = node._down_pass(h)

        assert_array_almost_equal(real_w, node.w_rec, 2)
        assert_array_almost_equal(real_w, node.w_gen, 2)
        assert_array_almost_equal(real_bv, node.bv, 2)
        assert_array_almost_equal(real_bh, node.bh, 2)
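For context on the equilibrium step above: each Gibbs iteration resamples the hidden units given the visible ones, then the visible units given the hidden ones. Below is a minimal self-contained numpy sketch of that chain; sample_h and sample_v are standalone stand-ins for the node's private _sample_h/_sample_v methods, not MDP API.

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def sample_h(v, w, bh, rng):
        p = sigmoid(v @ w + bh)    # p(h=1 | v) for a binary RBM
        return p, (rng.random(p.shape) < p).astype(float)

    def sample_v(h, w, bv, rng):
        p = sigmoid(h @ w.T + bv)  # p(v=1 | h)
        return p, (rng.random(p.shape) < p).astype(float)

    rng = np.random.default_rng(0)
    N, I, J = 1000, 8, 2
    w = 0.1 * rng.standard_normal((I, J))
    bv, bh = rng.standard_normal(I), rng.standard_normal(J)
    v = (rng.random((N, I)) < 0.5).astype(float)
    for _ in range(100):           # alternate updates toward equilibrium
        _, h = sample_h(v, w, bh, rng)
        _, v = sample_v(h, w, bv, rng)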
Example #3
    def _test_updown_learning(self):
        """Test that DBNLayer is able to learn by up-down passes alone."""

        # number of visible and hidden units
        I, J = 4, 2

        node = DBNLayerNode(J, I)

        # the observations consist of two disjoint patterns that
        # never appear together
        N = 10000
        v = mdp.numx.zeros((N, I))
        for n in range(N):
            r = mdp.numx_rand.random()
            if r > 0.666: v[n, :] = [0, 1, 0, 1]
            elif r > 0.333: v[n, :] = [1, 0, 1, 0]

        # token greedy phase: train on a single sample just to reach the
        # trained state required before up-down learning can start
        node.train(v[:1, :])
        node.stop_training()
        # start up-down phase
        node._init_updown()

        for k in range(1500):
            if k > 5:
                mom = 0.9
                eps = 0.2
            else:
                mom = 0.5
                eps = 0.5
            h, ph, deltah = node._up_pass(v, epsilon=eps, momentum=mom)
            rec_v, rec_pv, deltav = node._down_pass(h,
                                                    epsilon=eps,
                                                    momentum=mom)
            train_err = float(((v - rec_v)**2.).sum())
            if train_err / N < 0.1: break

        assert train_err / N < 0.1
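Two remarks on the loop above. The jump from momentum 0.5 to 0.9 after a few sweeps is the usual RBM training heuristic: keep the momentum small while the gradient is still large and noisy, then raise it once learning settles. Also, the per-sample pattern generation can be vectorized; a hypothetical numpy equivalent of the construction loop:

    import numpy as np

    rng = np.random.default_rng(0)
    N, I = 10000, 4
    patterns = np.array([[0., 0., 0., 0.],   # r <= 0.333: all-zero case
                         [1., 0., 1., 0.],   # 0.333 < r <= 0.666
                         [0., 1., 0., 1.]])  # r > 0.666
    v = patterns[rng.integers(0, 3, size=N)]  # pick one pattern per row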
Example #4
    def test_training_finished_before_updown(self):
        flow = DBNFlow([DBNLayerNode(5), DBNLayerNode(2)])
        self.assertRaises(DBNFlowException, flow.updown_phase, None)
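The up-down phase is only defined once greedy training has finished for every layer, so calling updown_phase on a freshly built, untrained flow should raise DBNFlowException; this is presumably why None can be passed as the data argument, since the flow is expected to reject the call before touching the input.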