Example 1
    def get_base_rate(self, base_rate_type="uniform"):
        base_rate, annealable_params = RBM.get_base_rate(self, base_rate_type)
        #annealable_params.append(self.beta)  # Seems to work better without annealing self.beta (see unit tests)

        if base_rate_type == "uniform":
            def compute_lnZ(self):
                # Since the biases and weights are all 0, each of the
                #  $2^{input\_size}$ visible configurations contributes the same
                #  hidden-unit sum $\sum_{z=1}^H \sum_{h \in \{0,1\}^z} \exp(-\beta z \ln(2))$ to Z
                r = T.exp((1-self.beta) * T.log(2))  # Common ratio of the geometric series
                lnZ = T.log((r - r**(self.hidden_size+1)) / (1-r))  # Finite sum for $z = 1..H$
                return (self.input_size * T.log(2) +  # ln(2^input_size)
                        lnZ)  # $ln( \sum_{z=1}^H \sum_{h \in \{0,1\}^z} \exp(-\beta z \ln(2)) )$

        elif base_rate_type == "c":
            def compute_lnZ(self):
                # Since the hidden biases (but not the visible ones) and the weights
                #  are all 0, the visible units contribute $\sum_i softplus(c_i)$ to lnZ
                r = T.exp((1-self.beta) * T.log(2))  # Common ratio of the geometric series
                lnZ = T.log((r - r**(self.hidden_size+1)) / (1-r))  # Finite sum for $z = 1..H$
                return (lnZ +  # $ln( \sum_{z=1}^H \sum_{h \in \{0,1\}^z} \exp(-\beta z \ln(2)) )$
                        T.sum(T.nnet.softplus(self.c)))

        elif base_rate_type == "b":
            raise NotImplementedError()

        # Dynamically bind the chosen compute_lnZ implementation to the base-rate model.
        import types
        base_rate.compute_lnZ = types.MethodType(compute_lnZ, base_rate)

        return base_rate, annealable_params
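
As a sanity check on the closed form used in compute_lnZ above: the finite geometric sum $\sum_{z=1}^{H} r^z$ equals $(r - r^{H+1})/(1-r)$ whenever $r \neq 1$. A minimal NumPy sketch of this identity (the values of beta and H below are illustrative assumptions, not taken from the model):

import numpy as np

beta, H = 0.5, 3                      # illustrative values only
r = np.exp((1 - beta) * np.log(2))    # common ratio $r = 2^{1-\beta}$
closed_form = (r - r**(H + 1)) / (1 - r)          # as in compute_lnZ
brute_force = sum(r**z for z in range(1, H + 1))  # explicit sum
assert np.isclose(closed_form, brute_force)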
Example 2
    def get_base_rate(self, base_rate_type="uniform"):
        base_rate, annealable_params = RBM.get_base_rate(self, base_rate_type)
        #annealable_params.append(self.beta)  # Seems to work better without annealing self.beta (see unit tests)

        if base_rate_type == "uniform":
            def compute_lnZ(self):
                # Since the biases and weights are all 0, each of the
                #  $2^{input\_size}$ visible configurations contributes the same
                #  hidden-unit sum $\sum_{z=1}^H \sum_{h \in \{0,1\}^z} \exp(-\beta z \ln(2))$ to Z
                r = T.exp((1-self.beta) * T.log(2))  # Common ratio of the geometric series
                lnZ = T.log(r / (1-r))  # Limit of the infinite geometric series
                return (self.input_size * T.log(2) +  # ln(2^input_size)
                        lnZ)  # $ln( \sum_{z=1}^H \sum_{h \in \{0,1\}^z} \exp(-\beta z \ln(2)) )$

        elif base_rate_type == "c":
            def compute_lnZ(self):
                # Since the hidden biases (but not the visible ones) and the weights
                #  are all 0, the visible units contribute $\sum_i softplus(c_i)$ to lnZ
                r = T.exp((1-self.beta) * T.log(2))  # Common ratio of the geometric series
                lnZ = T.log(r / (1-r))  # Limit of the infinite geometric series
                return (lnZ +  # $ln( \sum_{z=1}^H \sum_{h \in \{0,1\}^z} \exp(-\beta z \ln(2)) )$
                        T.sum(T.nnet.softplus(self.c)))

        elif base_rate_type == "b":
            raise NotImplementedError()

        # Dynamically bind the chosen compute_lnZ implementation to the base-rate model.
        import types
        base_rate.compute_lnZ = types.MethodType(compute_lnZ, base_rate)

        return base_rate, annealable_params
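
The only difference from Example 1 is the lnZ line: the finite sum over $z = 1..H$ is replaced by its infinite-series limit, $\sum_{z=1}^{\infty} r^z = r/(1-r)$. That limit exists only for $|r| < 1$, i.e. $\beta > 1$ given $r = 2^{1-\beta}$; in that regime the two variants differ only by the truncated tail $r^{H+1}/(1-r)$, which vanishes as hidden_size grows.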
Example 3
class Test_RBM(unittest.TestCase):
    def setUp(self):
        self.input_size = 4
        self.hidden_size = 3
        self.batch_size = 100

        rng = np.random.RandomState(42)
        self.W = rng.randn(self.hidden_size,
                           self.input_size).astype(config.floatX)
        self.b = rng.randn(self.hidden_size).astype(config.floatX)
        self.c = rng.randn(self.input_size).astype(config.floatX)

        self.model = RBM(input_size=self.input_size,
                         hidden_size=self.hidden_size)

        self.model.W.set_value(self.W)
        self.model.b.set_value(self.b)
        self.model.c.set_value(self.c)

    def test_free_energy(self):
        v = T.matrix('v')
        h = T.matrix('h')
        logsumexp_E = theano.function([v, h], -logsumexp(-self.model.E(v, h)))

        v1 = np.random.rand(1, self.input_size).astype(config.floatX)
        H = cartesian([(0, 1)] * self.hidden_size, dtype=config.floatX)
        Fv = logsumexp_E(v1, H)  # Marginalization over $\mathbf{h}$

        v = T.matrix('v')
        free_energy = theano.function([v], self.model.free_energy(v))
        assert_array_almost_equal(free_energy(v1), [Fv])

        v2 = np.tile(v1, (self.batch_size, 1))
        assert_array_almost_equal(free_energy(v2), [Fv] * self.batch_size)
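
    # Note: the test above relies on the identity
    # $F(v) = -\ln \sum_h \exp(-E(v, h))$: the free energy is the negative
    # log of the marginal over all $2^{hidden\_size}$ hidden configurations,
    # which is exactly what logsumexp_E computes by brute force.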

    def test_marginalize_over_v(self):
        v = T.matrix('v')
        h = T.matrix('h')
        E = theano.function([v, h], -logsumexp(-self.model.E(v, h)))

        h1 = np.random.rand(1, self.hidden_size).astype(config.floatX)
        V = cartesian([(0, 1)] * self.input_size, dtype=config.floatX)
        expected_energy = E(V, h1)

        h = T.matrix('h')
        marginalize_over_v = theano.function([h],
                                             self.model.marginalize_over_v(h))
        assert_array_almost_equal(marginalize_over_v(h1), [expected_energy])

        h2 = np.tile(h1, (self.batch_size, 1))
        assert_array_almost_equal(marginalize_over_v(h2),
                                  [expected_energy] * self.batch_size)

    def test_compute_lnZ(self):
        v = T.matrix('v')
        h = T.matrix('h')
        lnZ = theano.function([v, h], logsumexp(-self.model.E(v, h)))

        V = cartesian([(0, 1)] * self.input_size, dtype=config.floatX)
        H = cartesian([(0, 1)] * self.hidden_size, dtype=config.floatX)

        lnZ_using_free_energy = theano.function(
            [v], logsumexp(-self.model.free_energy(v)))
        assert_equal(lnZ_using_free_energy(V), lnZ(V, H))

        lnZ_using_marginalize_over_v = theano.function(
            [h], logsumexp(-self.model.marginalize_over_v(h)))
        assert_almost_equal(lnZ_using_marginalize_over_v(H),
                            lnZ(V, H),
                            decimal=6)

    def test_base_rate(self):
        # All binary combinations for V and H.
        V = cartesian([(0, 1)] * self.input_size, dtype=config.floatX)
        H = cartesian([(0, 1)] * self.hidden_size, dtype=config.floatX)

        base_rates = []
        # Add the uniform base rate, i.e. all parameters of the model are set to 0.
        base_rates.append(self.model.get_base_rate())
        # Add the base rate where visible biases are the ones from the model.
        base_rates.append(self.model.get_base_rate('c'))
        # Add the base rate where hidden biases are the ones from the model.
        base_rates.append(self.model.get_base_rate('b'))  # Not implemented

        for base_rate, annealable_params in base_rates:
            base_rate_lnZ = base_rate.compute_lnZ().eval().astype(
                config.floatX)

            brute_force_lnZ = logsumexp(-base_rate.E(V, H)).eval()
            assert_almost_equal(brute_force_lnZ.astype(config.floatX),
                                base_rate_lnZ,
                                decimal=6)

            theano_lnZ = logsumexp(-base_rate.free_energy(V), axis=0).eval()
            assert_almost_equal(theano_lnZ.astype(config.floatX),
                                base_rate_lnZ,
                                decimal=6)

            theano_lnZ = logsumexp(-base_rate.marginalize_over_v(H)).eval()
            assert_almost_equal(theano_lnZ.astype(config.floatX),
                                base_rate_lnZ,
                                decimal=6)

    @npt.dec.slow
    def test_binomial_from_uniform_cpu(self):
        # Test using NumPy
        rng = np.random.RandomState(42)
        probs = rng.rand(10)

        seed = 1337
        nb_samples = 1000000
        rng = np.random.RandomState(seed)
        success1 = np.zeros(len(probs))
        for i in range(nb_samples):
            success1 += rng.binomial(n=1, p=probs)

        rng = np.random.RandomState(seed)
        success2 = np.zeros(len(probs))
        for i in range(nb_samples):
            success2 += (rng.rand(len(probs)) < probs).astype('int')

        success1 = success1 / nb_samples
        success2 = success2 / nb_samples

        assert_array_almost_equal(success1, success2)

        # Test using Theano's default RandomStreams
        theano_rng = RandomStreams(1337)
        rng_bin = theano_rng.binomial(size=probs.shape,
                                      n=1,
                                      p=probs,
                                      dtype=theano.config.floatX)
        success1 = np.zeros(len(probs))
        for i in range(nb_samples):
            success1 += rng_bin.eval()

        theano_rng = RandomStreams(1337)
        rng_bin = theano_rng.uniform(size=probs.shape,
                                     dtype=theano.config.floatX) < probs
        success2 = np.zeros(len(probs))
        for i in range(nb_samples):
            success2 += rng_bin.eval()

        assert_array_almost_equal(success1 / nb_samples, success2 / nb_samples)

        # Test using Theano's sandbox MRG RandomStreams
        theano_rng = MRG_RandomStreams(1337)
        success1 = theano_rng.binomial(size=probs.shape,
                                       n=1,
                                       p=probs,
                                       dtype=theano.config.floatX)

        theano_rng = MRG_RandomStreams(1337)
        success2 = theano_rng.uniform(size=probs.shape,
                                      dtype=theano.config.floatX) < probs

        assert_array_equal(success1.eval(), success2.eval())
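
    # Rationale for the equivalence tested above: for $U \sim Uniform(0, 1)$
    # we have $P(U < p) = p$, so the indicator $1[U < p]$ is a Bernoulli(p)
    # draw, i.e. a binomial sample with $n = 1$. The NumPy and default
    # RandomStreams checks are statistical (empirical frequencies over
    # nb_samples draws), while the exact assert_array_equal for the MRG
    # streams suggests that implementation draws its binomial samples by
    # thresholding uniforms directly.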

    def test_gradients_auto_vs_manual(self):
        rng = np.random.RandomState(42)

        batch_size = 5
        input_size = 10

        rbm = RBM(input_size=input_size,
                  hidden_size=32,
                  CDk=1,
                  rng=np.random.RandomState(42))

        W = (rng.rand(rbm.hidden_size, rbm.input_size) > 0.5).astype(
            theano.config.floatX)
        rbm.W = theano.shared(value=W.astype(theano.config.floatX),
                              name='W',
                              borrow=True)

        b = (rng.rand(rbm.hidden_size) > 0.5).astype(theano.config.floatX)
        rbm.b = theano.shared(value=b.astype(theano.config.floatX),
                              name='b',
                              borrow=True)

        c = (rng.rand(rbm.input_size) > 0.5).astype(theano.config.floatX)
        rbm.c = theano.shared(value=c.astype(theano.config.floatX),
                              name='c',
                              borrow=True)

        params = [rbm.W, rbm.b, rbm.c]
        chain_start = T.matrix('start')
        chain_end = T.matrix('end')

        chain_start_value = (rng.rand(batch_size, input_size) > 0.5).astype(
            theano.config.floatX)
        chain_end_value = (rng.rand(batch_size, input_size) > 0.5).astype(
            theano.config.floatX)
        chain_start.tag.test_value = chain_start_value
        chain_end.tag.test_value = chain_end_value

        ### Computing gradients using automatic differentiation ###
        cost = T.mean(rbm.free_energy(chain_start)) - T.mean(
            rbm.free_energy(chain_end))
        gparams_auto = T.grad(cost, params, consider_constant=[chain_end])

        ### Computing gradients manually ###
        h = rbm.sample_h_given_v(chain_start, return_probs=True)
        _h = rbm.sample_h_given_v(chain_end, return_probs=True)

        grad_W = (T.dot(chain_end.T, _h) -
                  T.dot(chain_start.T, h)).T / batch_size
        grad_b = T.mean(_h - h, 0)
        grad_c = T.mean(chain_end - chain_start, 0)

        gparams_manual = [grad_W, grad_b, grad_c]
        grad_W.name, grad_b.name, grad_c.name = "grad_W", "grad_b", "grad_c"

        for gparam_auto, gparam_manual in zip(gparams_auto, gparams_manual):
            param1 = gparam_auto.eval({
                chain_start: chain_start_value,
                chain_end: chain_end_value
            })
            param2 = gparam_manual.eval({
                chain_start: chain_start_value,
                chain_end: chain_end_value
            })
            assert_array_almost_equal(param1,
                                      param2,
                                      err_msg=gparam_manual.name)
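
For reference, the manual gradients in test_gradients_auto_vs_manual follow from the standard RBM free energy, assuming the conventional energy $E(v, h) = -h^\top W v - b^\top h - c^\top v$ (an assumption about the RBM class, consistent with what the test exercises):

$F(v) = -c^\top v - \sum_j softplus(b_j + W_{j \cdot} v)$

so that $\partial F/\partial W_{ji} = -p(h_j = 1 \mid v)\, v_i$, $\partial F/\partial b_j = -p(h_j = 1 \mid v)$ and $\partial F/\partial c_i = -v_i$. Averaging the difference between the chain-end and chain-start terms over the batch reproduces grad_W, grad_b and grad_c, which is what the element-wise comparison at the end checks.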