Example 1
def test_get_weights():
    # Tests that the RBM, when constructed
    # with nvis and nhid arguments, supports the
    # weights interface

    model = RBM(nvis=2, nhid=3)
    W = model.get_weights()
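
A quick way to exercise the same interface interactively is a shape check. This is a minimal sketch, assuming get_weights() returns a numpy array of shape (nvis, nhid) and that RBM is importable from pylearn2.models.rbm:

    from pylearn2.models.rbm import RBM

    model = RBM(nvis=2, nhid=3)
    W = model.get_weights()
    # One row per visible unit, one column per hidden unit (assumed convention).
    assert W.shape == (2, 3)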
Example 2
def load_matlab_dbm(path, num_chains=1):
    """
    .. todo::

        WRITEME properly

    Loads a two-layer DBM stored in the format used by Ruslan Salakhutdinov's
    matlab demo.
    """

    d = io.loadmat(path)

    for key in d:
        try:
            d[key] = np.cast[config.floatX](d[key])
        except (TypeError, ValueError):
            # skip matlab metadata entries that cannot be cast to floatX
            pass

    visbiases = d['visbiases']
    assert len(visbiases.shape) == 2
    assert visbiases.shape[0] == 1
    visbiases = visbiases[0,:]

    hidbiases = d['hidbiases']
    assert len(hidbiases.shape) == 2
    assert hidbiases.shape[0] == 1
    hidbiases = hidbiases[0,:]

    penbiases = d['penbiases']
    assert len(penbiases.shape) == 2
    assert penbiases.shape[0] == 1
    penbiases = penbiases[0,:]

    vishid = d['vishid']
    hidpen = d['hidpen']

    D, = visbiases.shape
    N1, = hidbiases.shape
    N2, = penbiases.shape

    assert vishid.shape == (D, N1)
    assert hidpen.shape == (N1, N2)

    rbms = [RBM(nvis=D, nhid=N1),
            RBM(nvis=N1, nhid=N2)]

    dbm = DBM(rbms, negative_chains=num_chains)

    dbm.bias_vis.set_value(visbiases)
    dbm.bias_hid[0].set_value(hidbiases)
    dbm.bias_hid[1].set_value(penbiases)
    dbm.W[0].set_value(vishid)
    dbm.W[1].set_value(hidpen)

    return dbm
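
A hedged usage sketch for the loader above; the file name is hypothetical, and the .mat file must contain the visbiases, hidbiases, penbiases, vishid and hidpen entries that the function asserts on:

    # 'salakhutdinov_dbm.mat' is a placeholder name, not a real file.
    dbm = load_matlab_dbm('salakhutdinov_dbm.mat', num_chains=10)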
Example 3
def test_gibbs_step_for_v():
    # Just tests that gibbs_step_for_v can be called
    # without crashing

    model = RBM(nvis=2, nhid=3)

    theano_rng = make_theano_rng(17, which_method='binomial')

    X = T.matrix()

    Y = model.gibbs_step_for_v(X, theano_rng)
Example 4
def test_gibbs_step_for_v():
    # Just tests that gibbs_step_for_v can be called
    # without crashing (protection against refactoring
    # damage; aren't interpreted languages great?)

    model = RBM(nvis=2, nhid=3)

    theano_rng = RandomStreams(17)

    X = T.matrix()

    Y = model.gibbs_step_for_v(X, theano_rng)
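
The two variants of this test differ only in how the Theano RNG is constructed. A minimal sketch of both routes, assuming make_theano_rng lives in pylearn2.utils.rng:

    from theano.tensor.shared_randomstreams import RandomStreams
    from pylearn2.utils.rng import make_theano_rng

    rng_a = RandomStreams(17)                             # plain Theano stream
    rng_b = make_theano_rng(17, which_method='binomial')  # pylearn2 wrapper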
Example 5
def get_rbm(structure):
    n_input, n_output = structure
    config = {
        'nvis': n_input,
        'nhid': n_output,
        'irange': 0.05,
        'init_bias_hid': 0.0,
        'init_bias_vis': 0.0,
    }

    return RBM(**config)
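
Usage is then a matter of passing an (n_input, n_output) pair; the sizes below are hypothetical:

    # e.g. 784 visible units (28x28 images) and 500 hidden units
    rbm = get_rbm((784, 500))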
Example 6
def test_train_batch():
    # Just tests that train_batch can be called without crashing

    m = 1
    dim = 2
    rng = np.random.RandomState([2014, 3, 17])
    X = rng.randn(m, dim)
    train = DenseDesignMatrix(X=X)

    rbm = RBM(nvis=dim, nhid=3)
    trainer = DefaultTrainingAlgorithm(batch_size=1, batches_per_iter=10)
    trainer.setup(rbm, train)
    trainer.train(train)
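
With batch_size=1 and batches_per_iter=10, a single trainer.train(train) call should perform ten one-example updates. Longer training is then just repeated calls; a sketch (the epoch count is arbitrary):

    for epoch in range(5):  # hypothetical number of passes
        trainer.train(train)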
Example 7
    def __init__(self):
        """ gets a small batch of data
            sets up a PD-DBM model
        """

        self.tol = 1e-5

        X = np.random.RandomState([1,2,3]).randn(1000,5)

        X -= X.mean()
        X /= X.std()
        m, D = X.shape
        N = 6
        N2 = 7


        s3c = S3C(nvis=D,
                  nhid=N,
                  irange=.1,
                  init_bias_hid=-1.5,
                  init_B=3.,
                  min_B=1e-8,
                  max_B=1000.,
                  init_alpha=1., min_alpha=1e-8, max_alpha=1000.,
                  init_mu=1., e_step=None,
                  m_step=Grad_M_Step(),
                  min_bias_hid=-1e30, max_bias_hid=1e30)

        rbm = RBM(nvis=N, nhid=N2, irange=.1, init_bias_vis=-1.5,
                  init_bias_hid=1.5)

        # Don't give the model an inference procedure or learning rate so it
        # won't spend years compiling a learn_func.
        self.model = PDDBM(dbm=DBM(use_cd=1, rbms=[rbm]),
                           s3c=s3c)

        self.model.make_pseudoparams()

        self.inference_procedure = InferenceProcedure(
            clip_reflections=True, rho=.5)
        self.inference_procedure.register_model(self.model)

        self.X = X
        self.N = N
        self.N2 = N2
        self.m = m
Example 8
def test_unspecified_batch_size():

    # Test that failing to specify the batch size results in a
    # NoBatchSizeError

    m = 1
    dim = 2
    rng = np.random.RandomState([2014, 3, 17])
    X = rng.randn(m, dim)
    train = DenseDesignMatrix(X=X)

    rbm = RBM(nvis=dim, nhid=3)
    trainer = DefaultTrainingAlgorithm()
    try:
        trainer.setup(rbm, train)
    except NoBatchSizeError:
        return
    raise AssertionError("Missed the lack of a batch size")
Example 9
    def __init__(self, model=None, X=None, tol=1e-5,
                 init_H=None, init_S=None, init_G=None):
        """Gets a small batch of data and sets up a PD-DBM model."""

        self.tol = tol

        if X is None:
            X = np.random.RandomState([1,2,3]).randn(1000,5)
            X -= X.mean()
            X /= X.std()
        m, D = X.shape

        if model is None:
            N = 6
            N2 = 7


            s3c = S3C(nvis=D,
                      nhid=N,
                      irange=.1,
                      init_bias_hid=-1.5,
                      init_B=3.,
                      min_B=1e-8,
                      max_B=1000.,
                      init_alpha=1., min_alpha=1e-8, max_alpha=1000.,
                      init_mu=1., e_step=None,
                      m_step=Grad_M_Step(),
                      min_bias_hid=-1e30, max_bias_hid=1e30)

            rbm = RBM(nvis=N, nhid=N2, irange=.5, init_bias_vis=-1.5,
                      init_bias_hid=1.5)

            # Don't give the model an inference procedure or learning rate so
            # it won't spend years compiling a learn_func.
            self.model = PDDBM(dbm=DBM(use_cd=1, rbms=[rbm]),
                               s3c=s3c)

            self.model.make_pseudoparams()

            self.inference_procedure = InferenceProcedure(
                clip_reflections=True, rho=.5)
            self.inference_procedure.register_model(self.model)
        else:
            self.model = model
            self.inference_procedure = model.inference_procedure
            N = model.s3c.nhid
            N2 = model.dbm.rbms[0].nhid

        self.X = X
        self.N = N
        self.N2 = N2
        self.m = m

        if init_H is None:
            self.init_H = np.cast[config.floatX](
                self.model.rng.uniform(0., 1., (self.m, self.N)))
            self.init_S = np.cast[config.floatX](
                self.model.rng.uniform(-5., 5., (self.m, self.N)))
            self.init_G = np.cast[config.floatX](
                self.model.rng.uniform(0., 1., (self.m, self.N2)))
        else:
            assert init_S is not None
            assert init_G is not None
            self.init_H = init_H
            self.init_S = init_S
            self.init_G = init_G
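
When supplying explicit initial values instead, the shapes must match the defaults drawn above: H and S are (m, N) and G is (m, N2), all cast to floatX. A minimal sketch using the sizes this fixture defaults to:

    import numpy as np

    m, N, N2 = 1000, 6, 7
    init_H = np.random.uniform(0., 1., (m, N)).astype('float32')
    init_S = np.random.uniform(-5., 5., (m, N)).astype('float32')
    init_G = np.random.uniform(0., 1., (m, N2)).astype('float32')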
Example 10
for i in xrange(2 ** D):
    all_states.append(int_to_bits(i))

all_states = np.asarray(all_states, dtype='float32')

good_states = []

for i in xrange(D):
    good_states.append(int_to_bits(2 ** i))

good_states = np.asarray(good_states, dtype='float32')


all_states_var = T.matrix()

rbm = RBM(nvis=D, nhid=D)

Z = T.exp(-rbm.free_energy_given_v(all_states_var)).sum()

good_states_var = T.matrix()

good_prob = T.exp(-rbm.free_energy_given_v(good_states_var)).sum() / Z

good_prob_func = function([all_states_var, good_states_var], good_prob)


def run_rbm(pos_weight=1., neg_weight=1., bias_hid=-1.,
            bias_vis=inverse_sigmoid_numpy(1. / float(D))):
    rbm.bias_vis.set_value(np.ones((D,), dtype='float32') * bias_vis)
    rbm.bias_hid.set_value(np.ones((D,), dtype='float32') * bias_hid)
    # The original snippet is cut off mid-statement here; completing it so
    # the diagonal gets pos_weight and the off-diagonal entries get
    # -neg_weight (an assumption consistent with the parameter names):
    rbm.transformer._W.set_value(
        np.identity(D, dtype='float32') * (pos_weight + neg_weight)
        - np.ones((D, D), dtype='float32') * neg_weight)
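
The snippet relies on an int_to_bits helper that is not shown. A minimal version consistent with how it is used here (the D-bit binary encoding of an integer; the bit order is an assumption) might be:

    def int_to_bits(i):
        # D-bit binary encoding of i, LSB first; D is module-level, as above.
        return np.asarray([(i >> j) & 1 for j in range(D)], dtype='float32')

Given that helper, good_prob_func(all_states, good_states) evaluates the exact probability mass the RBM assigns to the D one-hot states, since Z sums exp(-free_energy) over all 2 ** D visible configurations.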
Example 11
def test_get_input_space():
    # Tests that the RBM supports
    # the Space interface

    model = RBM(nvis=2, nhid=3)
    space = model.get_input_space()
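
A slightly stronger version of this test could pin down the expected space. This sketch assumes the RBM reports a pylearn2 VectorSpace of dimension nvis, the usual convention for vector-input models:

    from pylearn2.space import VectorSpace

    space = model.get_input_space()
    assert isinstance(space, VectorSpace)
    assert space.dim == 2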
Example 12
    def __init__(self):
        """ gets a small batch of data
            sets up a PD-DBM model with its DBM weights set to 0
            so that it represents the same distribution as an S3C
            model
            Makes the S3C model it matches
            (Note that their learning rules don't match since the
            complete partition function of the S3C model is tractable
            but the PD-DBM has to approximate the h partition function
            via sampling)
        """

        self.tol = 1e-5

        X = np.random.RandomState([1,2,3]).randn(1000,5)

        X -= X.mean()
        X /= X.std()
        m, D = X.shape
        N = 6
        N2 = 7

        self.X = X
        self.N = N
        self.N2 = N2
        self.m = m
        self.D = D


        s3c_for_pddbm = self.make_s3c()
        self.s3c = self.make_s3c()

        self.s3c.W.set_value(s3c_for_pddbm.W.get_value())

        rbm = RBM(nvis=N, nhid=N2, irange=0., init_bias_vis=-1.5,
                  init_bias_hid=6.)

        # Don't give the model an inference procedure or learning rate so it
        # won't spend years compiling a learn_func.
        self.model = PDDBM(dbm=DBM(use_cd=1, rbms=[rbm]),
                           s3c=s3c_for_pddbm)

        self.model.make_pseudoparams()

        self.inference_procedure = InferenceProcedure(
            schedule=[['s', 0.1], ['h', 0.1], ['g', 0, 0.2], ['s', 0.2],
                      ['h', 0.2], ['s', 0.3], ['g', 0, 0.3], ['h', 0.3],
                      ['s', 0.4], ['h', 0.4], ['g', 0, 0.4], ['s', 0.4],
                      ['h', 0.4], ['g', 0, 0.5], ['s', 0.5], ['h', 0.5],
                      ['s', 0.1], ['h', 0.5]],
            clip_reflections=True,
            rho=.5)
        self.inference_procedure.register_model(self.model)

        self.e_step = make_e_step_from_inference_procedure(self.inference_procedure)

        self.e_step.register_model(self.s3c)

        self.s3c.make_pseudoparams()

        # Check that all the parameters match.
        assert np.abs(self.s3c.W.get_value() - self.model.s3c.W.get_value()).max() == 0.0
        assert np.abs(self.s3c.bias_hid.get_value() - self.model.s3c.bias_hid.get_value()).max() == 0.0
        assert np.abs(self.s3c.alpha.get_value() - self.model.s3c.alpha.get_value()).max() == 0.0
        assert np.abs(self.s3c.mu.get_value() - self.model.s3c.mu.get_value()).max() == 0.0
        assert np.abs(self.s3c.B_driver.get_value() - self.model.s3c.B_driver.get_value()).max() == 0.0

        # Check that the assumptions making these tests valid are met.
        assert np.abs(self.model.dbm.W[0].get_value()).max() == 0.0
Example 13
    def get_model(self):
        self.model = RBM(nvis=self.vis, nhid=self.hid, irange=.05)
        return self.model