def __init__(self, n_visible, n_hidden):
    """Binary-binary RBM: binary visibles, binary hiddens, a product
    weight matrix W, and one bias vector per unit group."""
    super(BinaryBinaryRBM, self).__init__()
    # data dimensions
    self.n_visible = n_visible
    self.n_hidden = n_hidden
    # unit groups
    self.v = units.BinaryUnits(self, name='v')  # visible units
    self.h = units.BinaryUnits(self, name='h')  # hidden units
    # shared parameter variables (initializers called in W, bv, bh order,
    # matching the original construction order)
    W_var = theano.shared(value=self._initial_W(), name='W')
    bv_var = theano.shared(value=self._initial_bv(), name='bv')
    bh_var = theano.shared(value=self._initial_bh(), name='bh')
    # parameter objects tying the shared variables to the unit groups
    self.W = parameters.ProdParameters(self, [self.v, self.h], W_var,
                                       name='W')  # weights
    self.bv = parameters.BiasParameters(self, self.v, bv_var,
                                        name='bv')  # visible bias
    self.bh = parameters.BiasParameters(self, self.h, bh_var,
                                        name='bh')  # hidden bias
 def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
     super(LearntPrecisionSeparateGaussianBinaryRBM, self).__init__()
     # data shape
     self.n_visible = n_visible
     self.n_hidden_mean = n_hidden_mean
     self.n_hidden_precision = n_hidden_precision
     # units
     self.v = units.LearntPrecisionGaussianUnits(self, name='v')  # visibles
     self.hm = units.BinaryUnits(self, name='hm')  # hiddens for mean
     self.hp = units.BinaryUnits(self, name='hp')  # hiddens for precision
     # parameters
     self.Wm = parameters.ProdParameters(
         self, [self.v, self.hm],
         theano.shared(value=self._initial_W(self.n_visible,
                                             self.n_hidden_mean),
                       name='Wm'),
         name='Wm')  # weights
     self.Wp = parameters.ProdParameters(
         self, [self.v.precision_units, self.hp],
         theano.shared(value=-np.abs(
             self._initial_W(self.n_visible, self.n_hidden_precision)) /
                       1000,
                       name='Wp'),
         name='Wp')  # weights
     self.bvm = parameters.BiasParameters(
         self,
         self.v,
         theano.shared(value=self._initial_bias(self.n_visible),
                       name='bvm'),
         name='bvm')  # visible bias
     self.bvp = parameters.BiasParameters(
         self,
         self.v.precision_units,
         theano.shared(value=self._initial_bias(self.n_visible),
                       name='bvp'),
         name='bvp')  # precision bias
     self.bhm = parameters.BiasParameters(
         self,
         self.hm,
         theano.shared(value=self._initial_bias(self.n_hidden_mean),
                       name='bhm'),
         name='bhm')  # hidden bias for mean
     self.bhp = parameters.BiasParameters(
         self,
         self.hp,
         theano.shared(value=self._initial_bias(self.n_hidden_precision) +
                       1.0,
                       name='bhp'),
         name='bhp')  # hidden bias for precision
 def __init__(self, n_visible, n_hidden):
     super(LearntPrecisionGaussianBinaryRBM, self).__init__()
     # data shape
     self.n_visible = n_visible
     self.n_hidden = n_hidden
     # units
     self.v = units.LearntPrecisionGaussianUnits(self, name='v')  # visibles
     self.h = units.BinaryUnits(self, name='h')  # hiddens
     # parameters
     self.Wm = parameters.ProdParameters(self, [self.v, self.h],
                                         theano.shared(
                                             value=self._initial_W(),
                                             name='Wm'),
                                         name='Wm')  # weights
     self.Wp = parameters.ProdParameters(
         self, [self.v.precision_units, self.h],
         theano.shared(value=-np.abs(self._initial_W()) / 1000, name='Wp'),
         name='Wp')  # weights
     self.bvm = parameters.BiasParameters(
         self,
         self.v,
         theano.shared(value=self._initial_bias(self.n_visible),
                       name='bvm'),
         name='bvm')  # visible bias
     self.bvp = parameters.BiasParameters(
         self,
         self.v.precision_units,
         theano.shared(value=self._initial_bias(self.n_visible),
                       name='bvp'),
         name='bvp')  # precision bias
     self.bh = parameters.BiasParameters(
         self,
         self.h,
         theano.shared(value=self._initial_bias(self.n_hidden), name='bh'),
         name='bh')  # hidden bias
# Example #4
# 0
    def __init__(self, n_visible, n_hidden, n_factors):
        """Factored binary-binary RBM: the visible-hidden interaction is
        mediated by a factor layer of size n_factors, with separate
        visible-to-factor (Wv) and hidden-to-factor (Wh) weight matrices."""
        super(FactoredBinaryBinaryRBM, self).__init__()
        # data dimensions
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.n_factors = n_factors
        # unit groups
        self.v = units.BinaryUnits(self, name='v')  # visible units
        self.h = units.BinaryUnits(self, name='h')  # hidden units
        # shared weight matrices feeding into the factor
        Wv_var = theano.shared(
            value=self._initial_W(self.n_visible, self.n_factors), name='Wv')
        Wh_var = theano.shared(
            value=self._initial_W(self.n_hidden, self.n_factors), name='Wh')
        # factor node plus its two product parameter sets
        self.F = factors.Factor(self, name='F')
        self.Wv = parameters.ProdParameters(self.F, [self.v, self.F], Wv_var,
                                            name='Wv')
        self.Wh = parameters.ProdParameters(self.F, [self.h, self.F], Wh_var,
                                            name='Wh')
        # initialize the factor (done after attaching Wv and Wh)
        self.F.initialize()

        # per-unit bias terms
        self.bv = parameters.BiasParameters(
            self, self.v,
            theano.shared(value=self._initial_bv(), name='bv'),
            name='bv')  # visible bias
        self.bh = parameters.BiasParameters(
            self, self.h,
            theano.shared(value=self._initial_bh(), name='bh'),
            name='bh')  # hidden bias
# Example #5
# 0
from morb import base, units, parameters, stats, param_updaters, trainers, monitors

# This example shows how the FIOTRBM model from "Facial Expression Transfer with
# Input-Output Temporal Restricted Boltzmann Machines" by Zeiler et al. (NIPS
# 2011) can be recreated in Morb.

# Model structure: Gaussian outputs conditioned on binary latents plus two
# context unit groups (current input and output history).
rbm = base.RBM()
rbm.v = units.GaussianUnits(rbm)  # output (visibles)
rbm.h = units.BinaryUnits(rbm)  # latent (hiddens)
rbm.s = units.Units(rbm)  # input (context)
rbm.vp = units.Units(rbm)  # output history (context)

# NOTE(review): the `...` placeholders below must be replaced with real
# initial-value arrays (or shared variables) before this example can run.
initial_A = ...
initial_B = ...
initial_bv = ...
initial_bh = ...
initial_Wv = ...
initial_Wh = ...
initial_Ws = ...

# Parameters are registered on `rbm` as a side effect of construction,
# so the order of the statements below is part of the example.
parameters.FixedBiasParameters(
    rbm, rbm.v.precision_units)  # add precision term to the energy function
rbm.A = parameters.ProdParameters(
    rbm, [rbm.vp, rbm.v],
    initial_A)  # weights from past output to current output
rbm.B = parameters.ProdParameters(
    rbm, [rbm.vp, rbm.h], initial_B)  # weights from past output to hiddens
rbm.bv = parameters.BiasParameters(rbm, rbm.v, initial_bv)  # visible bias
rbm.bh = parameters.BiasParameters(rbm, rbm.h, initial_bh)  # hidden bias
rbm.W = parameters.ThirdOrderFactoredParameters(
    rbm, [rbm.v, rbm.h, rbm.s],
def morbrun1(f1=1, f2=1, v1=1, v2=1, kern=1):
    """Build and CD-train a convolutional binary-binary RBM, returning the
    learnt filters and hidden biases as theano shared variables.

    Parameters:
        f1, f2 -- filter height and width
        v1, v2 -- height and width of each visible map
        kern   -- number of visible maps (input channels)

    Returns:
        [Wl, bhl] -- fresh theano shared variables holding the trained
        weight tensor and hidden bias values.

    NOTE(review): depends on module-level globals not visible here --
    eval_print1/2/3 (data arrays), batchm (minibatch size), neuron
    (number of hidden maps), indk, epoch_cd, mode -- confirm they are
    defined by the enclosing script before calling.
    """

    # Flatten the raw data, then reshape into 4D tensors of shape
    # (examples, channels, height, width).
    # NOTE(review): .flatten(2) relies on an old numpy/theano signature
    # accepting an integer order argument -- verify on the target version.
    test_set_x = np.array(eval_print1).flatten(2)
    valid_set_x = np.array(eval_print3).flatten(2)
    train_set_x = np.array(eval_print2).flatten(2)

    train_set_x = train_set_x.reshape(
        np.array(eval_print2).shape[0] * batchm, kern, v1, v2)
    valid_set_x = valid_set_x.reshape(
        np.array(eval_print3).shape[0] * batchm, kern, v1, v2)
    test_set_x = test_set_x.reshape(
        np.array(eval_print1).shape[0] * batchm, kern, v1, v2)

    # Network geometry.
    visible_maps = kern
    hidden_maps = neuron  # 100 # 50
    filter_height = f1  # 7 # 8
    filter_width = f2  # 30 # 8
    mb_size = batchm  # 1 minibatch

    print ">> Constructing RBM..."
    fan_in = visible_maps * filter_height * filter_width
    """
   initial_W = numpy.asarray(
            self.numpy_rng.uniform(
                low = - numpy.sqrt(3./fan_in),
                high = numpy.sqrt(3./fan_in),
                size = self.filter_shape
            ), dtype=theano.config.floatX)
  """
    # Seeded Gaussian weight init scaled by fan-in; biases start at zero.
    numpy_rng = np.random.RandomState(123)
    initial_W = np.asarray(numpy_rng.normal(0,
                                            0.5 / np.sqrt(fan_in),
                                            size=(hidden_maps, visible_maps,
                                                  filter_height,
                                                  filter_width)),
                           dtype=theano.config.floatX)
    initial_bv = np.zeros(visible_maps, dtype=theano.config.floatX)
    initial_bh = np.zeros(hidden_maps, dtype=theano.config.floatX)

    # Shape metadata required by the convolutional parameter class.
    shape_info = {
        'hidden_maps': hidden_maps,
        'visible_maps': visible_maps,
        'filter_height': filter_height,
        'filter_width': filter_width,
        'visible_height': v1,  #45+8,
        'visible_width': v2,  #30,
        'mb_size': mb_size
    }

    # rbms.SigmoidBinaryRBM(n_visible, n_hidden)
    rbm = morb.base.RBM()
    rbm.v = units.BinaryUnits(rbm, name='v')  # visibles
    rbm.h = units.BinaryUnits(rbm, name='h')  # hiddens
    rbm.W = parameters.Convolutional2DParameters(rbm, [rbm.v, rbm.h],
                                                 theano.shared(value=initial_W,
                                                               name='W'),
                                                 name='W',
                                                 shape_info=shape_info)
    # one bias per map (so shared across width and height):
    rbm.bv = parameters.SharedBiasParameters(rbm,
                                             rbm.v,
                                             3,
                                             2,
                                             theano.shared(value=initial_bv,
                                                           name='bv'),
                                             name='bv')
    rbm.bh = parameters.SharedBiasParameters(rbm,
                                             rbm.h,
                                             3,
                                             2,
                                             theano.shared(value=initial_bh,
                                                           name='bh'),
                                             name='bh')

    initial_vmap = {rbm.v: T.tensor4('v')}

    # calculate weight updates using contrastive divergence stats (k=5)
    print ">> Constructing contrastive divergence updaters..."
    s = stats.cd_stats(rbm,
                       initial_vmap,
                       visible_units=[rbm.v],
                       hidden_units=[rbm.h],
                       k=5,
                       mean_field_for_stats=[rbm.v],
                       mean_field_for_gibbs=[rbm.v])

    # Learning rate; indk == -1 disables learning entirely.
    lr_cd = 0.001
    if indk == -1:
        lr_cd = 0

    # One update rule per parameter: new value = value + lr * CD update.
    umap = {}
    for var in rbm.variables:
        pu = var + lr_cd * updaters.CDUpdater(rbm, var, s)
        umap[var] = pu

    print ">> Compiling functions..."
    t = trainers.MinibatchTrainer(rbm, umap)
    m = monitors.reconstruction_mse(s, rbm.v)

    # Also monitor the mean energy of the data and model statistics.
    e_data = rbm.energy(s['data']).mean()
    e_model = rbm.energy(s['model']).mean()

    # train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
    train = t.compile_function(initial_vmap,
                               mb_size=mb_size,
                               monitors=[m, e_data, e_model],
                               name='train',
                               mode=mode)

    # TRAINING

    epochs = epoch_cd
    print ">> Training for %d epochs..." % epochs

    # Each epoch runs one pass over the training set; only the last
    # epoch's monitor values remain bound after the loop.
    for epoch in range(epochs):
        monitoring_data_train = [
            (cost, energy_data, energy_model)
            for cost, energy_data, energy_model in train({rbm.v: train_set_x})
        ]
        mses_train, edata_train_list, emodel_train_list = zip(
            *monitoring_data_train)

    # Wrap the learnt values in fresh shared variables so the caller can
    # reuse them (e.g. to initialise a deeper layer).
    #print rbm.W.var.get_value().shape
    lay1w = rbm.W.var.get_value()
    Wl = theano.shared(lay1w)
    lay1bh = rbm.bh.var.get_value()
    bhl = theano.shared(lay1bh)
    #print Wl.get_value().shape
    return [Wl, bhl]
# Example #7
# 0
n_visible = data.shape[1]
n_context = data_context.shape[1]
n_hidden = 20

print ">> Constructing RBM..."
numpy_rng = np.random.RandomState(123)
initial_W = np.asarray(np.random.uniform(
    low=-4 * np.sqrt(6. / (n_hidden + n_visible + n_context)),
    high=4 * np.sqrt(6. / (n_hidden + n_visible + n_context)),
    size=(n_visible, n_hidden, n_context)),
                       dtype=theano.config.floatX)
initial_bv = np.zeros(n_visible, dtype=theano.config.floatX)
initial_bh = np.zeros(n_hidden, dtype=theano.config.floatX)

rbm = morb.base.RBM()
rbm.v = units.BinaryUnits(rbm, name='v')  # visibles
rbm.h = units.BinaryUnits(rbm, name='h')  # hiddens
rbm.x = units.Units(rbm, name='x')  # context

rbm.W = parameters.ThirdOrderParameters(rbm, [rbm.v, rbm.h, rbm.x],
                                        theano.shared(value=initial_W,
                                                      name='W'),
                                        name='W')  # weights
rbm.bv = parameters.BiasParameters(rbm,
                                   rbm.v,
                                   theano.shared(value=initial_bv, name='bv'),
                                   name='bv')  # visible bias
rbm.bh = parameters.BiasParameters(rbm,
                                   rbm.h,
                                   theano.shared(value=initial_bh, name='bh'),
                                   name='bh')  # hidden bias