Code example #1
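These snippets (examples #1 to #4) build on the morb RBM library for Theano (its base, units, parameters, stats, updaters, trainers and monitors modules). A hedged guess at the imports the excerpts assume, which are not shown in the originals:

from morb import base, units, parameters, stats, updaters, trainers, monitors
import theano
import theano.tensor as T
import numpy as np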
class BinaryBinaryRBM(base.RBM):  # enclosing class; base.RBM assumed, per example #4
    def __init__(self, n_visible, n_hidden):
        super(BinaryBinaryRBM, self).__init__()
        # data shape
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        # units
        self.v = units.BinaryUnits(self, name='v')  # visibles
        self.h = units.BinaryUnits(self, name='h')  # hiddens
        # parameters
        self.W = parameters.ProdParameters(self, [self.v, self.h],
                                           theano.shared(value=self._initial_W(),
                                                         name='W'),
                                           name='W')  # weights
        self.bv = parameters.BiasParameters(self, self.v,
                                            theano.shared(value=self._initial_bv(),
                                                          name='bv'),
                                            name='bv')  # visible bias
        self.bh = parameters.BiasParameters(self, self.h,
                                            theano.shared(value=self._initial_bh(),
                                                          name='bh'),
                                            name='bh')  # hidden bias
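A minimal usage sketch for the class above; the layer sizes are made up, and the _initial_W/_initial_bv/_initial_bh helpers are methods of the class that the excerpt does not show:

rbm = BinaryBinaryRBM(n_visible=784, n_hidden=500)
initial_vmap = {rbm.v: T.matrix('v')}  # symbolic map from units to minibatch data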
Code example #2
class LearntPrecisionSeparateGaussianBinaryRBM(base.RBM):  # enclosing class, assumed as above
    def __init__(self, n_visible, n_hidden_mean, n_hidden_precision):
        super(LearntPrecisionSeparateGaussianBinaryRBM, self).__init__()
        # data shape
        self.n_visible = n_visible
        self.n_hidden_mean = n_hidden_mean
        self.n_hidden_precision = n_hidden_precision
        # units
        self.v = units.LearntPrecisionGaussianUnits(self, name='v')  # visibles
        self.hm = units.BinaryUnits(self, name='hm')  # hiddens for mean
        self.hp = units.BinaryUnits(self, name='hp')  # hiddens for precision
        # parameters
        self.Wm = parameters.ProdParameters(
            self, [self.v, self.hm],
            theano.shared(value=self._initial_W(self.n_visible, self.n_hidden_mean),
                          name='Wm'),
            name='Wm')  # weights to the mean hiddens
        self.Wp = parameters.ProdParameters(
            self, [self.v.precision_units, self.hp],
            theano.shared(value=-np.abs(self._initial_W(self.n_visible,
                                                        self.n_hidden_precision)) / 1000,
                          name='Wp'),
            name='Wp')  # weights to the precision hiddens
        self.bvm = parameters.BiasParameters(
            self, self.v,
            theano.shared(value=self._initial_bias(self.n_visible), name='bvm'),
            name='bvm')  # visible bias
        self.bvp = parameters.BiasParameters(
            self, self.v.precision_units,
            theano.shared(value=self._initial_bias(self.n_visible), name='bvp'),
            name='bvp')  # precision bias
        self.bhm = parameters.BiasParameters(
            self, self.hm,
            theano.shared(value=self._initial_bias(self.n_hidden_mean), name='bhm'),
            name='bhm')  # hidden bias for mean
        self.bhp = parameters.BiasParameters(
            self, self.hp,
            theano.shared(value=self._initial_bias(self.n_hidden_precision) + 1.0,
                          name='bhp'),
            name='bhp')  # hidden bias for precision
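Note the asymmetric initialisation above: Wp starts at small negative values (-|W| / 1000) and bhp is shifted by +1.0, presumably so that the learnt precisions start out positive and the precision hiddens start out mostly active, keeping early Gibbs sampling numerically stable.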
Code example #3
class LearntPrecisionGaussianBinaryRBM(base.RBM):  # enclosing class, assumed as above
    def __init__(self, n_visible, n_hidden):
        super(LearntPrecisionGaussianBinaryRBM, self).__init__()
        # data shape
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        # units
        self.v = units.LearntPrecisionGaussianUnits(self, name='v')  # visibles
        self.h = units.BinaryUnits(self, name='h')  # hiddens
        # parameters
        self.Wm = parameters.ProdParameters(self, [self.v, self.h],
                                            theano.shared(value=self._initial_W(),
                                                          name='Wm'),
                                            name='Wm')  # weights to the means
        self.Wp = parameters.ProdParameters(
            self, [self.v.precision_units, self.h],
            theano.shared(value=-np.abs(self._initial_W()) / 1000, name='Wp'),
            name='Wp')  # weights to the precisions
        self.bvm = parameters.BiasParameters(
            self, self.v,
            theano.shared(value=self._initial_bias(self.n_visible), name='bvm'),
            name='bvm')  # visible bias
        self.bvp = parameters.BiasParameters(
            self, self.v.precision_units,
            theano.shared(value=self._initial_bias(self.n_visible), name='bvp'),
            name='bvp')  # precision bias
        self.bh = parameters.BiasParameters(
            self, self.h,
            theano.shared(value=self._initial_bias(self.n_hidden), name='bh'),
            name='bh')  # hidden bias
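A similar hedged sketch for this Gaussian-visible variant (made-up sizes; I assume the coupled precision units are handled internally, so only rbm.v needs an entry in the vmap, mirroring the vmap construction in example #4 below):

rbm = LearntPrecisionGaussianBinaryRBM(n_visible=196, n_hidden=100)
initial_vmap = {rbm.v: T.matrix('v')}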
Code example #4
def morbrun1(f1=1, f2=1, v1=1, v2=1, kern=1):
  # Relies on module-level globals defined elsewhere in the script:
  # eval_print1/2/3 (batched data), batchm (minibatch size), neuron (number of
  # hidden maps), epoch_cd (number of CD training epochs), indk (-1 disables
  # learning) and mode (Theano compilation mode).
  test_set_x = np.array(eval_print1).flatten(order='C')
  valid_set_x = np.array(eval_print3).flatten(order='C')
  train_set_x = np.array(eval_print2).flatten(order='C')

  # reshape the flattened batches to (n_examples, channels, height, width)
  train_set_x = train_set_x.reshape(np.array(eval_print2).shape[0] * batchm, kern, v1, v2)
  valid_set_x = valid_set_x.reshape(np.array(eval_print3).shape[0] * batchm, kern, v1, v2)
  test_set_x = test_set_x.reshape(np.array(eval_print1).shape[0] * batchm, kern, v1, v2)

  visible_maps = kern
  hidden_maps = neuron
  filter_height = f1
  filter_width = f2
  mb_size = batchm  # minibatch size
  
  print(">> Constructing RBM...")
  fan_in = visible_maps * filter_height * filter_width

  """
   initial_W = numpy.asarray(
            self.numpy_rng.uniform(
                low = - numpy.sqrt(3./fan_in),
                high = numpy.sqrt(3./fan_in),
                size = self.filter_shape
            ), dtype=theano.config.floatX)
  """
  numpy_rng = np.random.RandomState(123)
  initial_W = np.asarray(
      numpy_rng.normal(
          0, 0.5 / np.sqrt(fan_in),
          size=(hidden_maps, visible_maps, filter_height, filter_width)
      ), dtype=theano.config.floatX)
  initial_bv = np.zeros(visible_maps, dtype=theano.config.floatX)
  initial_bh = np.zeros(hidden_maps, dtype=theano.config.floatX)



  shape_info = {
      'hidden_maps': hidden_maps,
      'visible_maps': visible_maps,
      'filter_height': filter_height,
      'filter_width': filter_width,
      'visible_height': v1,  # 45+8
      'visible_width': v2,  # 30
      'mb_size': mb_size
  }

  # rbms.SigmoidBinaryRBM(n_visible, n_hidden)
  rbm = base.RBM()
  rbm.v = units.BinaryUnits(rbm, name='v') # visibles
  rbm.h = units.BinaryUnits(rbm, name='h') # hiddens
  rbm.W = parameters.Convolutional2DParameters(rbm, [rbm.v, rbm.h],
                                               theano.shared(value=initial_W, name='W'),
                                               name='W', shape_info=shape_info)
  # one bias per map (so shared across width and height):
  rbm.bv = parameters.SharedBiasParameters(rbm, rbm.v, 3, 2,
                                           theano.shared(value=initial_bv, name='bv'),
                                           name='bv')
  rbm.bh = parameters.SharedBiasParameters(rbm, rbm.h, 3, 2,
                                           theano.shared(value=initial_bh, name='bh'),
                                           name='bh')

  initial_vmap = { rbm.v: T.tensor4('v') }

  # calculate weight updates using CD-k statistics (k=5 here, not CD-1)
  print(">> Constructing contrastive divergence updaters...")
  s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h],
                     k=5, mean_field_for_stats=[rbm.v], mean_field_for_gibbs=[rbm.v])


  lr_cd = 0.001
  if indk == -1:
      lr_cd = 0  # indk == -1 disables learning

  umap = {}
  for var in rbm.variables:
    # new value = old value + learning rate * CD update direction
    pu = var + lr_cd * updaters.CDUpdater(rbm, var, s)
    umap[var] = pu

  print(">> Compiling functions...")
  t = trainers.MinibatchTrainer(rbm, umap)
  m = monitors.reconstruction_mse(s, rbm.v)

  e_data = rbm.energy(s['data']).mean()
  e_model = rbm.energy(s['model']).mean()


  # train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
  train = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, e_data, e_model], name='train', mode=mode)


  # TRAINING 

  epochs = epoch_cd
  print(">> Training for %d epochs..." % epochs)

  for epoch in range(epochs):
    monitoring_data_train = [(cost, energy_data, energy_model)
                             for cost, energy_data, energy_model
                             in train({rbm.v: train_set_x})]
    mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
    print("epoch %d: mean training MSE = %.6f" % (epoch, np.mean(mses_train)))

  # hand back the learned weights and hidden biases as fresh shared variables,
  # e.g. to initialise the next layer of a stack
  lay1w = rbm.W.var.get_value()
  Wl = theano.shared(lay1w)
  lay1bh = rbm.bh.var.get_value()
  bhl = theano.shared(lay1bh)
  return [Wl, bhl]
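A hypothetical call, assuming the module-level globals noted at the top of morbrun1 (eval_print1/2/3, batchm, neuron, epoch_cd, indk, mode) have been set up beforehand:

Wl, bhl = morbrun1(f1=5, f2=5, v1=28, v2=28, kern=1)
# Wl and bhl can then seed the parameters of the next layer in a stack.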