Example #1
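Builds a PyDeep autoencoder and trains it with gradient descent, printing reconstruction error and sparseness for the training and test sets each epoch. The excerpt begins inside the constructor call, so the imports, the placeholder data, and the ae = aeModel.AutoEncoder( head below are reconstructions inferred from the identifiers used, not part of the original snippet.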
# Imports assumed from the identifiers used in this excerpt (PyDeep naming)
import numpy as numx
import pydeep.ae.model as aeModel
import pydeep.ae.trainer as aeTrainer
import pydeep.base.activationfunction as act
import pydeep.base.costfunction as cost

# Placeholder setup (assumption: the original defines these elsewhere)
v1 = v2 = 14                                # visible layer is v1 x v2 pixels
h1 = h2 = 10                                # hidden layer is h1 x h2 units
train_data = numx.random.randn(1000, v1 * v2)
test_data = numx.random.randn(100, v1 * v2)
max_epochs = 20
batch_size = 10

# Construct the autoencoder (the excerpt begins inside this call;
# the call head is a reconstruction)
ae = aeModel.AutoEncoder(
    v1 * v2,
    h1 * h2,
    data=train_data,
    visible_activation_function=act.Identity(),
    hidden_activation_function=act.Sigmoid(),
    cost_function=cost.SquaredError(),
    initial_weights=0.01,
    initial_visible_bias=0.0,
    # The negative hidden bias makes the units initially inactive,
    # which speeds up learning a little
    initial_hidden_bias=-2.0,
    initial_visible_offsets=0.0,
    initial_hidden_offsets=0.02,
    dtype=numx.float64)

# Initialize the gradient descent trainer
trainer = aeTrainer.GDTrainer(ae)

# Train the model
print('Training')
print('Epoch\tRE train\t\tRE test\t\t\tSparseness train\t\tSparseness test')
for epoch in range(max_epochs + 1):

    # Shuffle data
    train_data = numx.random.permutation(train_data)

    # Print reconstruction error and sparseness for training and test data
    print(epoch, '\t\t', numx.mean(ae.reconstruction_error(train_data)),
          '\t', numx.mean(ae.reconstruction_error(test_data)),
          '\t', numx.mean(ae.encode(train_data)),
          '\t', numx.mean(ae.encode(test_data)))

    # Update the model one mini-batch at a time. The update call was cut off
    # in the excerpt; the trainer.train(...) line is an assumption, matching
    # the GDTrainer API used in Example #2 (epsilon value is illustrative).
    for b in range(0, train_data.shape[0], batch_size):
        trainer.train(data=train_data[b:b + batch_size],
                      num_epochs=1,
                      epsilon=0.1)
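Note that the statistics are printed before each epoch's updates, so the row for epoch 0 reports the untrained model, and the final row reflects max_epochs passes over the data.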
Example #2
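A test helper that trains an autoencoder with PyDeep's GDTrainer, trains it for ten further epochs, and asserts that the mean energy did not increase. The excerpt contains only the method itself, so the imports and the enclosing unittest class below are assumptions added to make it self-contained.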
# Imports assumed from the identifiers used in this excerpt (PyDeep naming)
import unittest

import numpy as numx
import pydeep.ae.trainer as TRAINER


class TestAETrainer(unittest.TestCase):  # enclosing class is an assumption

    def perform_training(self,
                         ae,
                         data,
                         epsilon,
                         momentum,
                         update_visible_offsets,
                         update_hidden_offsets,
                         corruptor,
                         reg_L1Norm,
                         reg_L2Norm,
                         reg_sparseness,
                         desired_sparseness,
                         reg_contractive,
                         reg_slowness,
                         data_next,
                         restrict_gradient,
                         restriction_norm,
                         num_epochs=1000):
        # Fix the seed so the check is deterministic
        numx.random.seed(42)
        tr = TRAINER.GDTrainer(ae)
        # First training phase
        tr.train(data=data,
                 num_epochs=num_epochs,
                 epsilon=epsilon,
                 momentum=momentum,
                 update_visible_offsets=update_visible_offsets,
                 update_hidden_offsets=update_hidden_offsets,
                 corruptor=corruptor,
                 reg_L1Norm=reg_L1Norm,
                 reg_L2Norm=reg_L2Norm,
                 reg_sparseness=reg_sparseness,
                 desired_sparseness=desired_sparseness,
                 reg_contractive=reg_contractive,
                 reg_slowness=reg_slowness,
                 data_next=data_next,
                 restrict_gradient=restrict_gradient,
                 restriction_norm=restriction_norm)
        # Mean energy after the first phase
        rec1 = numx.mean(
            ae.energy(x=data,
                      contractive_penalty=reg_contractive,
                      sparse_penalty=reg_sparseness,
                      desired_sparseness=desired_sparseness,
                      x_next=data_next,
                      slowness_penalty=reg_slowness))
        # Train for ten more epochs with the same settings
        tr.train(data=data,
                 num_epochs=10,
                 epsilon=epsilon,
                 momentum=momentum,
                 update_visible_offsets=update_visible_offsets,
                 update_hidden_offsets=update_hidden_offsets,
                 corruptor=corruptor,
                 reg_L1Norm=reg_L1Norm,
                 reg_L2Norm=reg_L2Norm,
                 reg_sparseness=reg_sparseness,
                 desired_sparseness=desired_sparseness,
                 reg_contractive=reg_contractive,
                 reg_slowness=reg_slowness,
                 data_next=data_next,
                 restrict_gradient=restrict_gradient,
                 restriction_norm=restriction_norm)
        # Mean energy after the additional epochs
        rec2 = numx.mean(
            ae.energy(x=data,
                      contractive_penalty=reg_contractive,
                      sparse_penalty=reg_sparseness,
                      desired_sparseness=desired_sparseness,
                      x_next=data_next,
                      slowness_penalty=reg_slowness))
        # The additional training must not have increased the energy
        assert numx.all(rec1 - rec2 >= 0.0)
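For context, a hypothetical test method using this helper might look as follows; the model size, the random data, and every hyperparameter below are illustrative assumptions, not taken from the original test.

    def test_training_reduces_energy(self):  # hypothetical test method
        import pydeep.ae.model as aeModel  # model module as in Example #1
        data = numx.random.randn(100, 16)
        # Small autoencoder; constructor defaults assumed for all other args
        ae = aeModel.AutoEncoder(16, 4, data=data)
        self.perform_training(ae=ae,
                              data=data,
                              epsilon=0.1,
                              momentum=0.0,
                              update_visible_offsets=0.0,
                              update_hidden_offsets=0.01,
                              corruptor=None,
                              reg_L1Norm=0.0,
                              reg_L2Norm=0.0,
                              reg_sparseness=0.0,
                              desired_sparseness=0.01,
                              reg_contractive=0.0,
                              reg_slowness=0.0,
                              data_next=None,
                              restrict_gradient=False,
                              restriction_norm='Mat',  # unused when restrict_gradient is False
                              num_epochs=50)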