# Example #1
def pretrain_autoencoder(net,
                         x,
                         x_val,
                         rbm_lr=0.001,
                         rbm_use_gauss_visible=False,
                         rbm_use_gauss_hidden=True,
                         rbm_mom=0.5,
                         rbm_weight_decay=0.0000,
                         rbm_lr_decay=0.0,
                         rbm_batch_size=100,
                         rbm_epochs=100,
                         rbm_patience=-1,
                         verbose=1):
    """Greedy layer-wise RBM pre-training for an autoencoder network.

    Trains one RBM per encoder layer (the first half of ``net.arch``,
    excluding the input layer).  Each RBM after the first is fed the
    output of the previous RBM's ``get_h`` (presumably its hidden-unit
    activations — confirm against the RBM class).  The learned weights
    are then copied into the network's Dense layers: walking the RBM
    list forward for the encoder half, then backward for the mirrored
    decoder half (direction ``-1`` is forwarded to
    ``copy_dense_weights_from_rbm``; presumably a transposed copy).

    Parameters
    ----------
    net : network object; ``net.arch`` (layer sizes, input first) and
        ``net.layers`` are read, and Dense layers are overwritten.
    x, x_val : training / validation data; ``x.shape[1]`` is used as the
        visible-unit count of the first RBM.
    rbm_lr, rbm_lr_decay, rbm_mom, rbm_weight_decay : optimizer
        hyperparameters forwarded to ``RBM.set_lr``.
    rbm_use_gauss_visible, rbm_use_gauss_hidden : Gaussian sampling
        flags forwarded to the ``RBM`` constructor.
    rbm_batch_size, rbm_epochs, rbm_patience : training settings
        forwarded to ``RBM.fit`` (``patience=-1`` presumably disables
        early stopping — confirm against RBM.fit).
    verbose : kept for interface compatibility (currently unused).

    Returns
    -------
    list
        The trained RBM objects, one per encoder layer.
    """
    # Encoder half of the architecture, without the input layer.
    encoder_arch = net.arch[1:math.ceil(len(net.arch) / 2.0)]
    rbm_list = []
    # Greedy layer-wise training: each RBM trains on the representation
    # produced by the previously trained RBM.
    for layer_idx, n_hidden in enumerate(encoder_arch):
        # Message fixed: this loop performs pre-training, not fine tuning
        # (the closing message below already says "Pre training finished!").
        print("\nPre-training layer number " + str(layer_idx))
        if layer_idx == 0:
            x_new = x
            x_val_new = x_val
        else:
            x_new = rbm_list[-1].get_h(x_new)
            x_val_new = rbm_list[-1].get_h(x_val_new)
        rbm = RBM(x_new.shape[1],
                  n_hidden,
                  use_gaussian_visible_sampling=rbm_use_gauss_visible,
                  use_gaussian_hidden_sampling=rbm_use_gauss_hidden,
                  use_sample_vis_for_learning=False)
        rbm.set_lr(
            rbm_lr,
            rbm_lr_decay,
            momentum=rbm_mom,
            weight_decay=rbm_weight_decay,
        )
        rbm.fit(x_new,
                x_val_new,
                batch_size=rbm_batch_size,
                epochs=rbm_epochs,
                patience=rbm_patience)
        rbm_list.append(rbm)
    # Copy RBM weights into the network's Dense layers, ping-pong style:
    # indices 0..k-1 forward for the encoder, then k-1..0 backward for
    # the decoder (the middle RBM is used twice, once per direction).
    rbm_iterator = 0
    rbm_iterator_direction = 1
    for layer in net.layers:
        if layer.ID == "Dense":
            copy_dense_weights_from_rbm(rbm_list[rbm_iterator],
                                        layer,
                                        rbm_iterator_direction)
            if (rbm_iterator == len(rbm_list) - 1
                    and rbm_iterator_direction == 1):
                # Reached the middle: reuse the last RBM once more, then
                # walk back through the list for the decoder half.
                rbm_iterator_direction = -1
            else:
                rbm_iterator += rbm_iterator_direction
    print("Pre training finished!")
    return rbm_list