# NOTE(review): this chunk opens mid-statement — only the trailing keyword
# arguments of the DBN constructor call are visible; the call begins on a line
# above this view.
sampler=sampler, optimizer=optimizer, continuous_output=continuous_out, device=device)

# Reuse previously saved weights instead of training from scratch.
if pre_trained:
    dbn.load_model('DBN.h5')

# -----------------------------------------------------------------------------
# Training
# -----------------------------------------------------------------------------
if not pre_trained:
    # Layer-wise unsupervised pre-training, then fine-tuning of the stack,
    # and persist the learned weights for later `pre_trained` runs.
    dbn.pretrain(input_data=data, epochs=pretrain_epochs, batch_size=batch_size, test=test)
    dbn.finetune(input_data=data, lr=finetune_lr, epochs=finetune_epochs, batch_size=batch_size)
    dbn.save_model('DBN.h5')

# -----------------------------------------------------------------------------
# Plotting
# -----------------------------------------------------------------------------
print('#########################################')
print('# Generating samples #')
print('#########################################')

# Top-most RBM of the generative stack; its visible bias fixes the sample width.
top_RBM = dbn.gen_layers[-1]
plt.figure(figsize=(20, 10))
# All-zero initial visible state: 25 rows, one per tile of the grid below.
zero = torch.zeros(25, len(top_RBM.vbias)).to(device)
# Canvas for a 5x5 grid of 28x28 tiles (presumably MNIST digits — confirm upstream).
images = [np.zeros((5 * 28, 5 * 28))]
# NOTE(review): the loop body continues past this view — left truncated as-is.
for i in range(200):
# -----------------------------------------------------------------------------
# Train/test split: last column is the target, the rest are features.
# Assumes `trainset` / `testset` are 2-D arrays — TODO confirm with the loader.
# -----------------------------------------------------------------------------
x_train = trainset[:, :-1]
y_train = trainset[:, -1:]
x_test = testset[:, :-1]
y_test = testset[:, -1:]
print('x_train.shape:' + str(x_train.shape))
print('y_train.shape:' + str(y_train.shape))
print('x_test.shape:' + str(x_test.shape))
# FIX: added the missing ':' so this label matches the three printouts above.
print('y_test.shape:' + str(y_test.shape))

# Build model
dbn = DBN(hidden_units, input_length, output_length, device=device)

# Train model: unsupervised pre-training, then supervised fine-tuning.
dbn.pretrain(x_train, epoch=epoch_pretrain, batch_size=batch_size)
dbn.finetune(x_train, y_train, epoch_finetune, batch_size, loss_function,
             optimizer(dbn.parameters()))

# Make prediction and plot.
y_predict = dbn.predict(x_test, batch_size)
# Undo the scaler's normalisation so the plot and MSE are in original units.
y_real = scaler.inverse_transform(y_test.reshape(-1, 1)).flatten()
y_predict = scaler.inverse_transform(y_predict.reshape(-1, 1)).flatten()
plt.figure(1)
plt.plot(y_real, label='real')
plt.plot(y_predict, label='prediction')
plt.xlabel('MSE Error: {}'.format(mean_squared_error(y_real, y_predict)))
plt.legend()
plt.title('Prediction result')
plt.show()