# Convert the history to a Pandas dataframe
hist = pd.DataFrame(hist.history)
hist.index.name = "Epochs"

# Plot training evolution
tit = "Validation loss: {:.3f} - Training loss: {:.3f}".format(*hist.min())
hist.plot(grid=True, title=tit)
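
# %%
# `hist` above is the Keras History object returned by Model.fit, converted
# to a dataframe whose columns are the per-epoch metrics. A minimal sketch of
# the assumed training call that produced it (epoch count and variable names
# are illustrative, not from the original):
#   hist = ae.fit(x_train, x_train, epochs=100,
#                 validation_data=(x_vld, x_vld))
# The metric columns can be inspected directly:
print(hist.columns.tolist())  # typically ['loss', 'val_loss']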

# %%
# Test the trained neural network against the test dataset
x_test = dt[slc_tst][:, :, :, np.newaxis]
loss = ae.evaluate(x_test, x_test)
print("Test dataset loss: {:.3f}".format(loss))

global_loss = ae.evaluate(dt[:, :, :, np.newaxis], dt[:, :, :, np.newaxis])
print("Entire dataset loss: {:.3f}".format(global_loss))

# %%
# Comparing the input and output of the autoencoder neural network
data_index = 634

# Slice the data
dt_in = dt[data_index, :, :, np.newaxis]
# Get the neural network output
dt_out = ae.predict(dt_in[np.newaxis])
# Plot
alg = "Convolutional autoencoder with dropout"
plot_red_comp(dt_in, dt_out[0], 0, lt_sz, global_loss, alg)
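
# %%
# `plot_red_comp` is defined elsewhere in the original project. Below is a
# minimal sketch consistent with the six-argument call above, assuming the
# arguments are the original field, its reconstruction, an extra index (kept
# but unused here, since its purpose is unclear), the latent size, the loss
# and the algorithm name; all parameter names are assumptions, and the Fast
# ICA example below calls a five-argument variant.
import matplotlib.pyplot as plt
import numpy as np


def plot_red_comp(original, reconstruction, idx, n_latent, loss, alg):
    # Show the original and the reconstructed field side by side
    _, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.imshow(np.squeeze(original))
    ax1.set_title("Original")
    ax2.imshow(np.squeeze(reconstruction))
    ax2.set_title("{} - {} dims - loss: {:.3f}".format(alg, n_latent, loss))
    plt.show()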

# %%
# Example #2

# Flatten
trn_flt = trn.reshape((trn.shape[0], np.prod(trn.shape[1:])))
vld_flt = vld.reshape((vld.shape[0], np.prod(vld.shape[1:])))

# Get the number of dimensions for 95%, 98% and 99% of the variance
# explained using PCA, to compare with the Kernel PCA

# Full PCA
pca = PCA()
pca.fit(trn_flt)
cum_exp = np.cumsum(pca.explained_variance_ratio_)
# Number of dimensions to compare
n_95 = np.where(cum_exp > 0.95)[0][0] + 1
n_98 = np.where(cum_exp > 0.98)[0][0] + 1
n_99 = np.where(cum_exp > 0.99)[0][0] + 1
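
# A float n_components in (0, 1) makes scikit-learn pick the smallest number
# of components reaching that fraction of explained variance, so the counts
# above can be cross-checked; a minimal sketch:
pca_95 = PCA(n_components=0.95)
pca_95.fit(trn_flt)
print(pca_95.n_components_)  # should match n_95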

# Fast ICA
shape = dt[slc_vld].shape
_, vld_95rec, mse_95 = fast_ICA_fit(n_95, trn_flt, vld_flt, shape)
_, vld_98rec, mse_98 = fast_ICA_fit(n_98, trn_flt, vld_flt, shape)
_, vld_99rec, mse_99 = fast_ICA_fit(n_99, trn_flt, vld_flt, shape)

alg = "Fast ICA"
i = 50
# 95%
plot_red_comp(vld[i], vld_95rec[i], n_95, mse_95, alg)
# 98%
plot_red_comp(vld[i], vld_98rec[i], n_98, mse_98, alg)
# 99%
plot_red_comp(vld[i], vld_99rec[i], n_99, mse_99, alg)
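
# %%
# `fast_ICA_fit` comes from elsewhere in the original project. A minimal
# sketch of what it might look like, assuming it fits scikit-learn's FastICA
# with `n` components on the flattened training data, reconstructs the
# flattened validation data and returns the model, the reshaped
# reconstruction and its MSE (signature and names inferred from the calls
# above):
import numpy as np
from sklearn.decomposition import FastICA


def fast_ICA_fit(n, trn_flt, vld_flt, shape):
    ica = FastICA(n_components=n)
    ica.fit(trn_flt)
    # Project the validation set and map it back to the original space
    vld_rec = ica.inverse_transform(ica.transform(vld_flt))
    mse = np.mean((vld_flt - vld_rec) ** 2)
    return ica, vld_rec.reshape(shape), mse
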
# %%
# Example #3

# Convert the history to a Pandas dataframe
hist = pd.DataFrame(hist.history)
hist.index.name = "Epochs"

# Plot training evolution
tit = "Validation loss: {:.3f} - Training loss: {:.3f}".format(*hist.min())
hist.plot(grid=True, title=tit)

# %%
# Test the trained neural network against the test dataset
x_test = x_data[slc_tst]
y_test = y_data[slc_tst]
loss = lstm.evaluate(x_test, y_test)
print("Test dataset loss: {:.3f}".format(loss))

global_loss = lstm.evaluate(x_data, y_data)
print("Entire dataset loss: {:.3f}".format(global_loss))

# %%
# Comparing the input and output of the LSTM neural network
data_index = 634

# Slice the data
dt_in = x_data[data_index]
# Get the neural network output
dt_out = lstm.predict(dt_in[np.newaxis])
# Plot
alg = "Convolutional LSTM"
plot_red_comp(y_data[data_index], dt_out[0], 0, lt_sz, global_loss, alg)
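
# %%
# `x_data` and `y_data` are built elsewhere in the original. Since a
# convolutional LSTM consumes sequences, a plausible construction is sliding
# windows of past snapshots mapped to the snapshot that follows each window
# (the window length and the names x_seq/y_next are assumptions, used here so
# the real x_data/y_data are not overwritten):
window = 4  # illustrative window length
x_seq = np.stack([dt[i:i + window] for i in range(len(dt) - window)])
x_seq = x_seq[..., np.newaxis]          # (samples, window, H, W, 1)
y_next = dt[window:, :, :, np.newaxis]  # snapshot following each window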

# %%
min_loss = hist[["val_loss", "loss"]].min()
# Plot training evolution
tit = "Validation loss: {:.3f} - Training loss: {:.3f}".format(*min_loss)
hist.plot(grid=True, title=tit)

# %%
# Test the trained neural network against the test dataset
x_test = x_data[slc_tst]
y_test = y_data[slc_tst]
loss = ae.evaluate(x_test, [x_test, y_test])
print("Test dataset loss - Same: {:.3f} - Next: {:.3f}".format(*loss[1:]))

global_loss = ae.evaluate(x_data, [x_data, y_data])
print("Entire dataset loss - Same: {:.3f} - Next: {:.3f}".format(
    *global_loss[1:]))

# %%
# Comparing the input and output of the autoencoder neural network
data_index = 634

# Slice the data
dt_in = x_data[data_index]
# Get the neural network output
dt_out, dt_out_nxt = ae.predict(dt_in[np.newaxis])
# Plot
alg = "Dual Convolutional Autoencoder"
plot_red_comp(dt_in, dt_out[0], 0, lt_sz, global_loss[1], alg)
plot_red_comp(y_data[data_index], dt_out_nxt[0], 0, lt_sz, global_loss[2], alg)
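
# %%
# With two model outputs, Keras `evaluate` returns [total_loss, out1_loss,
# out2_loss], which is why the prints above unpack loss[1:]. The per-output
# names can be checked directly (the exact names depend on the layer names):
print(ae.metrics_names)  # e.g. ['loss', 'same_loss', 'next_loss']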

# %%