data = (data - min_val) / (max_val - min_val)

# split into train and test sets
split = int(len(data) * 0.70)
train = data[:split]
test = data[split:]

trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)
trainX = trainX[:, :, np.newaxis]
testX = testX[:, :, np.newaxis]

# create and fit the RNN
model = Sequential()
# first parameter is the number of LSTM units, i.e. the size of the hidden state
model.add(CuDNNLSTM(15, input_shape=(config.look_back, 1)))
model.add(Dense(1))
model.compile(loss='mae', optimizer='rmsprop')
model.fit(trainX, trainY, epochs=1000, batch_size=40,
          validation_data=(testX, testY),
          callbacks=[
              PlotCallback(trainX, trainY, testX, testY,
                           config.look_back, config.repeated_predictions),
              WandbCallback()
          ])
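# Illustrative sketch only: PlotCallback is a project-specific helper that these
# snippets rely on but never show. Assuming it is an ordinary Keras Callback that,
# after each epoch, plots the model's predictions on the held-out window against
# the true values and logs the figure to Weights & Biases. The class name matches
# the snippets, but the arguments' use and every detail below are assumptions.
import matplotlib.pyplot as plt
import wandb
from keras.callbacks import Callback

class PlotCallback(Callback):
    def __init__(self, trainX, trainY, testX, testY, look_back,
                 repeated_predictions=False):
        super(PlotCallback, self).__init__()
        self.trainX, self.trainY = trainX, trainY
        self.testX, self.testY = testX, testY
        self.look_back = look_back
        self.repeated_predictions = repeated_predictions

    def on_epoch_end(self, epoch, logs=None):
        # one-step-ahead predictions on the test windows
        # (the repeated_predictions mode, where outputs would be fed back
        # in as inputs, is omitted from this sketch)
        preds = self.model.predict(self.testX).flatten()
        fig, ax = plt.subplots()
        ax.plot(self.testY, label='actual')
        ax.plot(preds, label='predicted')
        ax.legend()
        wandb.log({'predictions': wandb.Image(fig)})
        plt.close(fig)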
# normalize data to between 0 and 1
max_val = max(data)
min_val = min(data)
data = (data - min_val) / (max_val - min_val)

# split into train and test sets
split = int(len(data) * 0.70)
train = data[:split]
test = data[split:]

trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)
trainX = trainX[:, :, np.newaxis]
testX = testX[:, :, np.newaxis]

# create and fit the RNN
model = Sequential()
model.add(SimpleRNN(1, input_shape=(config.look_back, 1)))
model.compile(loss='mae', optimizer='adam')
model.fit(trainX, trainY, epochs=1000, batch_size=1,
          validation_data=(testX, testY),
          callbacks=[
              WandbCallback(),
              PlotCallback(trainX, trainY, testX, testY, config.look_back)
          ])
data = load_data("sin")

# normalize data to between 0 and 1
max_val = max(data)
min_val = min(data)
data = (data - min_val) / (max_val - min_val)

# split into train and test sets
split = int(len(data) * 0.70)
train = data[:split]
test = data[split:]

trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)
trainX = trainX[:, :, np.newaxis]
testX = testX[:, :, np.newaxis]

# create and fit the RNN
model = Sequential()
model.add(SimpleRNN(10, input_shape=(config.look_back, 1)))
model.add(Dense(1))
model.compile(loss='mae', optimizer='rmsprop')
model.fit(trainX, trainY, epochs=1000, batch_size=20,
          validation_data=(testX, testY),
          callbacks=[
              WandbCallback(),
              PlotCallback(trainX, trainY, testX, testY, config.look_back)
          ])
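# Illustrative sketch only: load_data, used in the snippet above, is another helper
# not shown in these excerpts. Assuming it returns a 1-D numpy array of the requested
# series; for "sin" that could simply be a sampled sine wave. The sample count and
# number of periods are arbitrary choices.
import numpy as np

def load_data(name):
    if name == "sin":
        # a few thousand points spanning many periods of a unit sine wave
        return np.sin(np.linspace(0, 50 * np.pi, 5000))
    raise ValueError("unknown dataset: %s" % name)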
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')

# instantiate VAE model
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')

models = (encoder, decoder)
data = (x_test, y_test)

# VAE loss = reconstruction loss + KL divergence
reconstruction_loss = binary_crossentropy(inputs, outputs)
reconstruction_loss *= original_dim
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')

vae.fit(x_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, None),
        callbacks=[
            WandbCallback(),
            PlotCallback(encoder, decoder, (x_test, y_test))
        ])
vae.save_weights('vae_mlp_mnist.h5')
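# Illustrative sketch only: the tensors referenced in the snippet above (inputs,
# z_mean, z_log_var, latent_inputs, outputs, encoder) are defined earlier in the
# full script. Assuming the standard Keras MLP VAE on MNIST, they would look roughly
# like this; the layer sizes here are assumptions.
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras import backend as K

original_dim = 784        # flattened 28x28 MNIST images
intermediate_dim = 512
latent_dim = 2

def sampling(args):
    """Reparameterization trick: draw z = mean + sigma * epsilon."""
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

# encoder: maps an image to the parameters of q(z|x) and a sampled z
inputs = Input(shape=(original_dim,), name='encoder_input')
x = Dense(intermediate_dim, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')

# decoder tensors: map a latent sample back to a reconstructed image
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(intermediate_dim, activation='relu')(latent_inputs)
outputs = Dense(original_dim, activation='sigmoid')(x)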
def create_dataset(dataset):
    # sliding-window helper (the function header and loop are assumed from the
    # call sites; the original excerpt begins mid-function with the last two lines)
    dataX, dataY = [], []
    for i in range(len(dataset) - config.look_back):
        dataX.append(dataset[i:i + config.look_back])
        dataY.append(dataset[i + config.look_back])
    return np.array(dataX), np.array(dataY)

# normalize data to between 0 and 1
#max_val = max(data)
#min_val = min(data)
#data = (data - min_val) / (max_val - min_val)

# split into train and test sets
split = int(len(data) * 0.70)
train = data[:split]
test = data[split:]

trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)
trainX = trainX[:, :, np.newaxis]
testX = testX[:, :, np.newaxis]

# create and fit the RNN
model = Sequential()
model.add(SimpleRNN(1, input_shape=(config.look_back, 1)))
model.compile(loss='mae', optimizer='adam', metrics=['mae'])
model.fit(trainX, trainY, epochs=1000, batch_size=1,
          validation_data=(testX, testY),
          callbacks=[
              WandbCallback(),
              PlotCallback(trainX, trainY, testX, testY,
                           config.look_back, config.repeated_predictions)
          ])