# Imports assume the mla package layout (github.com/rushter/MLAlgorithms);
# adjust the module paths if your copy differs.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

from mla.datasets import addition_dataset
from mla.metrics import accuracy
from mla.neuralnet import NeuralNet
from mla.neuralnet.layers import Activation, Dense, TimeDistributedDense
from mla.neuralnet.optimizers import Adam
from mla.neuralnet.parameters import Parameters


def regression():
    # Generate a random regression problem
    X, y = make_regression(
        n_samples=5000,
        n_features=25,
        n_informative=25,
        n_targets=1,
        random_state=100,
        noise=0.05,
    )
    # Scale down the targets to keep the MSE loss in a stable range.
    y *= 0.01
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.1, random_state=1111
    )

    model = NeuralNet(
        layers=[
            Dense(64, Parameters(init='normal')),
            Activation('linear'),
            Dense(32, Parameters(init='normal')),
            Activation('linear'),
            Dense(1),
        ],
        loss='mse',
        optimizer=Adam(),
        metric='mse',
        batch_size=256,
        max_epochs=15,
    )
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print("regression mse", mean_squared_error(y_test, predictions.flatten()))
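
# Not in the original: a small sanity-check helper. Always predicting the
# training-set mean gives an MSE roughly equal to the variance of y_test,
# a baseline the printed "regression mse" should comfortably beat.
def regression_baseline(y_train, y_test):
    baseline = np.full_like(y_test, y_train.mean())
    return mean_squared_error(y_test, baseline)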

def mlp_model(n_actions, batch_size=64):
    # Small MLP with one output per action; max_epochs=1 means each fit()
    # call performs a single training epoch, suited to incremental updates.
    model = NeuralNet(
        layers=[Dense(32), Activation("relu"), Dense(n_actions)],
        loss="mse",
        optimizer=Adam(),
        metric="mse",
        batch_size=batch_size,
        max_epochs=1,
        verbose=False,
    )
    return model
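
# Hypothetical usage of mlp_model() as a small function approximator (the
# names below are illustrative, not from this excerpt):
#   q_net = mlp_model(n_actions=4)
#   q_net.fit(states, q_targets)      # one epoch per call (max_epochs=1)
#   q_values = q_net.predict(states)

# test_mlp() below uses module-level train/test splits that this excerpt did
# not define. A minimal sketch that provides them, mirroring the
# make_regression setup in regression() above; the sizes are assumptions:
X, y = make_regression(
    n_samples=1000,
    n_features=10,
    n_informative=10,
    n_targets=1,
    random_state=100,
    noise=0.05,
)
y *= 0.01
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=1111
)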

def test_mlp():
    model = NeuralNet(
        layers=[
            Dense(16, Parameters(init='normal')),
            Activation('linear'),
            Dense(8, Parameters(init='normal')),
            Activation('linear'),
            Dense(1),
        ],
        loss='mse',
        optimizer=Adam(),
        metric='mse',
        batch_size=64,
        max_epochs=150,
    )
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    assert mean_squared_error(y_test, predictions.flatten()) < 1.0
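
# Run the test directly with pytest, e.g.: pytest -k test_mlp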

def addition_nlp(recurrent_layer):
    # Learn binary addition of 8-bit numbers with a recurrent layer.
    X_train, X_test, y_train, y_test = addition_dataset(8, 5000)
    print(X_train.shape, X_test.shape)

    model = NeuralNet(
        layers=[
            recurrent_layer,
            TimeDistributedDense(1),
            Activation('sigmoid'),
        ],
        loss='mse',
        optimizer=Adam(),
        metric='mse',
        batch_size=64,
        max_epochs=15,
    )
    model.fit(X_train, y_train)
    predictions = np.round(model.predict(X_test))
    # np.packbits requires uint8 or bool input (and np.int was removed from
    # modern NumPy); pack predicted and true bit sequences into bytes before
    # comparing them element-wise.
    predictions = np.packbits(predictions.astype(np.uint8))
    y_test = np.packbits(y_test.astype(np.uint8))
    print(accuracy(y_test, predictions))
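
# Minimal driver sketch for the examples above. The RNN/LSTM constructors,
# their import path, and the hidden-size argument are assumptions about
# mla.neuralnet.layers.recurrent, not confirmed by this excerpt.
if __name__ == '__main__':
    from mla.neuralnet.layers.recurrent import LSTM, RNN

    regression()
    addition_nlp(LSTM(16))
    addition_nlp(RNN(16))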