"""Build a one-hidden-layer MLP regressor and print its hyperparameters."""
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split

# load_boston was deprecated in scikit-learn 1.0 and removed in 1.2 for
# ethical reasons; fetch_california_housing is the documented replacement.
data = fetch_california_housing()

# Hold out 20% for testing; random_state makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.2, random_state=0
)

# Create a neural network model with one hidden layer of 20 nodes.
model = MLPRegressor(hidden_layer_sizes=(20,))

# Print the hyperparameters of the model (get_params does not require fitting).
print(model.get_params())
"""Train a two-hidden-layer MLP regressor and report test MSE and hyperparameters."""
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# load_boston was deprecated in scikit-learn 1.0 and removed in 1.2 for
# ethical reasons; fetch_california_housing is the documented replacement.
data = fetch_california_housing()

# Hold out 20% for testing; random_state makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.2, random_state=0
)

# Two hidden layers (100 and 50 nodes) with a logistic activation function.
model = MLPRegressor(hidden_layer_sizes=(100, 50), activation='logistic')

# Train the model on the training data.
model.fit(X_train, y_train)

# Make predictions on the test data.
y_pred = model.predict(X_test)

# Compute and report the mean squared error on the held-out split.
mse = mean_squared_error(y_test, y_pred)
print('Mean squared error:', mse)

# Print the hyperparameters of the model.
print(model.get_params())

# NOTE: In this example, we create an instance of the MLPRegressor class with
# two hidden layers of 100 and 50 nodes, respectively, and a logistic
# activation function. We train the model on the training data and make
# predictions on the test data. We then compute the mean squared error of the
# model and print it. Finally, we use the get_params() method to retrieve the
# hyperparameters of the model.