Example 1
import torch
import matplotlib.pyplot as plt
from skorch import NeuralNetRegressor

# Net and the train/test tensors (t_d_inp, t_d_oup, test_inp, test_oup)
# come from elsewhere in the project.
net_regr = NeuralNetRegressor(
    Net(hidden_size=500),
    max_epochs=5000,
    lr=0.01,
    device='cuda',
    optimizer=torch.optim.Adam,
    train_split=None,  # train on the full data, no internal validation split
    verbose=1,
)

res = net_regr.fit(t_d_inp, t_d_oup)
# save
net_regr.save_params(f_params='step1result')

pred = net_regr.predict(test_inp)
mse = ((test_oup - pred)**2).mean()
print('test error = ' + str(mse))
# plot 1: training loss history
loss = net_regr.history[:, 'train_loss']
plt.figure()
plt.plot(loss)
plt.ylabel('loss')
plt.ylim([0, loss[-1] * 4])
# plot 2: predictions vs. DNS reference data
plt.figure()
s = 3
plt.scatter(yaxis, pred, s=s, label="Prediction")
plt.scatter(yaxis, test_oup, s=s, label="DNS")
plt.legend()
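
The Net module instantiated above is not shown anywhere in the snippet. A minimal sketch of a module that would satisfy the Net(hidden_size=500) call, assuming one-dimensional input and output (both are assumptions, since the example never shows the data shapes):

import torch.nn as nn

class Net(nn.Module):
    # Hypothetical module: only hidden_size appears in the original example;
    # the 1-D input/output sizes are assumptions.
    def __init__(self, hidden_size=500, input_size=1, output_size=1):
        super().__init__()
        self.hidden = nn.Linear(input_size, hidden_size)
        self.act = nn.Tanh()
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, X):
        return self.out(self.act(self.hidden(X)))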
Example 2
class PyTorchModel(BaseModel):
    def build_model(
        self,
        network=MVRegressor,
        device: str = "cpu",
        scale_data: bool = False,
        num_layers: int = 10,
        num_units: int = 50,
        dropout: float = 0.5,
        num_epochs: int = 10,
        batch_size: int = 128,
    ):

        self.scale_data = scale_data
        self.num_layers = num_layers
        self.num_units = num_units
        self.dropout = dropout
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        if not (hasattr(self, "input_dim") and hasattr(self, "output_dim")):
            raise ValueError("Please load a dataset first to obtain the input and output sizes")

        if device == "cpu":
            self.device = device
        else:
            use_cuda = torch.cuda.is_available()
            self.device = torch.device("cuda" if use_cuda else "cpu")

        self.model = NeuralNetRegressor(
            network,
            device=self.device,
            module__input_dim=self.input_dim,
            module__output_dim=self.output_dim,
            module__n_layers=self.num_layers,
            module__num_units=self.num_units,
            module__p_dropout=self.dropout,
            max_epochs=self.num_epochs,
            criterion=nn.MSELoss,
            batch_size=self.batch_size,
            # Shuffle training data on each epoch
            iterator_train__shuffle=True,
            callbacks=[
                (
                    "lr_scheduler",
                    LRScheduler(
                        policy=CyclicLR, base_lr=0.001, max_lr=0.01, step_every="batch"
                    ),
                ),
            ],
        )

    def fit(self, X, y, **fit_params):

        if self.scale_data:
            X, y = self.scalar(X, y)

        X, y = (
            torch.tensor(X).float().to(device=self.device),
            torch.tensor(y).float().to(device=self.device),
        )
        self.model.fit(X, y, **fit_params)

    def load_model(
        self, input_dim: int, output_dim: int, filename: str, scale_data: bool = False,
    ):

        self.scale_data = scale_data
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.build_model(scale_data=scale_data)
        with open(filename, "rb") as f:
            self.model = pickle.load(f)

    def predict(self, X):

        if self.scale_data:
            X = self.xscalar.transform(X)
        X = torch.tensor(X).float().to(device=self.device)
        preds = self.model.predict(X)

        if self.scale_data:
            preds = self.yscalar.inverse_transform(preds)

        return preds

    def sweep(
        self,
        params: Dict,
        X,
        y,
        search_algorithm: str = "bayesian",
        num_trials: int = 3,
        scoring_func: str = "r2",
    ):

        from tune_sklearn import TuneSearchCV

        X, y = (
            torch.tensor(X).float().to(device=self.device),
            torch.tensor(y).float().to(device=self.device),
        )
        tune_search = TuneSearchCV(
            self.model,
            params,
            search_optimization=search_algorithm,
            n_trials=num_trials,
            early_stopping=True,
            scoring=scoring_func,
        )
        tune_search.fit(X, y)

        return tune_search
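
A possible way to drive this wrapper end to end; the array names, dimensions, and parameter grid below are illustrative assumptions, with the sweep keys following the skorch module__ convention used in build_model:

import numpy as np

model = PyTorchModel()
model.input_dim, model.output_dim = 8, 1  # normally set while loading the dataset
model.build_model(device="cuda", num_layers=4, num_units=64, num_epochs=100)

X_train = np.random.rand(256, 8).astype(np.float32)  # placeholder data
y_train = np.random.rand(256, 1).astype(np.float32)
model.fit(X_train, y_train)
preds = model.predict(X_train)

# Bayesian hyperparameter sweep over skorch-style parameters
search = model.sweep(
    params={"lr": [1e-3, 1e-2], "module__num_units": [50, 100]},
    X=X_train,
    y=y_train,
    search_algorithm="bayesian",
    num_trials=10,
)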
Example 3
# Assess performance

# Get into evaluation (predictive posterior) mode
model.eval()
likelihood.eval()

# Make the predictions
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    preds = model(input_val)

import pickle

import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import seaborn as sns

# Calculate the error metrics (`net` and `sdts_val` come from earlier cells of the notebook)
targets_pred = net.predict(sdts_val).reshape(-1)
mae = mean_absolute_error(targets_val, targets_pred)
rmse = np.sqrt(mean_squared_error(targets_val, targets_pred))
r2 = r2_score(targets_val, targets_pred)

with open('pfgp_plots_d20.pkl', 'wb') as saveplot:
    pickle.dump((targets_pred, targets_train, targets_val, preds), saveplot)

# Report
print('MAE = %.2f eV' % mae)
print('RMSE = %.2f eV' % rmse)
print('R^2 = %.2f' % r2)
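
Note that the preds object returned by the GP above is a posterior distribution rather than an array. A small sketch of extracting point predictions and confidence bounds from it, using the generic GPyTorch pattern rather than code from this notebook:

# preds is a gpytorch.distributions.MultivariateNormal
mean_pred = preds.mean.cpu().numpy()      # point predictions
lower, upper = preds.confidence_region()  # roughly +/- 2 standard deviations
lower, upper = lower.cpu().numpy(), upper.cpu().numpy()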
Example 4
class PyTorchModel(BaseModel):
    def build_model(
        self,
        network=MVRegressor,
        device: str = "cpu",
        scale_data: bool = False,
        num_layers: int = 10,
        num_units: int = 50,
        dropout: float = 0.5,
        num_epochs: int = 10,
        batch_size: int = 128,
    ):

        self.scale_data = scale_data
        self.num_layers = num_layers
        self.num_units = num_units
        self.dropout = dropout
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        if not (hasattr(self, "input_dim") and hasattr(self, "output_dim")):
            raise ValueError(
                "Please load a dataset first to obtain the input and output sizes")

        if device == "cpu":
            self.device = device
        else:
            use_cuda = torch.cuda.is_available()
            self.device = torch.device("cuda" if use_cuda else "cpu")

        self.model = NeuralNetRegressor(
            network,
            device=self.device,
            module__input_dim=self.input_dim,
            module__output_dim=self.output_dim,
            module__n_layers=self.num_layers,
            module__num_units=self.num_units,
            module__p_dropout=self.dropout,
            max_epochs=self.num_epochs,
            criterion=nn.MSELoss,
            batch_size=self.batch_size,
            # Shuffle training data on each epoch
            iterator_train__shuffle=True,
            callbacks=[
                (
                    "lr_scheduler",
                    LRScheduler(policy=CyclicLR,
                                base_lr=0.001,
                                max_lr=0.01,
                                step_every="batch"),
                ),
            ],
        )

    def fit(self, X, y, **fit_params):

        if self.scale_data:
            X, y = self.scalar(X, y)

        X, y = (
            torch.tensor(X).float().to(device=self.device),
            torch.tensor(y).float().to(device=self.device),
        )
        self.model.fit(X, y, **fit_params)

    def load_model(
        self,
        input_dim: int,
        output_dim: int,
        filename: str,
        scale_data: bool = False,
    ):

        self.scale_data = scale_data
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.build_model(scale_data=scale_data)
        with open(filename, "rb") as f:
            self.model = pickle.load(f)

    def predict(self, X):

        if self.scale_data:
            X = self.xscalar.transform(X)
        X = torch.tensor(X).float().to(device=self.device)
        preds = self.model.predict(X)

        if self.scale_data:
            preds = self.yscalar.inverse_transform(preds)

        return preds

    def sweep(
        self,
        params: Dict,
        X,
        y,
        search_algorithm: str = "bayesian",
        num_trials: int = 3,
        scoring_func: str = "r2",
        early_stopping: bool = False,
        results_csv_path: str = "outputs/results.csv",
        splitting_criteria: str = "timeseries",
        num_splits: int = 5,
        test_indices: list = None,  # required when splitting_criteria == "fixed"
    ):

        start_dir = str(pathlib.Path(os.getcwd()).parent)
        module_dir = str(pathlib.Path(__file__).parent)
        # temporarily change directory to file directory and then reset
        os.chdir(module_dir)

        if self.scale_data:
            X, y = self.scalar(X, y)

        X, y = (
            torch.tensor(X).float().to(device=self.device),
            torch.tensor(y).float().to(device=self.device),
        )

        if splitting_criteria.lower() == "cv":
            cv = None
        elif splitting_criteria.lower() == "timeseries":
            cv = TimeSeriesSplit(n_splits=num_splits)
        elif splitting_criteria.lower() == "grouped":
            cv = GroupShuffleSplit(n_splits=num_splits)
        elif splitting_criteria.lower() == "fixed":
            if not isinstance(test_indices, list):
                raise ValueError(
                    "fixed split used but no test-indices provided...")
            cv = PredefinedSplit(test_fold=test_indices)
        else:
            raise ValueError(
                f"Unknown splitting criteria provided: {splitting_criteria}, "
                "should be one of [cv, timeseries, grouped, fixed]"
            )

        if search_algorithm.lower() == "bohb":
            early_stopping = True

        if search_algorithm.lower() in ["bohb", "bayesian", "hyperopt", "optuna"]:
            search = TuneSearchCV(
                self.model,
                params,
                search_optimization=search_algorithm,
                n_trials=num_trials,
                early_stopping=early_stopping,
                scoring=scoring_func,
                cv=cv,
            )
        elif search_algorithm == "grid":
            search = GridSearchCV(
                self.model,
                param_grid=params,
                refit=True,
                cv=num_trials,
                scoring=scoring_func,
            )
        elif search_algorithm == "random":
            search = RandomizedSearchCV(
                self.model,
                param_distributions=params,
                refit=True,
                cv=num_trials,
                scoring=scoring_func,
            )
        else:
            raise NotImplementedError(
                "Search algorithm should be one of grid, hyperopt, bohb, optuna, bayesian, or random"
            )
        with mlflow.start_run():
            search.fit(X, y)
        self.model = search.best_estimator_

        # set path back to initial
        os.chdir(start_dir)

        results_df = pd.DataFrame(search.cv_results_)
        logger.info(f"Best hyperparams: {search.best_params_}")

        if not pathlib.Path(results_csv_path).parent.exists():
            pathlib.Path(results_csv_path).parent.mkdir(exist_ok=True,
                                                        parents=True)
        logger.info(f"Saving sweeping results to {results_csv_path}")
        logger.info(f"Best score: {search.best_score_}")
        results_df.to_csv(results_csv_path)
        cols_keep = [col for col in results_df if "param_" in col]
        cols_keep += ["mean_test_score"]

        results_df = results_df[cols_keep]

        return results_df
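
The MVRegressor network this wrapper builds is not shown in any of these examples. Here is a minimal sketch consistent with the module__input_dim, module__output_dim, module__n_layers, module__num_units, and module__p_dropout parameters passed above; the exact layer layout is an assumption:

import torch.nn as nn

class MVRegressor(nn.Module):
    # Hypothetical module matching the module__* parameters used above.
    def __init__(self, input_dim, output_dim, n_layers=10, num_units=50, p_dropout=0.5):
        super().__init__()
        layers, width = [], input_dim
        for _ in range(n_layers):
            layers += [nn.Linear(width, num_units), nn.ReLU(), nn.Dropout(p_dropout)]
            width = num_units
        layers.append(nn.Linear(width, output_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, X):
        return self.net(X)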
Example 5

# visualize the loss as the network trained,
# plotting training and validation loss
# (`net`, `number`, `X_test`, `y_test`, and `scaler_y` come from the surrounding script)
from math import sqrt

import matplotlib.pyplot as plt
from sklearn.metrics import (mean_absolute_error,
                             mean_absolute_percentage_error,
                             mean_squared_error)

epochs = list(range(len(net.history)))
train_loss = net.history[:, 'train_loss']
valid_loss = net.history[:, 'valid_loss']

fig = plt.figure(figsize=(8, 5))
plt.plot(epochs, train_loss, 'g-')
plt.plot(epochs, valid_loss, 'r-')
plt.title('Training Loss Curves')
plt.xlabel('Epochs')
plt.ylabel('Mean Squared Error')
plt.legend(['Train', 'Validation'])
fig.savefig('loss_plot' + str(number) + '.png', bbox_inches='tight')

y_pred = net.predict(X_test)

with open("test_losses.txt", "a+") as f:
    f.write("Number: " + str(number) + '\n')
    f.write("MSE loss: " + str(mean_squared_error(y_test, y_pred)) +
            " MAE loss: " + str(mean_absolute_error(y_test, y_pred)) + '\n')
    f.write("RMSE loss: " + str(sqrt(mean_squared_error(y_test, y_pred))) +
            " MAPE loss: " +
            str(mean_absolute_percentage_error(y_test.numpy(), y_pred)) + '\n')

target = scaler_y.inverse_transform(y_pred)
real = scaler_y.inverse_transform(y_test)
Example 6

import torch.nn.functional as F
from torch import nn
from skorch import NeuralNetRegressor

class RegressorModule(nn.Module):
    def __init__(self, num_units=10, nonlin=F.relu):
        super(RegressorModule, self).__init__()
        self.num_units = num_units
        self.nonlin = nonlin

        self.dense0 = nn.Linear(20, num_units)
        self.dense1 = nn.Linear(num_units, 10)
        self.output = nn.Linear(10, 1)

    def forward(self, X, **kwargs):
        X = self.nonlin(self.dense0(X))
        X = F.relu(self.dense1(X))
        X = self.output(X)
        return X


net_regr = NeuralNetRegressor(
    RegressorModule,
    max_epochs=20,
    lr=0.1,
    device='cuda',
)

net_regr.fit(X_regr, y_regr)

y_pred = net_regr.predict(X_regr[:5])
print(y_pred)

# apply the net's default internal train/validation split to the data
a, b = net_regr.train_split(X_regr)
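
For context, a sketch of synthetic data compatible with this module's shapes: dense0 expects 20 input features, and NeuralNetRegressor needs a 2-D float32 target. The use of make_regression is an assumption, not part of the original snippet:

import numpy as np
from sklearn.datasets import make_regression

X_regr, y_regr = make_regression(n_samples=1000, n_features=20, random_state=0)
X_regr = X_regr.astype(np.float32)
y_regr = y_regr.reshape(-1, 1).astype(np.float32)  # 2-D float target for the regressor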