def f(**kwargs):
    kwargs['objective'] = 'reg:squarederror'
    kwargs = Jsonize(kwargs)()

    model = Model(inputs=inputs,
                  outputs=outputs,
                  lookback=1,
                  batches="2d",
                  val_data="same",
                  test_fraction=0.3,
                  model={"xgboostregressor": kwargs},
                  transformation=None,
                  data=data,
                  prefix='testing',
                  verbosity=0)

    model.fit(indices="random")

    t, p = model.predict(indices=model.test_indices, prefix='test')
    mse = RegressionMetrics(t, p).mse()
    print(f"Validation mse {mse}")

    return mse
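# A minimal sketch of how an objective like `f` could be driven with scikit-optimize's
# gp_minimize. This is an assumption for illustration only: the search-space entries
# below are example xgboost parameters, not taken from the original script.
from skopt import gp_minimize
from skopt.space import Integer, Real
from skopt.utils import use_named_args

dimensions = [
    Real(1e-3, 0.5, prior='log-uniform', name='learning_rate'),
    Integer(3, 10, name='max_depth'),
]

# use_named_args unpacks a sampled parameter list into keyword arguments,
# which matches f's **kwargs signature
objective = use_named_args(dimensions)(f)

result = gp_minimize(objective, dimensions, n_calls=12, random_state=313)
print(result.x, result.fun)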
def test_ml_random_indices(self):
    model = Model(inputs=data_reg['feature_names'],
                  outputs=["target"],
                  lookback=1,
                  batches="2d",
                  val_fraction=0.0,
                  val_data="same",
                  test_fraction=0.3,
                  category="ML",
                  problem="regression",
                  model={"xgboostregressor": {}},
                  transformation=None,
                  data=df_reg,
                  verbosity=0)

    model.fit(indices="random")

    trtt, trp = model.predict(indices=model.train_indices, prefix='train')
    t, p = model.predict(indices=model.test_indices, prefix='test')

    self.assertGreater(len(t), 1)
    self.assertGreater(len(trtt), 1)
    return
def fn(**suggestion): model = Model(inputs=inputs, outputs=outputs, model={"xgboostregressor": suggestion}, data=data, prefix=f'test_{algorithm}_xgboost_{backend}', verbosity=0) model.fit(indices="random") t, p = model.predict(indices=model.test_indices, prefix='test') mse = RegressionMetrics(t, p).mse() return mse
def fn(**suggestion): model = Model(inputs=inputs, outputs=outputs, model={"xgboostregressor": suggestion}, data=data, prefix='test_tpe_xgboost', verbosity=0) model.fit(indices="random") t, p = model.predict(indices=model.test_indices, prefix='test') mse = RegressionMetrics(t, p).mse() print(f"Validation mse {mse}") return mse
def build_and_run(outputs, transformation=None, indices=None):
    model = Model(model={"layers": make_layers(len(outputs['inp_1d']))},
                  lookback=lookback,
                  inputs={"inp_1d": inp_1d, "inp_2d": inp_2d},
                  outputs=outputs,
                  data={'inp_1d': make_1d(outputs['inp_1d']), 'inp_2d': data_2d},
                  transformation=transformation,
                  epochs=2,
                  verbosity=0)

    model.fit(indices=indices)

    return model.predict(indices=model.test_indices if indices else None)
def test_datetimeindex(self):
    # makes sure that when use_datetime_index=True is used during prediction,
    # the returned values come back in the correct order
    model = Model(data=data1,
                  inputs=in_cols,
                  outputs=out_cols,
                  epochs=2,
                  model={'layers': {
                      "LSTM": {"config": {"units": 2}},
                      "Dense": {"config": {"units": 1}},
                      "Reshape": {"config": {"target_shape": (1, 1)}}
                  }},
                  lookback=lookback,
                  verbosity=0)

    model.fit(indices="random")

    t, p = model.predict(indices=model.train_indices, use_datetime_index=True)

    # each value in t must equal the corresponding train index plus 10000,
    # because the y column is offset by 10000 from the row index
    for i in range(100):
        self.assertEqual(int(t[i]), model.train_indices[i] + 10000)
    return
"Dense_0": { 'units': 64, 'activation': 'relu' }, "Flatten": {}, "Dense_3": { 'units': 1 }, } } df = arg_beach() input_features = list(df.columns)[0:-1] # column in dataframe to bse used as output/target outputs = list(df.columns)[-1] model = Model(data=df, batch_size=16, lookback=1, model=mlp_model, inputs=input_features, outputs=[outputs], lr=0.0001) history = model.fit(indices='random') y, obs = model.predict() model.view_model(st=0)
# this example shows how to build a Model with the `from_config` class method
# first we train and save a simple model, then load it back from its config file
import os

from AI4Water import Model
from AI4Water.utils.datasets import load_nasdaq
from AI4Water.utils.utils import find_best_weight

df = load_nasdaq()

model = Model(lookback=1,
              epochs=2,
              data=df,
              )

history = model.fit(indices='random')

w_path = model.path

# for clarity, delete the model, although the variable would be overwritten anyway
del model

# load the `Model` from the checkpoint by providing the complete path of the config file
cpath = os.path.join(w_path, "config.json")
model = Model.from_config(cpath, data=df)

# find the file name of the best weights and load them
w_file = find_best_weight(os.path.join(w_path, "weights"))
model.load_weights(w_file)

x, y = model.predict(indices=model.test_indices, use_datetime_index=False)