def test_store(__model_object__):
    """Check that store() writes the run's data CSV and attributes JSON to disk."""
    model1 = __model_object__
    model1.store()
    assert os.path.isfile(''.join([model1.run_name, '_data.csv']))
    assert os.path.isfile(''.join([model1.run_name, '_attributes.json']))
    delete_files(model1.run_name)
def test_nn_hypertune(__data_split_model__):
    """Check that hyperTune() produces a parameter dict and saves the tuned Keras model."""
    model1 = __data_split_model__
    model1.reg()
    model1.make_grid()
    model1.hyperTune()
    assert type(model1.params) is dict
    assert os.path.isfile(''.join([model1.run_name, '.h5']))  # Check for the saved Keras model file
    delete_files(model1.run_name)
def test_train_reg(__data_split_model__):
    """Check that train_reg() produces a predictions DataFrame and a stats dict."""
    model1 = __data_split_model__
    model1.reg()
    model1.make_grid()
    model1.hyperTune()
    model1.train_reg()
    assert type(model1.predictions) is pandas.core.frame.DataFrame
    assert type(model1.predictions_stats) is dict
    __assert_results__(model1.predictions_stats)
    delete_files(model1.run_name)
def test_pickle(__run_all__without_analyze):
    """Check that pickle_model() and unpickle_model() round-trip a model that still predicts."""
    from sklearn.metrics import mean_squared_error, r2_score

    model1 = __run_all__without_analyze
    model1.pickle_model()
    model2 = unpickle_model(''.join([model1.run_name, '.pkl']))
    model2.run()
    # Make predictions
    predictions = model2.estimator.predict(model2.test_features)
    # Dataframe for replicate_model
    pva = pd.DataFrame([], columns=['actual', 'predicted'])
    pva['actual'] = model2.test_target
    pva['predicted'] = predictions
    assert r2_score(pva['actual'], pva['predicted']) < 0.9
    assert mean_squared_error(pva['actual'], pva['predicted']) > 0.2
    assert np.sqrt(mean_squared_error(pva['actual'], pva['predicted'])) > 0.2
    delete_files(model1.run_name)
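# NOTE: unpickle_model is imported from elsewhere in the package. The sketch
# below is only an illustration of the behaviour test_pickle relies on,
# assuming the model is stored as a plain pickle file; it is not the project's
# actual implementation.
import pickle


def _unpickle_model_sketch(path):
    """Load a previously pickled model object from `path` and return it unchanged."""
    with open(path, 'rb') as f:
        return pickle.load(f)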
def test_pva_graph(__run_all__):
    """Check that a full run produces the predicted-vs-actual (PVA) graph."""
    model1 = __run_all__
    assert os.path.isfile(''.join([model1.run_name, '_PVA.png']))  # Check for PVA graph
    delete_files(model1.run_name)
def test_var_importance(__run_all__):
    """Check that a full run produces the variable-importance graph."""
    model1 = __run_all__
    assert os.path.isfile(''.join([model1.run_name, '_importance-graph.png']))
    delete_files(model1.run_name)
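# NOTE: delete_files is a cleanup helper defined elsewhere in the test suite.
# The sketch below only illustrates the behaviour the tests above depend on,
# assuming every artifact (CSV, JSON, .h5, .pkl, PNG graphs) is written with
# the run_name as a filename prefix; it is not the project's actual code.
import glob
import os


def _delete_files_sketch(run_name):
    """Remove files left behind by a test run, matched by their run_name prefix."""
    for path in glob.glob(run_name + '*'):
        if os.path.isfile(path):
            os.remove(path)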