def test_ModelMetrics_full_statistics(sample_data):
    """Exhaustively verify every statistic ModelMetrics computes on the fixture.

    NOTE(review): renamed from ``test_ModelMetrics`` — a second function of
    that exact name appears later in this file and shadowed this one
    (flake8 F811), so these assertions were silently never executed.
    """
    series_one, series_two = sample_data
    model_metrics = ModelMetrics(series_one, series_two, num_parameters=2)
    # Length bookkeeping: both inputs align fully after the merge.
    assert model_metrics.observed_length == 5
    assert model_metrics.predicted_length == 5
    assert model_metrics.merged_length == 5
    # Distribution statistics of each series.
    assert model_metrics.observed_mean == 3.0
    assert model_metrics.predicted_mean == 2.8
    assert round(model_metrics.observed_skew, 3) == 0.524
    assert round(model_metrics.predicted_skew, 3) == 0.512
    assert round(model_metrics.observed_kurtosis, 3) == -0.963
    assert round(model_metrics.predicted_kurtosis, 3) == -0.612
    assert round(model_metrics.observed_cvstd, 3) == 0.707
    assert round(model_metrics.predicted_cvstd, 3) == 0.299
    # Goodness-of-fit / error metrics.
    assert round(model_metrics.r_squared, 3) == 0.972
    assert round(model_metrics.r_squared_adj, 3) == 0.944
    assert round(model_metrics.cvrmse, 3) == 0.394
    assert round(model_metrics.cvrmse_adj, 3) == 0.509
    assert round(model_metrics.mape, 3) == 0.517
    assert round(model_metrics.mape_no_zeros, 3) == 0.517
    assert model_metrics.num_meter_zeros == 0
    assert round(model_metrics.nmae, 3) == 0.333
    assert round(model_metrics.nmbe, 3) == -0.067
    assert round(model_metrics.autocorr_resid, 3) == -0.674
    # repr() and json() must both produce serializable output.
    assert repr(model_metrics) is not None
    assert json.dumps(model_metrics.json()) is not None
def test_ModelMetrics_invalid_confidence_level(sample_data):
    """Confidence levels outside the valid range must raise at construction.

    Fix: the original bound the unused names ``e`` and ``model_metrics``;
    both are dropped.

    NOTE(review): ``pytest.raises(Exception)`` is broad — the other
    validation tests in this file expect ``ValueError``; presumably this
    path does too. Confirm against ModelMetrics and narrow.
    """
    series_one, series_two = sample_data
    # Above the upper bound.
    with pytest.raises(Exception):
        ModelMetrics(
            series_one, series_two, num_parameters=2, confidence_level=1.1
        )
    # Below the lower bound.
    with pytest.raises(Exception):
        ModelMetrics(
            series_one, series_two, num_parameters=2, confidence_level=-1
        )
def test_model_results_json_with_model_metrics():
    """ModelResults.json() is serializable and has the expected structure."""
    candidate = CandidateModel(
        model_type="model_type",
        formula="formula",
        status="status",
        r_squared_adj=0.5,
    )
    results = ModelResults(
        status="status",
        method_name="method_name",
        model=candidate,
        r_squared_adj=np.nan,
    )
    # NOTE(review): this metrics object is built but never attached to
    # ``results`` — presumably ``results.metrics = model_metrics`` was
    # intended; the "metrics" key is overwritten below regardless.
    model_metrics = ModelMetrics(
        observed_input=pd.Series([0, 1, 2]),
        predicted_input=pd.Series([1, 0, 2]),
    )
    serialized = results.json()
    # Round-tripping through json.dumps proves the payload is valid JSON.
    json.dumps(serialized)
    # Float-bearing metrics are not stable to compare; blank them out.
    serialized["metrics"] = {}
    expected = {
        "candidates": None,
        "metadata": {},
        "method_name": "method_name",
        "metrics": {},
        "model": {
            "formula": "formula",
            "model_params": {},
            "model_type": "model_type",
            "r_squared_adj": 0.5,
            "status": "status",
            "warnings": [],
        },
        "settings": {},
        "status": "status",
        "r_squared_adj": None,
        "warnings": [],
    }
    assert serialized == expected
def test_ModelMetrics_diff_length_error_with_nan(sample_data_diff_length_with_nan):
    """NaN-induced length mismatch produces exactly one descriptive warning."""
    observed, predicted = sample_data_diff_length_with_nan
    metrics = ModelMetrics(observed, predicted)
    assert len(metrics.warnings) == 1
    (warning,) = metrics.warnings
    # Identify the warning by its qualified name and message prefix.
    assert warning.qualified_name.startswith(
        "eemeter.metrics.input_series_are_of"
    )
    assert warning.description.startswith("Input series")
    # The warning payload records both raw and NaN-stripped lengths.
    expected_data = {
        "merged_length": 5,
        "observed_input_length": 7,
        "observed_length_without_nan": 7,
        "predicted_input_length": 7,
        "predicted_length_without_nan": 5,
    }
    assert warning.data == expected_data
def test_ModelMetrics_autocorr_lags_error(sample_data):
    """A zero ``autocorr_lags`` value is rejected with ValueError."""
    observed, predicted = sample_data
    with pytest.raises(ValueError):
        ModelMetrics(observed, predicted, autocorr_lags=0)
def test_ModelMetrics_num_parameter_error(sample_data):
    """A negative ``num_parameters`` value is rejected with ValueError."""
    observed, predicted = sample_data
    with pytest.raises(ValueError):
        ModelMetrics(observed, predicted, num_parameters=-1)
def test_ModelMetrics_zeros(sample_data_zeros):
    """Zero meter readings drive MAPE to infinity and are counted."""
    observed, predicted = sample_data_zeros
    metrics = ModelMetrics(observed, predicted, num_parameters=2)
    # Division by zero observations makes plain MAPE infinite.
    assert np.isinf(metrics.mape)
    assert metrics.num_meter_zeros == 2
def model_metrics(sample_data):
    """Build a ModelMetrics instance from the shared sample series.

    NOTE(review): the name and signature suggest this is a pytest fixture,
    but no ``@pytest.fixture`` decorator is visible here — confirm it was
    not lost.
    """
    observed, predicted = sample_data
    return ModelMetrics(observed, predicted, num_parameters=2)
def test_ModelMetrics_inputs_unchanged(sample_data):
    """Constructing ModelMetrics must not mutate the caller's input series."""
    observed, predicted = sample_data
    ModelMetrics(observed, predicted)
    # The original series names survive construction untouched.
    assert sample_data[0].name == "NameOne"
    assert sample_data[1].name == "NameTwo"
def test_ModelMetrics(sample_data):
    """Smoke-test ModelMetrics: shared stat checks, repr, and JSON dump."""
    observed, predicted = sample_data
    metrics = ModelMetrics(observed, predicted, num_parameters=2)
    # Delegates the detailed per-statistic assertions to the shared helper.
    test_sample_model_metrics(metrics)
    assert repr(metrics) is not None
    assert json.dumps(metrics.json()) is not None
def test_model_metrics_json_convert(sample_data):
    """Round-trip ModelMetrics through json()/from_json() without loss.

    Fix: renamed from ``test_model_metrics_json_covert`` — "covert" was a
    typo for "convert".
    """
    series_one, series_two = sample_data
    model_metrics = ModelMetrics(series_one, series_two, num_parameters=2)
    json_rep = model_metrics.json()
    # The reconstructed object must satisfy the same statistic assertions.
    test_sample_model_metrics(ModelMetrics.from_json(json_rep))
def test_ModelMetrics_diff_length_error(sample_data_diff_length):
    """Series of mismatched length (no NaNs involved) raise ValueError."""
    observed, predicted = sample_data_diff_length
    with pytest.raises(ValueError):
        ModelMetrics(observed, predicted)