def testFullLoadCurve_invalid(self):
    """Models with malformed `wot` inputs must all fail validation."""
    import numpy as np
    import pandas as pd

    bad_wots = [
        None,
        [],
        {},
        [[1, 2, 3], [4, 5, 6]],
        np.array([[1, 2, 3], [4, 5, 6]]),
        pd.DataFrame({"speed": [10, 11, 12], "foo": [1, 2, 3]}),
        pd.DataFrame({"velocity": [100, 200, 300], "alt": [0, 1, 0]}),
        # pd.Series([5,6,'a']),
    ]
    for bad_wot in bad_wots:
        base = datamodel.get_model_base()
        base = datamodel.merge(datamodel.get_model_base(), base)
        base["wot"] = bad_wot
        self.checkModel_invalid(base)
def testFullLoadCurve_invalid(case):
    """Parametrized check: an invalid `wot` entry must fail model validation."""
    import numpy as np
    import pandas as pd

    bad_mdl = datamodel.get_model_base()
    bad_mdl = datamodel.merge(datamodel.get_model_base(), bad_mdl)
    bad_mdl["wot"] = case
    _checkModel_invalid(bad_mdl)
def mdl_from_accdb(props, wot, n2vs: List[float]) -> dict:
    """
    Build a wltp model-dict from ACCDB-sourced vehicle data.

    :param props:
        vehicle properties (attribute & ``get()`` access, e.g. a pandas Series);
        may have been renamed with :func:`accdb_renames()`
    :param wot:
        full-load-curve dataframe; its columns are renamed via
        :func:`accdb_renames` and its index is copied into an ``n`` column
    :param n2vs:
        gear n/v ratios (must be a plain list)
    :return:
        the base model populated with the given vehicle data
    """
    assert isinstance(n2vs, list)

    mdl: dict = datamodel.get_model_base()

    mdl["f0"] = props.f0
    mdl["f1"] = props.f1
    mdl["f2"] = props.f2
    mdl["unladen_mass"] = props.get("unladen_mass", props.kerb_mass)
    mdl["test_mass"] = props.test_mass
    mdl["p_rated"] = props.get("p_rated", props.rated_power)
    mdl["n_rated"] = props.get("n_rated", props.rated_speed)
    mdl["n_idle"] = props.get("n_idle", props.idling_speed)
    # FIX: the lookup key was the empty string "" (typo), so the default
    # `props.v_max` was always taken; use the real key like the fields above.
    mdl["v_max"] = props.get("v_max", props.v_max)
    # mdl['n_min_drive']= props.n_min_drive
    # mdl['n_min_drive_set']= props.n_min_drive_set
    mdl["n_min_drive_up"] = props.n_min_drive_up
    mdl["n_min_drive_down"] = props.n_min_drive_down
    mdl["n_min_drive_up_start"] = props.n_min_drive_start_up  # inversed@acdb!
    mdl["n_min_drive_down_start"] = props.n_min_drive_start_down  # inversed@acdb!
    mdl["t_cold_end"] = props.t_end_start_phase
    mdl["f_safety_margin"] = props.SM

    renames = accdb_renames()
    wot = wot.rename(renames, axis=1)
    wot["n"] = wot.index
    mdl["wot"] = wot

    mdl["n2v_ratios"] = n2vs
    return mdl
def assemble_model(infiles, model_overrides):
    """
    Load every input file into one model, then apply `-m` json-pointer overrides.

    :param infiles: filespecs read in order via :func:`load_model_part`
    :param model_overrides: lists of ``(json_path, value)`` pairs (one list per
        ``-m`` option); relative paths get the default override prefix
    :return: the assembled model dict
    :raises Exception: wrapping any read or override failure, chained to the cause
    """
    mdl = datamodel.get_model_base()

    for fspec in infiles:
        try:
            mdl = load_model_part(mdl, fspec)
        except Exception as err:
            raise Exception("Failed reading %s due to: %s" % (fspec, err)) from err

    if model_overrides:
        # Concatenate every `-m` option-list into a single override list.
        model_overrides = functools.reduce(lambda x, y: x + y, model_overrides)
        for json_path, value in model_overrides:
            try:
                if not json_path.startswith("/"):
                    json_path = _default_model_override_path + json_path
                pandel.set_jsonpointer(mdl, json_path, value)
            except Exception as err:
                raise Exception(
                    "Failed setting model-value(%s)@(%s) due to: %s"
                    % (json_path, value, err)
                ) from err

    return mdl
def test_default_resistance_coeffs_None(self):
    """Explicit ``None`` resistance coefficients must be rejected by the schema."""
    mdl = goodVehicle()
    # FIX: the chained assignment set "f1" twice and never touched "f2".
    mdl["f0"] = mdl["f1"] = mdl["f2"] = None
    mdl = datamodel.merge(datamodel.get_model_base(), mdl)
    with pytest.raises(ValidationError, match="None is not of type 'number'"):
        self.checkModel_valid(mdl)
def test_validate_wltc_data(self):
    """A good vehicle merged onto the base model must pass full WLTC validation."""
    merged = datamodel.merge(datamodel.get_model_base(), goodVehicle())
    datamodel.model_validator(
        validate_wltc_data=True, validate_schema=True
    ).validate(merged)
def test_default_resistance_coeffs_None(self):
    """``None`` resistance coefficients must trigger a validation failure."""
    mdl = goodVehicle()
    # FIX: the chained assignment set "f1" twice and never touched "f2".
    mdl["f0"] = mdl["f1"] = mdl["f2"] = None
    mdl = datamodel.merge(datamodel.get_model_base(), mdl)
    with pytest.raises(ValidationError, match="'f0' is a required property"):
        self.checkModel_valid(mdl)
def test_calc_default_resistance_coeffs_base_model():
    """The base-model regression curves must yield exactly 3 default coefficients."""
    test_mass = 1000
    base = datamodel.get_model_base()
    datamodel.upd_resistance_coeffs_regression_curves(base)
    curves = base["resistance_coeffs_regression_curves"]
    coeffs = calc_default_resistance_coeffs(test_mass, curves)
    print(coeffs)
    assert len(coeffs) == 3
def _set_model(self, mdl, skip_validation=False, validate_wltc_data=False):
    """
    Merge `mdl` onto a fresh base model, optionally validate, and store it.

    :param mdl: partial model to overlay on the base model
    :param skip_validation: when true, bypass validation entirely
    :param validate_wltc_data: forwarded to :func:`datamodel.validate_model`
    """
    from wltp.datamodel import get_model_base, merge

    full_mdl = get_model_base()
    merge(full_mdl, mdl)  # merges in-place into the fresh base
    if not skip_validation:
        datamodel.validate_model(full_mdl, validate_wltc_data=validate_wltc_data)
    self._model = full_mdl
def testModelInstance_defaultLoadCurve(self):
    """The default load-curve must validate for both petrol (default) and diesel."""
    mdl = datamodel.get_model_base()
    mdl.update(goodVehicle())

    datamodel.upd_default_load_curve(mdl)
    datamodel.model_validator().validate(mdl)

    datamodel.upd_default_load_curve(mdl, "diesel")
    datamodel.model_validator().validate(mdl)
def testModelInstance_simplInstanceeFullLoadCurve(self):
    """A user-given tabular `wot` must validate and override the default curve."""
    n_norm = [1] * 9
    p_norm = [0.23] * 9

    mdl = datamodel.get_model_base()
    mdl.update(goodVehicle())
    mdl.update({"wot": [n_norm, p_norm]})
    datamodel.model_validator().validate(mdl)

    default_wot = datamodel.upd_default_load_curve({})["wot"]
    self.assertNotEqual(mdl["wot"], default_wot)
def _set_model(self, mdl, skip_validation, validate_wltc_data, additional_properties):
    """
    Merge `mdl` onto a fresh base model, collect any validation errors, store it.

    :param mdl: partial model to overlay on the base model
    :param skip_validation: when true, bypass validation entirely
    :param validate_wltc_data: forwarded to :func:`datamodel.validate_model`
    :param additional_properties: forwarded to :func:`datamodel.validate_model`
    :raises ValueError: listing every validation error found
    """
    from wltp.datamodel import get_model_base, merge

    full_mdl = get_model_base()
    merge(full_mdl, mdl)  # merges in-place into the fresh base
    if not skip_validation:
        errors = list(
            datamodel.validate_model(
                full_mdl,
                validate_wltc_data=validate_wltc_data,
                additional_properties=additional_properties,
                iter_errors=True,
            )
        )
        if errors:
            err_msg = "\n ".join(str(e) for e in errors)
            raise ValueError(f"Model validation errors: {err_msg}")
    self._model = full_mdl
def test_default_resistance_coeffs_missing(self):
    """A vehicle without f0/f1/f2 must still validate (defaults apply)."""
    merged = datamodel.merge(datamodel.get_model_base(), goodVehicle())
    self.checkModel_valid(merged)
def testModelBase_plainInvalid(self):
    """The bare base model (no vehicle data) must NOT validate."""
    base_mdl = datamodel.get_model_base()
    datamodel.upd_default_load_curve(base_mdl)
    self.checkModel_invalid(base_mdl)
# Configure console logging and widen pandas' column display for this session.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s|%(levelname)4.4s|%(module)s:[%(funcName)s]:\n +--> %(message)s",
    datefmt="%Y-%m-%d,%H:%M:%S",
)
pd.set_option("display.max_columns", 64)

# %% [markdown]
# ## Run a vehicle with user-specified data

# %%
## For more input data, see https://wltp.readthedocs.io/en/latest/code.html#schema
#
mdl = datamodel.get_model_base()
mdl.update(
    {
        "f0": 395.78,
        "f1": 0.0,
        "f2": 0.15,
        "unladen_mass": 2527.0,
        "test_mass": 2827.0,
        "p_rated": 95.3000030517578,
        "n_rated": 3500,
        "n_idle": 750,
        "v_max": 119.8,
    }
)
# Optional inputs, left at their model defaults:
# mdl["n_min_drive"] = ...
# mdl["n_min_drive_up"] = ...
# mdl["n_min_drive_down"] = ...
# mdl["n_min_drive_up_start"] = ...
# mdl["n_min_drive_down_start"] = ...
# mdl["t_cold_end"] = ...