def run_pyalgo_on_accdb_vehicle(
    h5,
    vehnum,
    additional_properties=False,
    props_group_suffix="prop",
    pwot_group_suffix="wot",
) -> Tuple[dict, pd.DataFrame, pd.DataFrame]:
    """
    Quick 'n dirty way to invoke python-algo (bc model will change).

    :param h5:
        the `WltpGS-msaccess.h5` file (path or h5db) to read input from
    :return:
        the *out-props* key-values, the *cycle* data-frame,
        and the grid-wots constructed to solve v_max.
    """
    from wltp import io as wio, engine, utils
    from wltp.experiment import Experiment

    props, wot, n2vs = load_vehicle_accdb(h5, vehnum)
    mdl = mdl_from_accdb(props, wot, n2vs)
    datamodel.validate_model(mdl, additional_properties=additional_properties)
    exp = Experiment(mdl, skip_model_validation=True)
    mdl = exp.run()

    ## Keep only *output* key-values, not to burden HDF data-model
    #  (excluding `driveability`, which is a list, and f0, f1, f2, which were inputs).
    #
    # oprops = {k: v for k, v in veh if np.isscalar(v)}
    out_mdl = {
        "pmr": mdl["pmr"],
        "n95_low": mdl["n95_low"],
        "n95_high": mdl["n95_high"],
        "v_max": mdl["v_max"],
        "n_vmax": mdl["n_vmax"],
        "g_vmax": mdl["g_vmax"],
        "is_n_lim_vmax": mdl["is_n_lim_vmax"],
        "n_max1": mdl["n_max1"],
        "n_max2": mdl["n_max2"],
        "n_max3": mdl["n_max3"],
        "n_max": mdl["n_max"],
        "wltc_class": mdl["wltc_class"],
        "f_dsc_raw": mdl["f_dsc_raw"],
        "f_dsc": mdl["f_dsc"],
    }
    cycle = mdl["cycle"]

    return out_mdl, cycle, mdl["wots_vmax"]
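# Hedged usage sketch for the helper above: the HDF5 path and case number are
# illustrative only (borrowed from the notebook cells further below), and the
# values printed depend on the vehicle actually stored in that file.
oprops, cycle, gwots = run_pyalgo_on_accdb_vehicle(
    "VehData/WltpGS-msaccess.h5", 1, additional_properties=True
)
print(oprops["v_max"], oprops["g_vmax"], oprops["wltc_class"])
print(cycle.shape, gwots.shape)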
def checkModel_invalid(self, mdl):
    ex = jsonschema.ValidationError

    try:
        self.assertRaises(ex, datamodel.validate_model, mdl, iter_errors=False)

        errs = list(datamodel.validate_model(mdl, iter_errors=True))
        self.assertGreater(len(errs), 0, errs)

        self.assertRaises(
            ex, datamodel.validate_model, mdl, additional_properties=False
        )
        self.assertRaises(
            ex, datamodel.validate_model, mdl, additional_properties=True
        )

        errs = list(
            datamodel.validate_model(mdl, iter_errors=True, additional_properties=True)
        )
        self.assertGreater(len(errs), 0, errs)

        errs = list(
            datamodel.validate_model(mdl, iter_errors=True, additional_properties=False)
        )
        self.assertGreater(len(errs), 0, errs)

        self.assertRaises(
            ex,
            datamodel.validate_model,
            mdl,
            iter_errors=False,
            additional_properties=True,
        )
        self.assertRaises(
            ex,
            datamodel.validate_model,
            mdl,
            iter_errors=False,
            additional_properties=False,
        )
    except:
        print("Model failed: ", mdl)
        raise
def main(argv=None):
    """Calculates an engine-map by fitting data-points vectors, use --help for getting help.

    REMARKS::

        * All string-values are case-sensitive.
        * Boolean string-values are case insensitive:
            False : false, off, no,  == 0
            True  : true,  on,  yes, != 0
        * In KEY=VALUE pairs, the values are passed as string.  For other types,
          substitute '=' with:
            +=  : integer
            *=  : float
            ?=  : boolean
            :=  : parsed as json
            @=  : parsed as python (with eval())

    EXAMPLES:

        Assuming a 'vehicle.json' file like this::

            {
                "unladen_mass": 1230,
                "test_mass":    1300,
                "v_max":        195,
                "p_rated":      100,
                "n_rated":      5450,
                "n_idle":       950,
                "n_min":        None,  # Can be overridden by manufacturer.
                "n2v_ratios":   [120.5, 75, 50, 43, 37, 32],
                "f0": 100, "f1": 0.5, "f2": 0.04,
            }

        then the following examples::

            ## Calculate and print fitted engine map's parameters
            #  for a petrol vehicle with the above engine-point's CSV-table:
            >> %(prog)s -I vehicle.csv file_model_path=/vehicle -I vehicle.csv file_frmt=SERIES model_path=/params header@=None

            ## ...and if no header existed:
            >> %(prog)s -m /vehicle:=n_idle -I engine.csv header@=None

            ## Assume PME column contained normalized-Power in Watts,
            #  instead of P in kW:
            >> %(prog)s -m fuel=petrol -I engine.csv -irenames X X 'Pnorm (w)'

            ## Read the same table above but without header-row and
            #  store results into Excel file, 1st sheet:
            >> %(prog)s -m fuel=petrol -I engine.csv --icolumns CM PME PMF -I engine_map.xlsx sheetname+=0

            ## Supply as inline-json more model-values required for columns [RPM, P, FC]
            #  read from <stdin> as json 2D-array of values (no headers),
            #  and store results in UTF-8 regardless of platform's default encoding:
            >> %(prog)s -m '/engine:={"fuel":"petrol", "stroke":15, "capacity":1359}' \\
                    -I - file_frmt=JSON orient=values -c RPM P FC \\
                    -O engine_map.txt encoding=UTF-8

        Now, if input vectors are in 2 separate files, the 1st, 'engine_1.xlsx',
        having 5 columns with different headers than expected, like this::

            OTHER1   OTHER2   N    "Fuel waste"   OTHER3
            0        -1       12   0.14           "some text"
            ...

        and the 2nd having 2 columns with no headers at all and
        the 1st column being 'Pnorm', then use the following command::

            >> %(prog)s -O engine_map -m fuel=petrol \\
                    -I=engine_1.xlsx sheetname+=0 \\
                    -c X   X   N   'Fuel consumption'  X \\
                    -r X   X   RPM 'FC(g/s)'           X \\
                    -I=engine_2.csv header@=None \\
                    -c Pnorm X
    """
    global log, DEBUG

    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    doc_lines = main.__doc__.splitlines()
    desc = doc_lines[0]
    epilog = dedent("\n".join(doc_lines[1:]))
    parser = build_args_parser(program_name, prog_ver, desc, epilog)

    opts = parser.parse_args(argv)

    try:
        DEBUG = bool(opts.debug)

        if DEBUG or opts.verbose > 1:
            opts.strict = True

        if opts.verbose >= 2:
            level = 0
        elif opts.verbose >= 1:
            level = logging.DEBUG
        else:
            level = DEFAULT_LOG_LEVEL
        _init_logging(level, name=program_name)
        log.debug("Args: %s\n  +--Opts: %s", argv, opts)

        if opts.excel:
            copy_excel_template_files(opts.excel)
            return

        # if opts.excelrun:
        #     files_copied = copy_excel_template_files(opts.excelrun)  # @UnusedVariable
        #     xls_file = files_copied[0]
        #
        #     utils.open_file_with_os(xls_file)
        #     return

        opts = validate_file_opts(opts)

        infiles = parse_many_file_args(opts.I, "r", opts.irenames)
        log.info("Input-files: %s", infiles)

        outfiles = parse_many_file_args(opts.O, "w", None)
        log.info("Output-files: %s", outfiles)
    except ValueError as ex:
        if DEBUG:
            log.exception("Cmd-line parsing failed!")
        indent = len(program_name) * " "
        parser.exit(
            3, "%s: %s\n%s  for help use --help\n" % (program_name, ex, indent)
        )

    ## Main program
    #
    try:
        additional_props = not opts.strict
        mdl = assemble_model(infiles, opts.m)
        log.info(
            "Input Model(strict: %s): %s",
            opts.strict,
            utils.Lazy(lambda: utils.yaml_dumps(mdl)),
        )
        mdl = datamodel.validate_model(mdl, additional_props)

        mdl = experiment.Experiment(mdl).run()

        store_model_parts(mdl, outfiles)
    except jsonschema.ValidationError as ex:
        if DEBUG:
            log.error("Invalid input model!", exc_info=ex)
        indent = len(program_name) * " "
        parser.exit(
            4,
            "%s: Model validation failed due to: %s\n%s\n" % (program_name, ex, indent),
        )
    except RefResolutionError as ex:
        if DEBUG:
            log.exception("Invalid model operation!")
        indent = len(program_name) * " "
        parser.exit(
            4,
            "%s: Model operation failed due to: %s\n%s\n" % (program_name, ex, indent),
        )
def checkModel_valid(self, mdl):
    def consume_errs(errs):
        for e in errs:
            self.assertIsNone(e, e)

    try:
        datamodel.validate_model(mdl, iter_errors=False)
        consume_errs(datamodel.validate_model(mdl, iter_errors=True))
        datamodel.validate_model(mdl, additional_properties=False)
        datamodel.validate_model(mdl, additional_properties=True)
        consume_errs(
            datamodel.validate_model(mdl, iter_errors=True, additional_properties=True)
        )
        consume_errs(
            datamodel.validate_model(mdl, iter_errors=True, additional_properties=False)
        )
        datamodel.validate_model(mdl, iter_errors=False, additional_properties=True)
        datamodel.validate_model(mdl, iter_errors=False, additional_properties=False)
    except:
        print("Model failed: ", mdl)
        raise
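# Hedged usage sketch for the two unittest checkers above; it assumes a
# `goodvehicle.goodVehicle()` helper (used by the pipeline tests further below)
# that builds a schema-valid base model, and that an empty dict fails the
# schema's required properties.
def test_validate_goodVehicle(self):
    self.checkModel_valid(goodvehicle.goodVehicle())

def test_validate_empty_model(self):
    self.checkModel_invalid({})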
pd.set_option("display.max_columns", 64)

# %%
h5 = "VehData/WltpGS-msaccess.h5"
caseno = 1
prop, wot, n2vs = vehdb.load_vehicle_accdb(h5, caseno)
acc_cycle = vehdb.load_vehicle_nodes(h5, caseno, "cycle")

# %%
print(list(prop.index))

# %%
# renames = vehdb.accdb_renames()
# prop = prop.rename(renames)
mdl = vehdb.mdl_from_accdb(prop, wot, n2vs)
datamodel.validate_model(mdl, additional_properties=True)
wot = mdl["wot"]

# %%
print(list(acc_cycle.columns))
print(list(mdl.keys()))

# %%
gwots = engine.interpolate_wot_on_v_grid(wot, n2vs)
gwots = engine.attach_p_avail_in_gwots(gwots, f_safety_margin=0.1)
gwots["p_resist"] = vehicle.calc_p_resist(gwots.index, mdl["f0"], mdl["f1"], mdl["f2"])

# %%
V = datamodel.get_class_v_cycle(3)

# %%
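# Hedged follow-up cell: quick sanity prints over objects already built above;
# nothing beyond the notebook's existing variables is assumed.
print(len(V), gwots.shape)
print(n2vs)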
def _checkModel_invalid(mdl):
    ex = jsonschema.ValidationError

    try:
        with pytest.raises(ex):
            datamodel.validate_model(mdl, iter_errors=False)

        errs = list(datamodel.validate_model(mdl, iter_errors=True))
        assert len(errs) > 0

        with pytest.raises(ex):
            datamodel.validate_model(mdl, additional_properties=False)
        with pytest.raises(ex):
            datamodel.validate_model(mdl, additional_properties=True)

        errs = list(
            datamodel.validate_model(mdl, iter_errors=True, additional_properties=True)
        )
        assert len(errs) > 0

        errs = list(
            datamodel.validate_model(mdl, iter_errors=True, additional_properties=False)
        )
        assert len(errs) > 0

        with pytest.raises(ex):
            datamodel.validate_model(
                mdl, iter_errors=False, additional_properties=True
            )
        with pytest.raises(ex):
            datamodel.validate_model(
                mdl, iter_errors=False, additional_properties=False
            )
    except:
        print("Model failed: ", mdl)
        raise
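# Hedged sketch of driving the pytest-style checker above; the broken models
# listed here are illustrative guesses (empty model, wrong value type), not
# cases taken from the real test-suite.
import pytest

@pytest.mark.parametrize("mdl", [{}, {"n_idle": "not-a-number"}])
def test_validate_model_invalid(mdl):
    _checkModel_invalid(mdl)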
def test_cycler_pipeline():  # wltc_class):
    wltc_class = 0
    aug = wio.make_autograph()
    ops = aug.wrap_funcs(
        [
            *pipelines.cycler_pipeline().ops,
            # fake Vs
            operation(
                lambda v: v.rename("V_dsc"),
                "FAKE.V_dsc",
                "wltc_class_data/V_cycle",
                "V_dsc",
            ),
        ]
    )
    pipe = compose(..., *ops)
    props = goodvehicle.goodVehicle()
    inp = {
        **props,
        "wltc_data": datamodel.get_wltc_data(),
        "wltc_class": wltc_class,
        "v_max": 190.3,
        "g_vmax": 6,
        # "n_min_drives": nmindrive.mdl_2_n_min_drives.compute(props)
    }
    datamodel.validate_model(inp, additional_properties=True)

    with config.evictions_skipped(True):
        sol = pipe.compute(inp, callbacks=(pipelines.check_dupe_cols))

    cycle = sol["cycle"]
    assert len(cycle) == 1612
    # assert len(cycle.columns) == 105

    renames = {
        "OK_max_n": "ok_max_n",
        "OK_g0": "ok_gear0",
        "OK_p": "ok_p",
        "OK_n": "ok_n",
        "OK_gear": "ok_gear",
        "G_min": "g_min",
        "G_max0": "g_max0",
    }
    exp = [
        ("t", ""), ("V_cycle", ""), ("V_dsc", ""), ("V", ""), ("A", ""),
        ("va_phase", ""), ("phase_1", ""), ("phase_2", ""), ("phase_3", ""),
        ("accel_raw", ""), ("run", ""), ("stop", ""), ("decel", ""),
        ("initaccel", ""), ("stopdecel", ""), ("up", ""),
        ("P_resist", ""), ("P_inert", ""), ("P_req", ""),
        ("n", "g1"), ("n", "g2"), ("n", "g3"), ("n", "g4"), ("n", "g5"), ("n", "g6"),
        ("n_norm", "g1"), ("n_norm", "g2"), ("n_norm", "g3"),
        ("n_norm", "g4"), ("n_norm", "g5"), ("n_norm", "g6"),
        ("p", "g1"), ("p", "g2"), ("p", "g3"), ("p", "g4"), ("p", "g5"), ("p", "g6"),
        ("p_avail", "g1"), ("p_avail", "g2"), ("p_avail", "g3"),
        ("p_avail", "g4"), ("p_avail", "g5"), ("p_avail", "g6"),
        ("p_avail_stable", "g1"), ("p_avail_stable", "g2"), ("p_avail_stable", "g3"),
        ("p_avail_stable", "g4"), ("p_avail_stable", "g5"), ("p_avail_stable", "g6"),
        ("p_norm", "g1"), ("p_norm", "g2"), ("p_norm", "g3"),
        ("p_norm", "g4"), ("p_norm", "g5"), ("p_norm", "g6"),
        ("P_remain", "g1"), ("P_remain", "g2"), ("P_remain", "g3"),
        ("P_remain", "g4"), ("P_remain", "g5"), ("P_remain", "g6"),
        ("ok_p", "g3"), ("ok_p", "g4"), ("ok_p", "g5"), ("ok_p", "g6"),
        ("ok_max_n", "g1"), ("ok_max_n", "g2"), ("ok_max_n", "g3"),
        ("ok_max_n", "g4"), ("ok_max_n", "g5"), ("ok_max_n", "g6"),
        ("ok_gear0", "g0"),
        ("ok_min_n_g1", "g1"), ("ok_min_n_g1_initaccel", "g1"),
        ("ok_min_n_g2", "g2"), ("ok_min_n_g2_stopdecel", "g2"),
        ("ok_min_n_g3plus_dns", "g3"), ("ok_min_n_g3plus_dns", "g4"),
        ("ok_min_n_g3plus_dns", "g5"), ("ok_min_n_g3plus_dns", "g6"),
        ("ok_min_n_g3plus_ups", "g3"), ("ok_min_n_g3plus_ups", "g4"),
        ("ok_min_n_g3plus_ups", "g5"), ("ok_min_n_g3plus_ups", "g6"),
        ("ok_n", "g1"), ("ok_n", "g2"), ("ok_n", "g3"),
        ("ok_n", "g4"), ("ok_n", "g5"), ("ok_n", "g6"),
        ("incrementing_gflags", "g0"), ("incrementing_gflags", "g1"),
        ("incrementing_gflags", "g2"), ("incrementing_gflags", "g3"),
        ("incrementing_gflags", "g4"), ("incrementing_gflags", "g5"),
        ("incrementing_gflags", "g6"),
        ("ok_gear", "g0"), ("ok_gear", "g1"), ("ok_gear", "g2"), ("ok_gear", "g3"),
        ("ok_gear", "g4"), ("ok_gear", "g5"), ("ok_gear", "g6"),
        ("g_min", ""), ("g_max0", ""),
    ]
    print(cycle.columns)
    # assert set(cycle.columns) == set(exp)
    assert set(cycle.rename(columns=renames, level=0)) == set(exp)

    assert not (
        {
            "class_phase_boundaries",
            "n2v_g_vmax",
            "n95_low",
            "n95_high",
            "n_max_cycle",
            "n_max_vehicle",
            "n_max",
        }
        - sol.keys()
    )

    steps = [getattr(n, "name", n) for n in sol.plan.steps]
    steps_executed = [getattr(n, "name", n) for n in sol.executed]
    print("\n".join(textwrap.wrap(" ".join(steps), 90)))
    # print("\n".join(textwrap.wrap(" ".join(steps_executed), 90)))
    exp_steps = """
        get_wltc_class_data
        get_class_phase_boundaries
        PhaseMarker
        interpolate_wot_on_v_grid
        attach_p_avail_in_gwots
        calc_n2v_g_vmax
        calc_n95
        calc_n_max_vehicle
        make_gwots_multi_indexer
        FAKE.V_dsc
        init_cycle_velocity
        calc_acceleration
        attach_class_phase_markers
        calc_phase_accel_raw
        calc_phase_run_stop
        calc_phase_decel
        calc_phase_initaccel
        calc_phase_stopdecel
        calc_phase_up
        calc_p_resist
        calc_inertial_power
        calc_required_power
        calc_n_max_cycle
        calc_n_max
        validate_n_max
        join_gwots_with_cycle
        calc_P_remain
        calc_OK_p
        calc_OK_max_n
        calc_OK_g0
        calc_OK_min_n
        derrive_ok_n_flags
        calc_ok_gears
        make_cycle_multi_indexer
        make_incrementing_gflags
        make_G_min
        make_G_max0
    """.split()
    assert steps == steps_executed == exp_steps
def test_cycler_pipeline():  # wltc_class):
    wltc_class = 0
    aug = wio.make_autograph()
    ops = aug.wrap_funcs(
        [
            *pipelines.cycler_pipeline().ops,
            # fake Vs
            operation(None, "FAKE.V_dsc", "wltc_class_data/V_cycle", "V_dsc"),
        ]
    )
    pipe = compose(..., *ops)
    inp = {
        **goodvehicle.goodVehicle(),
        "wltc_data": datamodel.get_wltc_data(),
        "wltc_class": wltc_class,
        "v_max": 190.3,
        "g_vmax": 6,
    }
    datamodel.validate_model(inp)

    with config.evictions_skipped(True):
        sol = pipe.compute(inp)

    exp = {
        "V_cycle", "V", "A",
        "v_phase1", "v_phase2", "v_phase3", "va_phases",
        "P_resist", "P_inert", "P_req", "t",
        ("n", "g1"), ("n", "g2"), ("n", "g3"), ("n", "g4"), ("n", "g5"), ("n", "g6"),
        ("n_norm", "g1"), ("n_norm", "g2"), ("n_norm", "g3"),
        ("n_norm", "g4"), ("n_norm", "g5"), ("n_norm", "g6"),
        ("p", "g1"), ("p", "g2"), ("p", "g3"), ("p", "g4"), ("p", "g5"), ("p", "g6"),
        ("p_avail", "g1"), ("p_avail", "g2"), ("p_avail", "g3"),
        ("p_avail", "g4"), ("p_avail", "g5"), ("p_avail", "g6"),
        ("p_avail_stable", "g1"), ("p_avail_stable", "g2"), ("p_avail_stable", "g3"),
        ("p_avail_stable", "g4"), ("p_avail_stable", "g5"), ("p_avail_stable", "g6"),
        ("p_norm", "g1"), ("p_norm", "g2"), ("p_norm", "g3"),
        ("p_norm", "g4"), ("p_norm", "g5"), ("p_norm", "g6"),
        ("p_resist", ""),
    }
    assert set(sol["cycle"].columns) == exp

    assert not (
        {
            "class_phase_boundaries",
            "n2v_g_vmax",
            "n_95_low",
            "n_95_high",
            "n_max_cycle",
            "n_max_vehicle",
            "n_max",
        }
        - sol.keys()
    )

    steps = [getattr(n, "name", n) for n in sol.plan.steps]
    steps_executed = [getattr(n, "name", n) for n in sol.executed]
    print(steps, steps_executed)
    exp_steps = """
        get_wltc_class_data
        get_forced_cycle
        get_class_phase_boundaries
        interpolate_wot_on_v_grid
        attach_p_avail_in_gwots
        attach_p_resist_in_gwots
        calc_n2v_g_vmax
        calc_n_95
        calc_n_max_vehicle
        make_gwots_multi_indexer
        FAKE.V_dsc
        init_cycle_velocity
        calc_acceleration
        attach_class_v_phase_markers
        calc_class_va_phase_markers
        calc_p_resist
        calc_inertial_power
        calc_required_power
        calc_n_max_cycle
        calc_n_max
        attach_wots
    """.split()
    assert steps == steps_executed == exp_steps
        2750    89.70
        3000    93.00
        3250    94.15
        3500    95.30
        3750    95.00
        4000    94.70
        4250    92.15
        4500    89.60
        4750    86.95
        5000    84.30
    """),
    sep="\t",
    header=0,
)
datamodel.validate_model(mdl, additional_properties=True)

exp = Experiment(mdl, skip_model_validation=True)
mdl = exp.run()

oprops = {
    "pmr": mdl["pmr"],
    "n95_low": mdl["n95_low"],
    "n95_high": mdl["n95_high"],
    "v_max": mdl["v_max"],
    "n_vmax": mdl["n_vmax"],
    "g_vmax": mdl["g_vmax"],
    "n_max1": mdl["n_max1"],
    "n_max2": mdl["n_max2"],
    "n_max3": mdl["n_max3"],
    "n_max": mdl["n_max"],