def compile(self, inputs, outputs):
    """
    Compile a callable that computes `outputs` from `inputs`.

    :param inputs: Ordered ids of the compiled-function arguments.
    :param outputs: Ordered ids of the compiled-function results.
    :return: Compiled function object.
    """
    # Reduce the model to the sub-dispatcher needed to compute `outputs`.
    dsp = self.dsp.shrink_dsp(outputs=outputs)
    # The caller will supply `inputs`, so drop their default values.
    dsp.default_values = sh.selector(
        set(dsp.default_values) - set(inputs), dsp.default_values)
    # Pre-solve everything that does not depend on the caller inputs.
    res = dsp()
    dsp = dsp.get_sub_dsp_from_workflow(
        outputs, graph=dsp.dmap, reverse=True, blockers=res, wildcard=False)
    # Freeze the pre-solved values as defaults of the reduced graph.
    for k, v in sh.selector(dsp.data_nodes, res, allow_miss=True).items():
        try:
            dsp.set_default_value(k, v.value)
        except AttributeError:
            # Circular token does not have `v.value` (it is an XlError).
            pass
    func = self.compile_class(
        dsp=dsp, function_id=self.dsp.name, inputs=inputs, outputs=outputs,
    )
    return func
def compile(self, inputs, outputs):
    """
    Build a callable mapping `inputs` to `outputs` of the model.

    :param inputs: Ordered ids of the function arguments.
    :param outputs: Ordered ids of the function results.
    :return: Compiled function object.
    """
    # Keep only the portion of the model needed for the requested outputs.
    model = self.dsp.shrink_dsp(outputs=outputs)
    # Inputs come from the caller, so their defaults must be removed.
    fixed = set(model.default_values) - set(inputs)
    model.default_values = sh.selector(fixed, model.default_values)
    # Solve whatever does not depend on the caller-provided inputs.
    solved = model()
    model = model.get_sub_dsp_from_workflow(
        outputs, graph=model.dmap, reverse=True, blockers=solved,
        wildcard=False
    )
    # Freeze the pre-computed values as defaults of the reduced model.
    pending = set(model.data_nodes) - set(model.default_values)
    for node_id, sol in sh.selector(pending, solved, allow_miss=True).items():
        model.set_default_value(node_id, sol.value)
    return self.compile_class(
        dsp=model, function_id=self.dsp.name, inputs=inputs, outputs=outputs
    )
def split_prediction_models(scores, models, default_models):
    """
    Split prediction models.

    :param scores: Models score.
    :type scores: dict

    :param models: Calibrated models.
    :type models: dict

    :param default_models: Default calibrated models.
    :type default_models: dict

    :return: Scores and prediction models.
    :rtype: tuple
    """
    sbm, model_sel, par = {}, {}, {}
    # Collect, per (model-id, cycle), the calibration scores and statuses.
    for (k, c), v in sh.stack_nested_keys(scores, depth=2):
        r = sh.selector(['models'], v, allow_miss=True)
        for m in r.get('models', ()):
            sh.get_nested_dicts(par, m, 'calibration')[c] = c
        r.update(v.get('score', {}))
        sh.get_nested_dicts(sbm, k)[c] = r
        r = sh.selector(['success'], r, allow_miss=True)
        r = sh.map_dict({'success': 'status'}, r, {'from': c})
        sh.get_nested_dicts(model_sel, k, 'calibration')[c] = r
    # Start from the defaults for each prediction cycle; calibrated models
    # then override them below. NOTE: relies on module-global
    # `prediction_cycles` — confirm it is defined at module scope.
    p = {i: dict.fromkeys(default_models, 'input') for i in prediction_cycles}
    mdls = {i: default_models.copy() for i in prediction_cycles}
    for k, n in sorted(models.items()):
        # Fall-back entry when a cycle has no specific selection.
        d = n.get(sh.NONE, (None, True, {}))
        for i in prediction_cycles:
            c, s, m = n.get(i, d)
            if m:
                s = {'from': c, 'status': s}
                sh.get_nested_dicts(model_sel, k, 'prediction')[i] = s
                mdls[i].update(m)
                p[i].update(dict.fromkeys(m, c))
    # Record, per model, which cycle each prediction parameter came from.
    for k, v in sh.stack_nested_keys(p, ('prediction', ), depth=2):
        sh.get_nested_dicts(par, k[-1], *k[:-2])[k[-2]] = v
    s = {
        'param_selections': par,
        'model_selections': model_sel,
        'score_by_model': sbm,
        'scores': scores
    }
    return (s, ) + tuple(mdls.get(k, {}) for k in prediction_cycles)
def at_models_selector(d, at_pred_inputs, models_ids, data): sgs = 'specific_gear_shifting' # Namespace shortcuts. try: vel, vsr = data['velocities'], data['velocity_speed_ratios'] t_eng, t_gears = data['engine_speeds_out'], data['gears'] sv, at_m = data['stop_velocity'], data[sgs] except KeyError: return {} t_e = ('mean_absolute_error', 'accuracy_score', 'correlation_coefficient') # at_models to be assessed. at_m = {'CMV', 'CMV_Cold_Hot', 'DTGS', 'GSPV', 'GSPV_Cold_Hot' } if at_m == 'ALL' else {at_m} # Other models to be taken from calibration output. models = sh.selector(set(models_ids) - at_m, data, allow_miss=True) # Inputs to predict the gears. inputs = sh.selector(at_pred_inputs, data, allow_miss=True) from ..physical.gear_box.at_gear import calculate_error_coefficients from ..physical.gear_box.mechanical import calculate_gear_box_speeds_in def _err(model_id, model): gears = d.dispatch(inputs=sh.combine_dicts(inputs, { sgs: model_id, model_id: model }), outputs=['gears'])['gears'] eng = calculate_gear_box_speeds_in(gears, vel, vsr, sv) err = calculate_error_coefficients(t_gears, gears, t_eng, eng, vel, sv) return err def _sort(v): e = sh.selector(t_e, v[0], output_type='list') return (e[0], -e[1], -e[2]), v[1] # Sort by error. rank = sorted( ((_err(k, m), k, m) for k, m in sorted(sh.selector(at_m, data, allow_miss=True).items())), key=_sort) if rank: data['at_scores'] = collections.OrderedDict((k, e) for e, k, m in rank) e, k, m = rank[0] models[sgs], models[k] = k, m log.debug( 'at_gear_shifting_model: %s with mean_absolute_error %.3f ' '[RPM], accuracy_score %.3f, and correlation_coefficient ' '%.3f.', k, *sh.selector(t_e, e, output_type='list')) return models
def test_excel_model_compile(self):
    """The compiled sub-model must reproduce the reference cell values."""
    model = ExcelModel()
    model.loads(self.filename_compile)
    model.finish()
    in_cells = ["A%d" % r for r in range(2, 5)]
    out_cells = ["C%d" % r for r in range(2, 5)]
    func = model.compile(
        ["'[EXCEL.XLSX]DATA'!%s" % c for c in in_cells],
        ["'[EXCEL.XLSX]DATA'!%s" % c for c in out_cells]
    )
    args = sh.selector(in_cells, self.results_compile, output_type='list')
    expected = sh.selector(
        out_cells, self.results_compile, output_type='list')
    self.assertEqual([r.value[0, 0] for r in func(*args)], expected)
def _get_values(data, keys, tag=(), update=lambda k, v: v, base=None):
    """
    Collect `keys` from the input/target/output sections of `data` into a
    nested dict, optionally transformed by `update` and prefixed by `tag`.
    """
    res = base if base is not None else {}
    stages = ('input', 'target', 'output')
    sections = sh.selector(stages, data, allow_miss=True)
    for path, node in sh.stack_nested_keys(sections, depth=3):
        path = path[::-1]
        val = update(path, sh.selector(keys, node, allow_miss=True))
        if val:
            sh.get_nested_dicts(res, *tag, *path[:-1])[path[-1]] = val
    return res
def get_values(data, keys, tag=(), update=lambda k, v: v, base=None):
    """
    Collect `keys` from the input/target/output sections of `data` into a
    nested dict, optionally transformed by `update` and prefixed by `tag`.
    """
    res = base if base is not None else {}
    stages = ('input', 'target', 'output')
    sections = sh.selector(stages, data, allow_miss=True)
    for path, node in sh.stack_nested_keys(sections, depth=3):
        path = path[::-1]
        val = update(path, sh.selector(keys, node, allow_miss=True))
        if val:
            sh.get_nested_dicts(res, *(tag + path), default=co2_utl.ret_v(val))
    return res
def extract_dice_report(encrypt_inputs, vehicle_family_id, start_time, report):
    """
    Extract the DICE report (info, deviations, vehicle data and model
    scores) from the full CO2MPAS `report`.
    """
    from co2mpas import version
    res = {
        'info': {
            'encrypt_inputs': encrypt_inputs,
            'vehicle_family_id': vehicle_family_id,
            'CO2MPAS_version': version,
            'datetime': start_time.strftime('%Y/%m/%d-%H:%M:%S')
        }
    }

    # deviation: prediction/target ratio expressed as a percentage.
    keys = 'summary', 'comparison', 'prediction'
    if sh.are_in_nested_dicts(report, *keys):
        deviation = 'declared_co2_emission_value', 'prediction_target_ratio'
        for cycle, d in sh.get_nested_dicts(report, *keys).items():
            if sh.are_in_nested_dicts(d, *deviation):
                v = (sh.get_nested_dicts(d, *deviation) - 1) * 100
                sh.get_nested_dicts(res, 'deviation')[cycle] = v

    # vehicle: basic characteristics per cycle.
    keys = [('summary', 'results', 'vehicle'), ('prediction', 'output')]
    vehicle = 'fuel_type', 'engine_capacity', 'gear_box_type', 'engine_is_turbo'
    if sh.are_in_nested_dicts(report, *keys[0]):
        for cycle, d in sh.get_nested_dicts(report, *keys[0]).items():
            if sh.are_in_nested_dicts(d, *keys[1]):
                v = sh.selector(
                    vehicle, sh.get_nested_dicts(d, *keys[1]), allow_miss=True)
                if v:
                    sh.get_nested_dicts(res, 'vehicle', cycle).update(v)

    # model scores
    keys = 'data', 'calibration', 'model_scores'
    model_scores = 'model_selections', 'param_selections', 'score_by_model', \
                   'scores'
    if sh.are_in_nested_dicts(report, *keys):
        sh.get_nested_dicts(res, 'model_scores').update(sh.selector(
            model_scores, sh.get_nested_dicts(report, *keys), allow_miss=True))

    res = copy.deepcopy(res)
    # Convert numpy scalars to native Python values, presumably for
    # serialization of the report — `stack` looks like a project helper
    # yielding (path, value) pairs.
    for k, v in list(stack(res)):
        if isinstance(v, np.generic):
            sh.get_nested_dicts(res, *k[:-1])[k[-1]] = v.item()
    return res
def _calibrate_gsm(
        velocity_speed_ratios, on_engine, anomalies, gear, velocities,
        stop_velocity, idle_engine_speed):
    """
    Calibrate a CMV gear-shifting model from observed shift velocities,
    skipping windows with the engine off or flagged anomalies.
    """
    # noinspection PyProtectedMember
    from .at_gear.cmv import CMV, _filter_gear_shifting_velocity as filter_gs
    # Lower bound of the idle band.
    idle = idle_engine_speed[0] - idle_engine_speed[1]
    _vsr = sh.combine_dicts(velocity_speed_ratios, base={0: 0})
    # Seed the shift-velocity limits: gear 0 down-shift at 0, gear 1
    # up-shift at stop velocity, top gear never up-shifts (infinity).
    limits = {
        0: {False: [0]},
        1: {True: [stop_velocity]},
        max(_vsr): {True: [dfl.INF]}
    }
    # Boundaries where engine state or anomaly flags change.
    shifts = np.unique(sum(map(_shift, (on_engine, anomalies)), []))
    for i, j in sh.pairwise(shifts):
        if on_engine[i:j].all() and not anomalies[i:j].any():
            # Gear-change indices inside the clean window, shifted back to
            # absolute positions.
            for v in np.array(list(sh.pairwise(_shift(gear[i:j])))) + i:
                if j != v[1]:
                    v, (g, ng) = velocities[slice(*v)], gear[[v[1] - 1, v[1]]]
                    up = g < ng
                    # Up-shift limit: max velocity seen; down-shift: min.
                    sh.get_nested_dicts(limits, g, up, default=list).append(
                        v.max() if up else v.min()
                    )
    # Fill missing down-shift limits from the idle-speed velocity.
    for k, v in list(limits.items()):
        limits[k] = v.get(False, [_vsr[k] * idle] * 2), v.get(True, [])
    # Re-index gears as consecutive integers sorted by original gear id.
    d = {j: i for i, j in enumerate(sorted(limits))}
    gsm = CMV(filter_gs(sh.map_dict(d, limits), stop_velocity))
    gsm.velocity_speed_ratios = sh.selector(gsm, sh.map_dict(d, _vsr))
    gsm.convert(_vsr)
    return gsm
def parse_cmd_flags(cmd_flags=None):
    """
    Parses the command line options.

    :param cmd_flags: Command line options.
    :type cmd_flags: dict

    :return: Default parameters of process model.
    :rtype: tuple
    """
    defaults = {
        'only_summary': False,
        'hard_validation': False,
        'declaration_mode': False,
        'enable_selector': False,
        'type_approval_mode': False,
        'encryption_keys': None,
        'sign_key': None,
        'output_template': sh.NONE,
        'encryption_keys_passwords': None,
        'output_folder': './outputs',
        'augmented_summary': False
    }
    flags = sh.combine_dicts(cmd_flags or {}, base=defaults)
    # Type-approval implies declaration mode, which implies hard validation.
    flags['declaration_mode'] |= flags['type_approval_mode']
    flags['hard_validation'] |= flags['declaration_mode']
    declaration_only = (
        flags['declaration_mode'] and not flags['type_approval_mode']
    )
    if declaration_only and flags['enable_selector']:
        # The selector is ignored in declaration mode: warn and disable it.
        log.info('Since CO2MPAS is launched in declaration mode the option '
                 '--enable-selector is not used.\n'
                 'If you want to use it remove -DM from the cmd.')
        flags['enable_selector'] = False
    return sh.selector(_cmd_flags, flags, output_type='list')
def parse_solution(solution):
    """
    Parse the CO2MPAS model solution.

    :param solution: CO2MPAS model solution.
    :type solution: schedula.Solution

    :return: CO2MPAS outputs.
    :rtype: dict[dict]
    """
    res = {}
    # Expand dotted node ids into a nested dict.
    for key, value in solution.items():
        *path, leaf = key.split('.')
        sh.get_nested_dicts(res, *path)[leaf] = value
    # Mirror the calibrated WLTP CO2 emission values as prediction targets.
    for path, value in list(sh.stack_nested_keys(res, depth=3)):
        parent, cycle = path[:-1], path[-1]
        if parent == ('output', 'calibration') and cycle in (
                'wltp_l', 'wltp_h'):
            value = sh.selector(
                ('co2_emission_value', ), value, allow_miss=True)
            if value:
                target = sh.get_nested_dicts(res, 'target', 'prediction')
                target[cycle] = sh.combine_dicts(value, target.get(cycle, {}))
    res['pipe'] = solution.pipe
    return res
def template(output_file, cycle_type, gear_box_type, wltp_class):
    """
    Writes a sample template OUTPUT_FILE.

    OUTPUT_FILE: SYNCING input template file (.xlsx). [default: ./datasync.xlsx]
    """
    import pandas as pd
    from co2mpas.core.model.physical.cycle import dsp
    # Theoretical times/velocities computed by the physical cycle model.
    theoretical = sh.selector(['times', 'velocities'], dsp(
        inputs=dict(cycle_type=cycle_type.upper(),
                    gear_box_type=gear_box_type, wltp_class=wltp_class,
                    downscale_factor=0),
        outputs=['times', 'velocities'], shrink=True))
    # Empty columns the user is expected to fill in.
    base = dict.fromkeys((
        'times', 'velocities', 'target gears', 'engine_speeds_out',
        'engine_coolant_temperatures', 'co2_normalization_references',
        'alternator_currents', 'battery_currents', 'target fuel_consumptions',
        'target co2_emissions', 'target engine_powers_out'
    ), [])
    data = dict(theoretical=theoretical, dyno=base, obd=base)
    os.makedirs(osp.dirname(output_file), exist_ok=True)
    # One sheet per data-set.
    with pd.ExcelWriter(output_file) as writer:
        for k, v in data.items():
            pd.DataFrame(v).to_excel(writer, k, index=False)
    return data
def parse_dsp_solution(solution):
    """
    Parses the co2mpas model results.

    :param solution: Co2mpas model after dispatching.
    :type solution: schedula.Solution

    :return: Mapped outputs.
    :rtype: dict[dict]
    """
    res = {}
    # Expand dotted node ids into a nested dict.
    for key, value in solution.items():
        sh.get_nested_dicts(res, *key.split('.'), default=co2_utl.ret_v(value))
    # Mirror the calibrated WLTP CO2 emission values as prediction targets.
    for path, value in list(sh.stack_nested_keys(res, depth=3)):
        parent, cycle = path[:-1], path[-1]
        if parent == ('output', 'calibration') and cycle in (
                'wltp_l', 'wltp_h'):
            value = sh.selector(
                ('co2_emission_value', ), value, allow_miss=True)
            if value:
                target = sh.get_nested_dicts(res, 'target', 'prediction')
                target[cycle] = sh.combine_dicts(value, target.get(cycle, {}))
    res['pipe'] = solution.pipe
    return res
def save_json(output_fpath, outputs):
    """
    Save dsp outputs in an JSON file.

    :param output_fpath: Output file path.
    :type output_fpath: str

    :param outputs: Model outputs.
    :type outputs: dict

    :return: File path where output are written.
    :rtype: str
    """
    import json
    dirname = osp.dirname(output_fpath) or '.'
    os.makedirs(dirname, exist_ok=True)
    payload = sh.selector(('shifts', 'resampled'), outputs, allow_miss=True)
    with open(output_fpath, 'w') as fp:
        json.dump(payload, fp, default=_json_default)
    return output_fpath
def calculate_shifts(labels, reference_name, data):
    """
    Calculates the shifts from the reference data-set.

    :param labels:
        Reference-labels (i.e., "x", "y") for each data-set.
        It is like `{"<set-name>": {"x": "<x-label>", "y": "<y-label>"}, ...}`.
    :type labels: collections.defaultdict

    :param reference_name: Reference data-set name.
    :type reference_name: str

    :param data: Data-sets.
    :type data: dict[str, dict[str, numpy.array]]

    :return: Shifts from the reference data-set.
    :rtype: dict[str, float]
    """
    # Resolve the x/y arrays of every data-set through its labels.
    datasets = {name: _get(labels, name, values, 'x', 'y')
                for name, values in data.items()}
    others = [name for name in datasets if name != reference_name]
    # Reference first, then the remaining sets in their original order.
    ordered = sh.selector(
        [reference_name] + others, datasets, output_type='list')
    return sh.map_list(others, *_compute_shifts(*ordered))
def xsingle(cell, rng):
    """
    Restrict a single-column range to the rows of `cell`; return #VALUE!
    when the intersection is empty.
    """
    single = len(rng.ranges) == 1 and not rng.is_set
    if single and rng.value.shape[1] == 1:
        rows = sh.selector(('r1', 'r2'), cell.ranges[0])
        rng = rng & Ranges((sh.combine_dicts(rng.ranges[0], rows), ))
    return rng if rng.ranges else Error.errors['#VALUE!']
def select_prediction_data(data, *new_data):
    """
    Selects the data required to predict the CO2 emissions with CO2MPAS model.

    :param data: Output data.
    :type data: dict

    :param new_data: New data.
    :type new_data: dict

    :return: Data required to predict the CO2 emissions with CO2MPAS model.
    :rtype: dict
    """
    keys = _prediction_data
    from .physical.defaults import dfl
    if not dfl.functions.select_prediction_data.theoretical:
        keys = keys + _prediction_data_ts
    selected = sh.selector(keys, data, allow_miss=True)
    if new_data:
        new_data = sh.combine_dicts(*new_data)
        selected = sh.combine_dicts(selected, new_data)
    # Drop the gears unless they were explicitly provided, when the gear box
    # is automatic or they do not match the velocity profile length.
    if 'gears' in selected and 'gears' not in new_data:
        if selected.get('gear_box_type', 0) == 'automatic' or \
                len(selected.get('velocities', ())) != len(selected['gears']):
            selected.pop('gears')
    return selected
def _run_variations(plan, bases, core_model, timestamp):
    """
    Yield one solved core-model run for each variation row of `plan`,
    applied on top of its base inputs/solution.
    """
    for r in _ProgressBar(plan, _format_meter=_format_meter):
        sol, data = bases[r['base']], r['data']
        if 'solution' in sol:
            # Overlay the variation data on the values already solved.
            s = sol['solution']
            base = _define_inputs(s, sh.combine_nested_dicts(sh.selector(
                data, s, allow_miss=True
            ), data))
        elif 'base' in sol:
            base = sh.combine_nested_dicts(sol['base'], data, depth=2)
        else:
            # Nothing usable for this row: skip it.
            continue
        # Strip EMPTY placeholders from dict-like sections.
        for i, d in base.items():
            if hasattr(d, 'items'):
                base[i] = {k: v for k, v in d.items() if v is not sh.EMPTY}
        sol = core_model(_define_inputs(sol, dict(
            base=base,
            vehicle_name='-'.join((str(r['id']), sol['vehicle_name'])),
            timestamp=timestamp
        )))
        # `keys` marks output sections overridden by the plan, which must
        # not be re-copied from the model summary.
        summary, keys = {}, {
            tuple(k.split('.')[:0:-1]) for k in base if k.startswith('output.')
        }
        for k, v in data.items():
            k = ('plan %s' % k).split('.')[::-1]
            sh.get_nested_dicts(summary, *k).update(v)
        for k, v in sh.stack_nested_keys(sol['summary'], depth=3):
            if k[:-1] not in keys:
                sh.get_nested_dicts(summary, *k).update(v)
        sol['summary'] = summary
        yield sol
def _args(self, *args):
    """
    Map positional arguments onto the wrapped function's input order,
    summing values that feed the same linked key.
    """
    assert len(args) == len(self.inputs)
    acc = {}
    for links, value in zip(self.inputs.values(), args):
        for key in links:
            acc[key] = value + acc[key] if key in acc else value
    return sh.selector(self.func.inputs, acc, output_type='list')
def fromiter(gen, dtype, keys=None, count=-1):
    """
    Build a numpy array from an iterable; for structured dtypes return the
    (selected) fields as a list instead of the raw array.
    """
    import schedula as sh
    arr = np.fromiter(gen, dtype=dtype, count=count)
    names = arr.dtype.names
    if not names:
        return arr
    return sh.selector(keys or names, arr, output_type='list')
def test_selector(self):
    """sh.selector: plain selection, copy, list output and allow_miss."""
    keys, src = ['a', 'b'], {'a': 1, 'b': 2, 'c': 3}
    self.assertEqual(sh.selector(keys, src), {'a': 1, 'b': 2})
    src = {'a': 1, 'b': object(), 'c': 3}
    expected = {'a': 1, 'b': src['b']}
    self.assertEqual(sh.selector(keys, src), expected)
    # With copy=True the `object()` value is copied, so it compares unequal.
    self.assertNotEqual(sh.selector(keys, src, copy=True), expected)
    src = {'a': 1, 'b': 2, 'c': 3}
    self.assertSequenceEqual(
        sh.selector(keys, src, output_type='list'), (1, 2))
    keys, src = ['a', 'd'], {'a': 1, 'b': 1}
    self.assertEqual(sh.selector(keys, src, allow_miss=True), {'a': 1})
    self.assertRaises(KeyError, sh.selector, keys, src, output_type='list')
def test_selector(self):
    """sh.selector: selection, copy (skipped on micropython), list output."""
    keys, src = ['a', 'b'], {'a': 1, 'b': 2, 'c': 3}
    self.assertEqual(sh.selector(keys, src), {'a': 1, 'b': 2})
    src = {'a': 1, 'b': object(), 'c': 3}
    expected = {'a': 1, 'b': src['b']}
    self.assertEqual(sh.selector(keys, src), expected)
    if EXTRAS != 'micropython':
        # With copy=True the `object()` value is copied: unequal result.
        self.assertNotEqual(sh.selector(keys, src, copy=True), expected)
    src = {'a': 1, 'b': 2, 'c': 3}
    self.assertEqual(
        tuple(sh.selector(keys, src, output_type='list')), (1, 2))
    keys, src = ['a', 'd'], {'a': 1, 'b': 1}
    self.assertEqual(sh.selector(keys, src, allow_miss=True), {'a': 1})
    self.assertRaises(KeyError, sh.selector, keys, src, output_type='list')
def test_files(self):
    """
    Seat-belt regression test: run the demo input files through the
    processing model and compare results against a stored baseline.
    """
    mydir = osp.dirname(__file__)
    # Baseline file: explicit SEATBELT_FILE if it exists, else a temp file.
    if SEATBELT_FILE and osp.isfile(SEATBELT_FILE):
        res_file = SEATBELT_FILE
    else:
        tmpdir = tempfile.gettempdir()
        res_file = osp.join(tmpdir, 'co2mpas_seatbelt_demos.dill')
    log.info(
        "\n OVERWRITE_SEATBELT: %s \n"
        " RUN_INPUT_FOLDER: %s \n"
        " RUN_ALL_FILES: %s \n"
        " SEATBELT_FILE: %s",
        OVERWRITE_SEATBELT, RUN_INPUT_FOLDER, RUN_ALL_FILES, res_file)
    if not OVERWRITE_SEATBELT and osp.isfile(res_file):
        old_results = sh.load_dispatcher(res_file)
        log.info("Old results loaded!")
    else:
        old_results = None
    path = RUN_INPUT_FOLDER or osp.join(mydir, '..', 'co2mpas', 'demos')
    # Either the whole folder or the single default demo file.
    file = (path if (RUN_ALL_FILES or RUN_INPUT_FOLDER) else osp.join(
        path, 'co2mpas_demo-0.xlsx'))
    model = vehicle_processing_model()
    results = []
    inp_files = file_finder([file])
    if not inp_files:
        raise AssertionError("DataCheck found no input-files in %r!" % file)
    for fpath in inp_files:
        fname = osp.splitext(osp.basename(fpath))[0]
        log.info('Processing: %s', fname)
        inputs = {
            'input_file_name': fpath,
            'variation': {'flag.only_summary': True}
        }
        r = model.dispatch(inputs=inputs)
        # Keep only the comparable sections; `pipe` is run-specific.
        r = sh.selector(['report', 'summary'], r['solution'])
        r.get('report', {}).pop('pipe', None)
        results.append(sorted(sh.stack_nested_keys(r)))
    if not OVERWRITE_SEATBELT and osp.isfile(res_file):
        log.info('Comparing...')
        self._check_results(results, old_results)
    else:
        # First run (or forced overwrite): store the new baseline.
        os.environ["OVERWRITE_SEATBELT"] = '0'
        sh.save_dispatcher(results, res_file)
        log.info('Overwritten seat belt %r.', res_file)
def test_excel_model_compile(self):
    """
    Compile sub-models from loaded workbooks and check computed values,
    deep-copy support and circular-reference handling.
    """
    xl_model = ExcelModel().loads(self.filename_compile).finish()
    inputs = ["A%d" % i for i in range(2, 5)]
    outputs = ["C%d" % i for i in range(2, 5)]
    func = xl_model.compile(
        ["'[excel.xlsx]DATA'!%s" % i for i in inputs],
        ["'[excel.xlsx]DATA'!%s" % i for i in outputs])
    i = sh.selector(inputs, self.results_compile, output_type='list')
    res = sh.selector(outputs, self.results_compile, output_type='list')
    self.assertEqual([x.value[0, 0] for x in func(*i)], res)
    # Both the model and the compiled function must be deep-copyable.
    self.assertIsNot(xl_model, copy.deepcopy(xl_model))
    self.assertIsNot(func, copy.deepcopy(func))
    # Workbook containing a circular reference: with the input False the
    # value resolves, with True it yields the circular-reference error.
    xl_model = ExcelModel().loads(self.filename_circular).finish(circular=1)
    func = xl_model.compile(
        ["'[circular.xlsx]DATA'!A10"], ["'[circular.xlsx]DATA'!E10"])
    self.assertEqual(func(False).value[0, 0], 2.0)
    self.assertIs(func(True).value[0, 0], ERR_CIRCULAR)
    self.assertIsNot(xl_model, copy.deepcopy(xl_model))
    self.assertIsNot(func, copy.deepcopy(func))
def fast_range2parts(**kw):
    """
    Parse range parts by trying each specialized variant in order; raise
    ``ValueError`` when none of them accepts the given keys.
    """
    inputs = sh.selector(_keys, kw, allow_miss=True)

    for func in (fast_range2parts_v1, fast_range2parts_v2,
                 fast_range2parts_v3):
        try:
            return sh.combine_dicts(kw, base=func(**inputs))
        except TypeError:
            # Missing/extra arguments for this variant: try the next one.
            pass
    else:
        # for-else: reached only when no variant returned (all raised).
        raise ValueError
def split_prediction_models(
        scores, calibrated_models, input_models, cycle_ids=()):
    """
    Split the calibrated models into per-cycle prediction models and
    collect the selection/score bookkeeping.
    """
    sbm, model_sel, par = {}, {}, {}
    # Collect, per (model-id, cycle), the calibration scores and statuses.
    for (k, c), v in sh.stack_nested_keys(scores, depth=2):
        r = sh.selector(['models'], v, allow_miss=True)
        for m in r.get('models', ()):
            sh.get_nested_dicts(par, m, 'calibration')[c] = c
        r.update(v.get('score', {}))
        sh.get_nested_dicts(sbm, k, c, default=co2_utl.ret_v(r))
        r = sh.selector(['success'], r, allow_miss=True)
        r = sh.map_dict({'success': 'status'}, r, {'from': c})
        sh.get_nested_dicts(model_sel, k, 'calibration')[c] = r
    # Start from the input models; calibrated selections override below.
    p = {i: dict.fromkeys(input_models, 'input') for i in cycle_ids}
    models = {i: input_models.copy() for i in cycle_ids}
    for k, n in sorted(calibrated_models.items()):
        # Fall-back entry when a cycle has no specific selection.
        d = n.get(sh.NONE, (None, True, {}))
        for i in cycle_ids:
            c, s, m = n.get(i, d)
            if m:
                s = {'from': c, 'status': s}
                sh.get_nested_dicts(model_sel, k, 'prediction')[i] = s
                models[i].update(m)
                p[i].update(dict.fromkeys(m, c))
    # Record, per model, which cycle each prediction parameter came from.
    for k, v in sh.stack_nested_keys(p, ('prediction', ), depth=2):
        sh.get_nested_dicts(par, k[-1], *k[:-1], default=co2_utl.ret_v(v))
    s = {
        'param_selections': par,
        'model_selections': model_sel,
        'score_by_model': sbm,
        'scores': scores
    }
    return (s, ) + tuple(models.get(k, {}) for k in cycle_ids)
def _check_initial_temperature(data, *args):
    """
    Validate the initial engine temperature; return ``(keys, message)`` on
    failure, ``None`` when valid or when the required data are missing.
    """
    keys = ('initial_temperature', 'engine_coolant_temperatures',
            'engine_speeds_out', 'idle_engine_speed_median')
    try:
        values = sh.selector(keys, data, output_type='list')
        if not check_initial_temperature(*values):
            return keys, "Initial engine temperature outside permissible " \
                         "limits according to GTR!"
    except KeyError:
        # Some of `keys` are not in `data`: nothing to check.
        pass
def _check_sign_currents(data, *args):
    """
    Validate the sign of battery/alternator currents; return
    ``(keys, message)`` on failure, ``None`` otherwise.
    """
    keys = ('battery_currents', 'alternator_currents')
    try:
        signs = check_sign_currents(
            *sh.selector(keys, data, output_type='list'))
        if not all(signs):
            wrong = ' and '.join(k for k, ok in zip(keys, signs) if not ok)
            return keys, "Probably '{}' have the wrong sign!".format(wrong)
    except KeyError:
        # `keys` are not in `data`: nothing to check.
        pass
def add(self, dsp):
    """
    Register the missing range rows as data nodes on `dsp` and link them
    as inputs of this operator.
    """
    base = self.range.ranges[0]
    ctx = sh.selector(('sheet_id', ), base)
    for r, n in self.missing:
        ist = Ranges.format_range(('name', 'n1', 'n2'), n1=n, r1=r, **ctx)
        k = ist['name']
        self.inputs[k] = _get_indices_intersection(base, ist)
        # NOTE: the trailing comma makes `f` a one-element tuple —
        # `filters` apparently expects an iterable of filter callables.
        f = functools.partial(format_output, ist),
        dsp.add_data(k, [[sh.EMPTY]], filters=f)
    # Skip the identity case where the only input is the output itself.
    if list(self.inputs) != [self.output]:
        dsp.add_function(None, self, self.inputs or None, [self.output])
def _check_full_load(data, *args):
    """
    Check that the full-load curve starts at or below the median idle
    speed; return ``(keys, message)`` on failure, ``None`` otherwise.
    """
    keys = ('idle_engine_speed_median', 'full_load_speeds')
    try:
        idle, flc_speeds = sh.selector(
            keys, _get_engine_model(keys)(data, keys), output_type='list')
    except KeyError:
        # Required data not available: nothing to check.
        return
    if idle < flc_speeds[0]:
        msg = "You have not provided Full Load Curve values below %f RPMs. \n" \
              "This may cause issues in the simulation. Please start from " \
              "idle RPM (%f) or correct the " \
              "`idle_engine_speed_median = full_load_speeds[0]`!"
        return keys, msg % (flc_speeds[0], idle)
def _check_gear_box(data, *args):
    """
    A planetary gear box is only valid for hybrids; return
    ``(keys, message)`` on violation, ``None`` otherwise.
    """
    keys = ('gear_box_type', 'is_hybrid')
    try:
        box_type, hybrid = sh.selector(keys, data, output_type='list')
    except KeyError:
        # `keys` are not in `data`: nothing to check.
        return
    if box_type == 'planetary' and not hybrid:
        return keys, "`gear_box_type` cannot be 'planetary' when " \
                     "`is_hybrid = False`." \
                     "Hence, set `gear_box_type != 'planetary'` or " \
                     "set `is_hybrid = True`!"