Example #1
def _summary2df(data):
    res = []
    summary = data.get('summary', {})

    if 'results' in summary:
        r = {}
        index = ['cycle', 'stage', 'usage']

        for k, v in sh.stack_nested_keys(summary['results'], depth=4):
            l = sh.get_nested_dicts(r, k[0], default=list)
            l.append(sh.combine_dicts(sh.map_list(index, *k[1:]), v))

        if r:
            df = _dd2df(
                r, index=index, depth=2,
                col_key=functools.partial(_sort_key, p_keys=('param',) * 2),
                row_key=functools.partial(_sort_key, p_keys=index)
            )
            import pandas as pd
            df.columns = pd.MultiIndex.from_tuples(_add_units(df.columns))
            setattr(df, 'name', 'results')
            res.append(df)

    if 'selection' in summary:
        df = _dd2df(
            summary['selection'], ['model_id'], depth=2,
            col_key=functools.partial(_sort_key, p_keys=('stage', 'cycle')),
            row_key=functools.partial(_sort_key, p_keys=())
        )
        setattr(df, 'name', 'selection')
        res.append(df)

    if 'comparison' in summary:
        r = {}
        for k, v in sh.stack_nested_keys(summary['comparison'], depth=3):
            v = sh.combine_dicts(v, base={'param_id': k[-1]})
            sh.get_nested_dicts(r, *k[:-1], default=list).append(v)
        if r:
            df = _dd2df(
                r, ['param_id'], depth=2,
                col_key=functools.partial(_sort_key, p_keys=('stage', 'cycle')),
                row_key=functools.partial(_sort_key, p_keys=())
            )
            setattr(df, 'name', 'comparison')
            res.append(df)

    if res:
        return {'summary': res}
    return {}
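A minimal sketch of the key move in _summary2df: pairing the positional key
parts yielded by sh.stack_nested_keys with the named index fields (the key
values below are illustrative, not from the source):

import schedula as sh

index = ['cycle', 'stage', 'usage']
# One nested-key tuple as stack_nested_keys(summary['results'], depth=4)
# would yield it: (model_id, cycle, stage, usage).
k = ('vehicle', 'wltp_h', 'calibration', 'output')
row = sh.map_list(index, *k[1:])
assert row == {'cycle': 'wltp_h', 'stage': 'calibration', 'usage': 'output'}
# sh.combine_dicts then merges each value dict v into this labelled row.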
Example #2
def _selector(name, data_in, data_out, setting):
    d = sh.Dispatcher(name='%s selector' % name,
                      description='Select the calibrated %s.' % name)

    errors, setting = [], setting or {}
    _sort_models = setting.pop('sort_models', sort_models)

    if 'weights' in setting:
        _weights = sh.map_list(setting.get('names', setting['targets']),
                               *setting.pop('weights'))
    else:
        _weights = None

    _get_best_model = functools.partial(setting.pop('get_best_model',
                                                    get_best_model),
                                        models_wo_err=setting.pop(
                                            'models_wo_err', None),
                                        selector_id=d.name)

    d.add_data(data_id='selector_settings', default_value={})

    node_ids = ['error_settings', 'best_model_settings']

    d.add_function(function=functools.partial(define_selector_settings,
                                              node_ids=node_ids),
                   inputs=['selector_settings'],
                   outputs=node_ids)

    for i in data_in:
        e = 'error/%s' % i

        errors.append(e)

        d.add_function(function=_errors(name, i, data_out, setting),
                       inputs=['error_settings', i] +
                       [k for k in data_out if k != i],
                       outputs=[e])

    d.add_function(function_id='sort_models',
                   function=functools.partial(_sort_models, weights=_weights),
                   inputs=errors,
                   outputs=['rank'])

    d.add_function(function_id='get_best_model',
                   function=_get_best_model,
                   inputs=['rank', 'best_model_settings'],
                   outputs=['model', 'errors'])

    return sh.SubDispatch(d, outputs=['model', 'errors'], output_type='list')
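_selector returns a sh.SubDispatch, which makes the dispatcher callable like a
plain function; with output_type='list' the selected outputs come back as an
unpackable list. A minimal sketch of that calling convention (toy dispatcher,
not the selector itself):

import schedula as sh

d = sh.Dispatcher(name='toy')
d.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
func = sh.SubDispatch(d, outputs=['c'], output_type='list')
c, = func({'a': 1, 'b': 2})  # dispatches and returns [2]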
Example #3
def _format_scores(scores):
    res = {}
    for k, j in sh.stack_nested_keys(scores, depth=3):
        if k[-1] in ('limits', 'errors'):
            model_id = k[0]
            extra_field = ('score', ) if k[-1] == 'errors' else ()
            for i, v in sh.stack_nested_keys(j):
                i = (
                    model_id,
                    i[-1],
                    k[1],
                ) + i[:-1] + extra_field
                sh.get_nested_dicts(res, *i, default=co2_utl.ret_v(v))
    sco = {}
    for k, v in sorted(sh.stack_nested_keys(res, depth=4)):
        v.update(sh.map_list(['model_id', 'param_id'], *k[:2]))
        sh.get_nested_dicts(sco, *k[2:], default=list).append(v)
    return sco
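Both loops rely on the append-to-leaf idiom of sh.get_nested_dicts: the
default callable initializes a missing leaf, and subsequent calls return the
same object. In isolation (toy keys, not from the source):

import schedula as sh

res = {}
sh.get_nested_dicts(res, 'model', 'wltp_h', default=list).append({'score': 1})
sh.get_nested_dicts(res, 'model', 'wltp_h', default=list).append({'score': 2})
assert res == {'model': {'wltp_h': [{'score': 1}, {'score': 2}]}}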
Example #4
def _extract_summary_from_output(report, extracted):
    for k, v in sh.stack_nested_keys(report.get('output', {}), depth=2):
        k = k[::-1]
        for u, i, j in _param_names_values(v.get('pa', {})):
            o = {}
            if i == 'co2_params_calibrated':
                o = _format_dict(j.valuesdict().items(), 'co2_params %s')
            elif i == 'calibration_status':
                o = _format_dict(enumerate(j), 'status co2_params step %d',
                                 lambda x: x[0])
            elif i == 'willans_factors':
                o = j
            elif i == 'phases_willans_factors':
                for n, m in enumerate(j):
                    o.update(_format_dict(m.items(), '%s phase {}'.format(n)))
            elif i == 'co2_rescaling_scores':
                o = sh.map_list(
                    ['rescaling_mean', 'rescaling_std', 'rescaling_n'], *j)
            elif i in ('has_sufficient_power', ):
                o = {i: j}

            if o:
                sh.get_nested_dicts(extracted, *(k + (u, ))).update(o)
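_format_dict is a private helper not shown on this page; judging only from the
call sites above, it renames keys through a format string and optionally
transforms the values. A hypothetical equivalent (an assumption, not the
actual co2mpas definition):

def _format_dict(gen, str_format='%s', func=lambda x: x):
    # Key goes through the format string; value goes through func.
    return {str_format % k: func(v) for k, v in gen}

# e.g. _format_dict(enumerate(steps), 'status co2_params step %d',
# lambda x: x[0]) would keep only the first field of each step tuple.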
Example #5
    'idle_engine_speed', 'full_load_curve', 'accelerations', 'motive_powers',
    'engine_speeds_out', 'engine_coolant_temperatures', 'plateau_acceleration',
    'time_cold_hot_transition', 'times', 'stop_velocity', 'cycle_type',
    'use_dt_gear_shifting', 'specific_gear_shifting', 'velocity_speed_ratios',
    'velocities', 'MVL', 'fuel_saving_at_strategy', 'change_gear_window_width',
    'max_velocity_full_load_correction'
]

#: Relevant outputs of the model.
outputs = ['gears']

#: Targets to compare the outputs of the model.
targets = outputs

#: Weight coefficients to compute the model score.
weights = sh.map_list(targets, -1)

#: Metrics to compare outputs with targets.
metrics = {'gears': _accuracy_score}


def _correlation_coefficient(t, o):
    import numpy as np
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.corrcoef(t, o)[0, 1] if t.size > 1 else np.nan


def calculate_error_coefficients(identified_gears, gears, engine_speeds,
                                 predicted_engine_speeds, velocities,
                                 stop_velocity):
    """
Example #6
models = [
    'co2_params_calibrated', 'calibration_status', 'initial_friction_params',
    'engine_idle_fuel_consumption', 'kco2_wltp_correction_factor'
]

#: Inputs required to run the model.
inputs = ['co2_emissions_model']

#: Relevant outputs of the model.
outputs = ['co2_emissions', 'calibration_status']

#: Targets to compare the outputs of the model.
targets = ['identified_co2_emissions', 'calibration_status']

#: Weight coefficients to compute the model score.
weights = sh.map_list(targets, 1, None)


# noinspection PyUnusedLocal
def metric_calibration_status(y_true, y_pred):
    """
    Metric for the `calibration_status`.

    :param y_true:
        Reference calibration_status.
    :type y_true: list

    :param y_pred:
        Predicted calibration_status.
    :type y_pred: list
Example #7
    def test_map_list(self):
        key_map = ['a', {'a': 'c'}, ['a', {'a': 'd'}]]
        inputs = (2, {'a': 3, 'b': 2}, [1, {'a': 4}])
        res = sh.map_list(key_map, *inputs)
        self.assertEqual(res, {'a': 1, 'b': 2, 'c': 3, 'd': 4})
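The test packs the three key_map behaviors into a single call; split apart
(assuming schedula is imported as sh), each element maps its positional input
differently:

import schedula as sh

print(sh.map_list(['a'], 2))                            # {'a': 2}, plain key
print(sh.map_list([{'a': 'c'}], {'a': 3, 'b': 2}))      # {'c': 3, 'b': 2}, dict renames keys
print(sh.map_list([['a', {'a': 'd'}]], [1, {'a': 4}]))  # {'a': 1, 'd': 4}, list recurses
# In the combined call the nested list's 'a' overwrites the first one,
# giving {'a': 1, 'b': 2, 'c': 3, 'd': 4}.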
Example #8
        Phases when engine speed is affected by the after treatment warm up [-].
    :type after_treatment_warm_up_phases: numpy.array

    :return:
        Error.
    :rtype: float
    """
    from co2mpas.utils import mae
    b = ~calculate_clutch_phases(times, 1, 1, gear_shifts, 0, (-4.0, 4.0))
    b &= (velocities > stop_velocity) & ~after_treatment_warm_up_phases
    b &= on_engine & (hybrid_modes == 1)
    return float(mae(y_true[b], y_pred[b])) if b.any() else 0.0


#: Metrics to compare outputs with targets.
metrics = sh.map_list(targets, metric_engine_speed_model)

#: Upper score limits to raise the warnings.
up_limit = sh.map_list(targets, 40)


def select_models(keys, data):
    """
    Select models from data.

    :param keys:
        Model keys.
    :type keys: list

    :param data:
        Cycle data.
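The error metric above scores only the samples that survive several boolean
masks and falls back to zero when nothing is left; the pattern in isolation
(plain numpy, with mae assumed to be a mean absolute error):

import numpy as np

def masked_mae(y_true, y_pred, mask):
    # Score only the masked samples; 0.0 when the mask selects nothing.
    if not mask.any():
        return 0.0
    return float(np.mean(np.abs(y_true[mask] - y_pred[mask])))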
Example #10
def _error(name, setting):
    d = sh.Dispatcher(
        name=name,
        description='Calculates the error of a calibrated model against a reference.',
    )

    default_settings = {
        'inputs_map': {},
        'targets': [],
        'metrics_inputs': {},
        'up_limit': None,
        'dn_limit': None
    }

    default_settings.update(setting)
    default_settings['names'] = default_settings.get(
        'names', default_settings['targets'])

    it = sh.selector(['up_limit', 'dn_limit'], default_settings).items()

    for k, v in it:
        if v is not None:
            default_settings[k] = sh.map_list(setting['names'], *v)

    d.add_function(function_id='select_inputs',
                   function=sh.map_dict,
                   inputs=['inputs_map', 'data'],
                   outputs=['inputs<0>'])

    d.add_function(function_id='select_inputs',
                   function=functools.partial(sh.selector, allow_miss=True),
                   inputs=['inputs', 'inputs<0>'],
                   outputs=['inputs<1>'])

    d.add_function(function=sh.combine_dicts,
                   inputs=['calibrated_models', 'inputs<1>'],
                   outputs=['prediction_inputs'])

    d.add_function(function=select_targets,
                   inputs=['names', 'targets', 'data'],
                   outputs=['references'])

    d.add_function(function=functools.partial(
        default_settings.pop('dsp', lambda x: x), {}),
                   inputs=['prediction_inputs', 'calibrated_models'],
                   outputs=['results'])

    d.add_function(function=select_outputs,
                   inputs=['names', 'outputs', 'targets', 'results'],
                   outputs=['predictions'])

    d.add_function(function_id='select_metrics_inputs',
                   function=functools.partial(sh.selector, allow_miss=True),
                   inputs=['metrics_inputs', 'data'],
                   outputs=['metrics_args'])

    d.add_function(
        function=make_metrics,
        inputs=['metrics', 'references', 'predictions', 'metrics_args'],
        outputs=['errors'])

    d.add_function(function=check_limits,
                   inputs=['errors', 'up_limit', 'dn_limit'],
                   outputs=['status'])

    for k, v in default_settings.items():
        d.add_data(k, v)

    func = sh.SubDispatch(dsp=d,
                          outputs=['errors', 'status'],
                          output_type='list')

    return func
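_error selects its inputs with sh.selector(..., allow_miss=True), which
tolerates requested keys that the data does not contain; in isolation (toy
data, not from the source):

import schedula as sh

data = {'times': [0, 1], 'velocities': [0, 5]}
# allow_miss=True skips absent keys instead of raising KeyError.
print(sh.selector(['times', 'temperatures'], data, allow_miss=True))
# {'times': [0, 1]}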
Example #11
    :param y_pred:
        Predicted engine speed vector [RPM].
    :type y_pred: numpy.array

    :param on_engine:
        If the engine is on [-].
    :type on_engine: numpy.array

    :return:
        Error.
    :rtype: float
    """
    from co2mpas.utils import mae
    return float(mae(y_true[on_engine], y_pred[on_engine]))


#: Metrics to compare outputs with targets.
metrics = sh.map_list(targets, metric_clutch_torque_converter_model)

#: Upper score limits to raise the warnings.
up_limit = sh.map_list(targets, 100)

#: Prediction model.
dsp = copy.deepcopy(_clutch_tc).add_function(
    function=calculate_engine_speeds_out,
    inputs=[
        'on_engine', 'idle_engine_speed', 'engine_speeds_out_hot',
        'clutch_tc_speeds_delta'
    ],
    outputs=['engine_speeds_out'])
Example #12
#: Inputs required to run the model.
inputs = [
    'drive_battery_electric_powers', 'times', 'motive_powers', 'accelerations',
    'on_engine', 'starter_currents', 'initial_service_battery_state_of_charge'
]

#: Relevant outputs of the model.
outputs = [
    'alternator_currents', 'service_battery_currents',
    'drive_battery_currents', 'dcdc_converter_currents',
    'service_battery_state_of_charges', 'drive_battery_state_of_charges'
]
#: Targets to compare the outputs of the model.
targets = outputs

#: Weight coefficients to compute the model score.
weights = sh.map_list(targets, 1, 1, 1, 1, 0, 0)

#: Metrics to compare outputs with targets.
metrics = sh.map_list(targets, *([co2_utl.mae] * 6))

#: Upper score limits to raise the warnings.
up_limit = dict.fromkeys(('alternator_currents', 'service_battery_currents',
                          'drive_battery_currents', 'dcdc_converter_currents'),
                         60)

#: Prediction model.
# noinspection PyProtectedMember
dsp = sh.Blueprint(_electrics, inputs, outputs,
                   models)._set_cls(define_sub_model)
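With six targets the positional arguments of sh.map_list pair off in order,
so the two state-of-charge signals get zero weight; spelled out (same data as
above, for illustration):

import schedula as sh

targets = [
    'alternator_currents', 'service_battery_currents',
    'drive_battery_currents', 'dcdc_converter_currents',
    'service_battery_state_of_charges', 'drive_battery_state_of_charges'
]
weights = sh.map_list(targets, 1, 1, 1, 1, 0, 0)
assert weights['alternator_currents'] == 1
assert weights['drive_battery_state_of_charges'] == 0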
Example #13
    'after_treatment_cooling_duration',
    'idle_engine_speed',
    'full_load_curve',
    'after_treatment_warm_up_duration',
    'motor_p2_planetary_speed_ratio',
    'motor_p2_planetary_maximum_power_function',
    'final_drive_speeds_in',
    'motor_p4_front_maximum_powers',
    'motive_powers',
]

#: Relevant outputs of the model.
outputs = ['on_engine', 'engine_starts']

#: Targets to compare the outputs of the model.
targets = outputs

#: Weight coefficients to compute the model score.
weights = sh.map_list(targets, -1, -1)

#: Metrics to compare outputs with targets.
metrics = sh.map_list(targets, *([_accuracy_score] * 2))

#: Bottom score limits to raise the warnings.
dn_limit = sh.map_list(targets, 0.7, 0.7)

#: Prediction model.
# noinspection PyProtectedMember
dsp = sh.Blueprint(_physical, inputs, outputs,
                   models)._set_cls(define_sub_model)
Example #14
#: Model name.
name = 'engine_coolant_temperature_model'

#: Parameters that constitute the model.
models = [
    'engine_temperature_regression_model', 'max_engine_coolant_temperature',
    'engine_thermostat_temperature'
]

#: Inputs required to run the model.
inputs = [
    'times', 'on_engine', 'velocities', 'engine_speeds_out',
    'accelerations', 'initial_engine_temperature'
]

#: Relevant outputs of the model.
outputs = ['engine_coolant_temperatures']

#: Targets to compare the outputs of the model.
targets = outputs

#: Metrics to compare outputs with targets.
metrics = sh.map_list(targets, co2_utl.mae)

#: Upper score limits to raise the warnings.
up_limit = sh.map_list(targets, 4)

#: Prediction model.
# noinspection PyProtectedMember
dsp = sh.Blueprint(_thermal, inputs, outputs, models)._set_cls(define_sub_model)
Example #15
    :param y_pred:
        Predicted engine speed vector [RPM].
    :type y_pred: numpy.array

    :param after_treatment_warm_up_phases:
        Phases when engine speed is affected by the after treatment warm up [-].
    :type after_treatment_warm_up_phases: numpy.array

    :return:
        Error.
    :rtype: float
    """
    b = after_treatment_warm_up_phases
    if b.any():
        from co2mpas.utils import mae
        return float(mae(y_true[b], y_pred[b]))
    else:
        return 0.0


#: Metrics to compare outputs with targets.
metrics = sh.map_list(targets, metric_after_treatment_speed_model)

#: Upper score limits to raise the warnings.
up_limit = sh.map_list(targets, 160)

#: Prediction model.
# noinspection PyProtectedMember
dsp = sh.Blueprint(_after_treat, inputs, outputs,
                   models)._set_cls(define_sub_model)
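Several of these modules end by wrapping the sub-dispatcher in sh.Blueprint,
which stores the constructor arguments and defers instantiation; register()
later builds the object from the class set via _set_cls. A minimal sketch of
the deferred construction (assuming schedula's Blueprint API; define_sub_model
is co2mpas-specific and not reproduced here):

import schedula as sh

bp = sh.Blueprint(name='deferred demo')  # stores args now, builds later
dsp = bp.register()  # instantiates the default class, sh.Dispatcher
assert isinstance(dsp, sh.Dispatcher)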