Example No. 1
def select_prediction_data(data, *new_data):
    """
    Selects the data required to predict the CO2 emissions with the CO2MPAS model.

    :param data:
        Output data.
    :type data: dict

    :param new_data:
        New data.
    :type new_data: dict

    :return:
        Data required to predict the CO2 emissions with the CO2MPAS model.
    :rtype: dict
    """

    ids = _prediction_data
    from .physical.defaults import dfl
    if not dfl.functions.select_prediction_data.theoretical:
        ids = ids + _prediction_data_ts

    data = sh.selector(ids, data, allow_miss=True)

    if new_data:
        new_data = sh.combine_dicts(*new_data)
        data = sh.combine_dicts(data, new_data)

    if 'gears' in data and 'gears' not in new_data:
        if data.get('gear_box_type', 0) == 'automatic' or \
                        len(data.get('velocities', ())) != len(data['gears']):
            data.pop('gears')

    return data
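
The two schedula helpers above can be exercised on their own. A minimal, hypothetical sketch (keys invented for illustration): sh.selector with allow_miss=True keeps only the requested keys and silently skips missing ones, and sh.combine_dicts merges dicts with later arguments taking precedence, which is how new_data overrides the selected output data.

import schedula as sh

data = {'velocities': [0, 10], 'gears': [0, 1], 'times': [0, 1]}
# Keep only the requested keys; the unknown key is skipped because allow_miss=True.
subset = sh.selector(('velocities', 'gears', 'co2_emission_value'), data, allow_miss=True)
assert subset == {'velocities': [0, 10], 'gears': [0, 1]}

# Later dicts win on key collisions, so the new data overrides the selected data.
merged = sh.combine_dicts(subset, {'gears': [2, 3]})
assert merged == {'velocities': [0, 10], 'gears': [2, 3]}
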
Example No. 2
def _add2summary(total_summary, summary, base_keys=None):
    base_keys = base_keys or {}
    for k, v in sh.stack_nested_keys(summary, depth=3):
        d = sh.get_nested_dicts(total_summary, *k, default=list)
        if isinstance(v, list):
            for j in v:
                d.append(sh.combine_dicts(j, base_keys))
        else:
            d.append(sh.combine_dicts(v, base_keys))
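
The nesting helpers used here are easier to read in isolation. A small self-contained sketch with made-up summary keys: sh.stack_nested_keys flattens a nested dict into (key-path, value) pairs down to the given depth, and sh.get_nested_dicts walks (or creates) a nested path, here initializing a list at the leaf so results can be appended.

import schedula as sh

summary = {'wltp_h': {'prediction': {'co2': {'value': 120}}}}
# depth=3 yields the three-level key path and whatever remains below it as the value.
flat = dict(sh.stack_nested_keys(summary, depth=3))
assert flat == {('wltp_h', 'prediction', 'co2'): {'value': 120}}

total = {}
# default=list creates an empty list at the leaf the first time the path is visited.
sh.get_nested_dicts(total, 'wltp_h', 'prediction', 'co2', default=list).append({'value': 120})
assert total == {'wltp_h': {'prediction': {'co2': [{'value': 120}]}}}
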
Example No. 3
def _summary2df(data):
    res = []
    summary = data.get('summary', {})

    if 'results' in summary:
        r = {}
        index = ['cycle', 'stage', 'usage']

        for k, v in sh.stack_nested_keys(summary['results'], depth=4):
            l = sh.get_nested_dicts(r, k[0], default=list)
            l.append(sh.combine_dicts(sh.map_list(index, *k[1:]), v))

        if r:
            df = _dd2df(r,
                        index=index,
                        depth=2,
                        col_key=functools.partial(_sort_key,
                                                  p_keys=('param', ) * 2),
                        row_key=functools.partial(_sort_key, p_keys=index))
            import pandas as pd
            df.columns = pd.MultiIndex.from_tuples(_add_units(df.columns))
            setattr(df, 'name', 'results')
            res.append(df)

    if 'selection' in summary:
        df = _dd2df(summary['selection'], ['model_id'],
                    depth=2,
                    col_key=functools.partial(_sort_key,
                                              p_keys=('stage', 'cycle')),
                    row_key=functools.partial(_sort_key, p_keys=()))
        setattr(df, 'name', 'selection')
        res.append(df)

    if 'comparison' in summary:
        r = {}
        for k, v in sh.stack_nested_keys(summary['comparison'], depth=3):
            v = sh.combine_dicts(v, base={'param_id': k[-1]})
            sh.get_nested_dicts(r, *k[:-1], default=list).append(v)
        if r:
            df = _dd2df(r, ['param_id'],
                        depth=2,
                        col_key=functools.partial(_sort_key,
                                                  p_keys=('stage', 'cycle')),
                        row_key=functools.partial(_sort_key, p_keys=()))
            setattr(df, 'name', 'comparison')
            res.append(df)

    if res:
        return {'summary': res}
    return {}
Example No. 4
def _define_inputs(sol, inputs):
    kw = dict(
        sources=inputs, check_inputs=False, graph=sol.dsp.dmap,
        _update_links=False
    )
    keys = set(sol) - set(sol.dsp.get_sub_dsp_from_workflow(**kw).data_nodes)
    return sh.combine_dicts({k: sol[k] for k in keys}, inputs)
Example No. 5
 def append(self, token):
     if isinstance(token, (Operator, Function)):
         try:
             tokens = [self.pop() for _ in range(token.get_n_args)][::-1]
         except IndexError:
             raise FormulaError()
         token.update_input_tokens(*tokens)
         inputs = [self.get_node_id(i) for i in tokens]
         token.set_expr(*tokens)
         out, dmap, get_id = token.node_id, self.dsp.dmap, get_unused_node_id
         if out not in self.dsp.nodes:
             func = token.compile()
             kw = dict(
                 function_id=get_id(dmap, token.name),
                 function=func,
                 inputs=inputs or None,
                 outputs=[out],
             )
             if isinstance(func, dict):
                 _inputs = func.get('extra_inputs', {})
                 for k, v in _inputs.items():
                     if v is not sh.NONE:
                         self.dsp.add_data(k, v)
                 kw = sh.combine_dicts(
                     {'inputs': (list(_inputs) + inputs) or None}, func,
                     base=kw
                 )
             self.dsp.add_function(**kw)
         else:
             self.nodes[token] = n_id = get_id(dmap, out, 'c%d>{}')
             self.dsp.add_function(None, sh.bypass, [out], [n_id])
     elif isinstance(token, Operand):
         self.missing_operands.add(token)
     self._deque.append(token)
Example No. 6
    def add_sheet(self, worksheet, context):
        get_in = sh.get_nested_dicts
        if isinstance(worksheet, str):
            book = get_in(self.books, context['excel'], BOOK)
            worksheet = book[_get_name(worksheet, book.sheetnames)]

        context = sh.combine_dicts(context,
                                   base={'sheet': worksheet.title.upper()})

        d = get_in(self.books, context['excel'], SHEETS, context['sheet'])
        if 'formula_references' not in d:
            d['formula_references'] = formula_references = {
                k: v['ref']
                for k, v in worksheet.formula_attributes.items()
                if v.get('t') == 'array' and 'ref' in v
            }
        else:
            formula_references = d['formula_references']

        if 'formula_ranges' not in d:
            d['formula_ranges'] = {
                Ranges().push(ref, context=context)
                for ref in formula_references.values()
            }
        return worksheet, context
Example No. 7
class DispatcherSphinxDirective(Graphviz):
    required_arguments = 1
    img_opt = {
        'height': directives.length_or_unitless,
        'width': directives.length_or_percentage_or_unitless,
    }
    option_spec = {
        'graphviz_dot': directives.unchanged,  # sphinx==1.3.5
        'index': bool_option
    }
    sh.combine_dicts(img_opt, Graphviz.option_spec, base=option_spec)

    def run(self):
        node = super(DispatcherSphinxDirective, self).run()[0]
        # noinspection PyUnresolvedReferences
        node = dsp(node.rawsource, *node.children, **node.attributes)
        node['img_opt'] = sh.selector(self.img_opt,
                                      self.options,
                                      allow_miss=True)
        node['index'] = self.options.get('index', False)
        env = self.state.document.settings.env
        argument = search_image_for_language(self.arguments[0], env)
        dirpath = osp.dirname(env.relfn2path(argument)[1])
        node['dirpath'] = dirpath if osp.isdir(dirpath) else None
        return [node]
Example No. 8
def _calibrate_gsm(
        velocity_speed_ratios, on_engine, anomalies, gear, velocities,
        stop_velocity, idle_engine_speed):
    # noinspection PyProtectedMember
    from .at_gear.cmv import CMV, _filter_gear_shifting_velocity as filter_gs
    idle = idle_engine_speed[0] - idle_engine_speed[1]
    _vsr = sh.combine_dicts(velocity_speed_ratios, base={0: 0})

    limits = {
        0: {False: [0]},
        1: {True: [stop_velocity]},
        max(_vsr): {True: [dfl.INF]}
    }
    shifts = np.unique(sum(map(_shift, (on_engine, anomalies)), []))
    for i, j in sh.pairwise(shifts):
        if on_engine[i:j].all() and not anomalies[i:j].any():
            for v in np.array(list(sh.pairwise(_shift(gear[i:j])))) + i:
                if j != v[1]:
                    v, (g, ng) = velocities[slice(*v)], gear[[v[1] - 1, v[1]]]
                    up = g < ng
                    sh.get_nested_dicts(limits, g, up, default=list).append(
                        v.max() if up else v.min()
                    )

    for k, v in list(limits.items()):
        limits[k] = v.get(False, [_vsr[k] * idle] * 2), v.get(True, [])
    d = {j: i for i, j in enumerate(sorted(limits))}
    gsm = CMV(filter_gs(sh.map_dict(d, limits), stop_velocity))
    gsm.velocity_speed_ratios = sh.selector(gsm, sh.map_dict(d, _vsr))
    gsm.convert(_vsr)
    return gsm
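
Some of the smaller helpers used above, shown with toy values (the gear numbers are invented): sh.pairwise yields consecutive pairs, sh.map_dict renames keys through a key map, and passing base= to sh.combine_dicts seeds the result with defaults.

import schedula as sh

# Consecutive pairs, like the (i, j) index windows built from the shift points.
assert list(sh.pairwise([0, 5, 9])) == [(0, 5), (5, 9)]

# Renumber gear keys 0, 2, 5 to consecutive positions, as done for the limits dict.
remap = {0: 0, 2: 1, 5: 2}
assert sh.map_dict(remap, {0: 'a', 2: 'b', 5: 'c'}) == {0: 'a', 1: 'b', 2: 'c'}

# base seeds the result, so the speed ratios gain a 0-gear entry unless one is provided.
assert sh.combine_dicts({1: 40.0}, base={0: 0}) == {0: 0, 1: 40.0}
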
Example No. 9
def parse_cmd_flags(cmd_flags=None):
    """
    Parses the command line options.

    :param cmd_flags:
        Command line options.
    :type cmd_flags: dict

    :return:
        Default parameters of the process model.
    :rtype: tuple
    """
    flags = sh.combine_dicts(cmd_flags or {},
                             base={
                                 'only_summary': False,
                                 'hard_validation': False,
                                 'declaration_mode': False,
                                 'enable_selector': False,
                                 'type_approval_mode': False,
                                 'encryption_keys': None,
                                 'sign_key': None,
                                 'output_template': sh.NONE,
                                 'encryption_keys_passwords': None,
                                 'output_folder': './outputs',
                                 'augmented_summary': False
                             })
    flags['declaration_mode'] |= flags['type_approval_mode']
    flags['hard_validation'] |= flags['declaration_mode']
    if flags['declaration_mode'] and not flags['type_approval_mode'] and \
            flags['enable_selector']:
        log.info('Since CO2MPAS is launched in declaration mode the option '
                 '--enable-selector is not used.\n'
                 'If you want to use it remove -DM from the cmd.')
        flags['enable_selector'] = False
    return sh.selector(_cmd_flags, flags, output_type='list')
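
The defaults-plus-overrides pattern above, reduced to a toy example: the base dict supplies the defaults, the caller's flags override them, and sh.selector with output_type='list' returns just the values, ordered as the requested keys.

import schedula as sh

defaults = {'only_summary': False, 'output_folder': './outputs'}
flags = sh.combine_dicts({'only_summary': True}, base=defaults)
assert flags == {'only_summary': True, 'output_folder': './outputs'}

# The values come back ordered as the requested keys (a sequence, not a dict).
values = sh.selector(('output_folder', 'only_summary'), flags, output_type='list')
assert list(values) == ['./outputs', True]
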
Example No. 10
def parse_solution(solution):
    """
    Parse the CO2MPAS model solution.

    :param solution:
        CO2MPAS model solution.
    :type solution: schedula.Solution

    :return:
        CO2MPAS outputs.
    :rtype: dict[dict]
    """

    res = {}
    for k, v in solution.items():
        k = k.split('.')
        sh.get_nested_dicts(res, *k[:-1])[k[-1]] = v

    for k, v in list(sh.stack_nested_keys(res, depth=3)):
        n, k = k[:-1], k[-1]
        if n == ('output', 'calibration') and k in ('wltp_l', 'wltp_h'):
            v = sh.selector(('co2_emission_value', ), v, allow_miss=True)
            if v:
                d = sh.get_nested_dicts(res, 'target', 'prediction')
                d[k] = sh.combine_dicts(v, d.get(k, {}))

    res['pipe'] = solution.pipe

    return res
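
The first loop's dotted-key expansion is worth seeing on its own. A compact sketch with a hypothetical solution key: sh.get_nested_dicts creates the intermediate dicts for every token except the last, which is then set as a plain key.

import schedula as sh

solution = {'output.calibration.wltp_h.co2_emission_value': 123.4}
res = {}
for k, v in solution.items():
    k = k.split('.')
    # Walk/create the nested path for all but the last token, then set the leaf value.
    sh.get_nested_dicts(res, *k[:-1])[k[-1]] = v
assert res == {'output': {'calibration': {'wltp_h': {'co2_emission_value': 123.4}}}}
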
Example No. 11
def _eval(s, error=None, usersyms=None, **kwargs):
    error = error or 'cannot be eval!'
    from asteval import Interpreter
    usersyms = sh.combine_dicts(_usersyms, usersyms or {})
    return Or(And(str, Use(Interpreter(usersyms=usersyms).eval), s),
              s,
              error=error)
Example No. 12
def xsingle(cell, rng):
    if len(rng.ranges) == 1 and not rng.is_set and rng.value.shape[1] == 1:
        rng = rng & Ranges((sh.combine_dicts(
            rng.ranges[0], sh.selector(('r1', 'r2'), cell.ranges[0])), ))
        if rng.ranges:
            return rng
    return Error.errors['#VALUE!']
Example No. 13
def parse_dsp_solution(solution):
    """
    Parses the CO2MPAS model results.

    :param solution:
        CO2MPAS model after dispatching.
    :type solution: schedula.Solution

    :return:
        Mapped outputs.
    :rtype: dict[dict]
    """

    res = {}
    for k, v in solution.items():
        sh.get_nested_dicts(res, *k.split('.'), default=co2_utl.ret_v(v))

    for k, v in list(sh.stack_nested_keys(res, depth=3)):
        n, k = k[:-1], k[-1]
        if n == ('output', 'calibration') and k in ('wltp_l', 'wltp_h'):
            v = sh.selector(('co2_emission_value', ), v, allow_miss=True)
            if v:
                d = sh.get_nested_dicts(res, 'target', 'prediction')
                d[k] = sh.combine_dicts(v, d.get(k, {}))

    res['pipe'] = solution.pipe

    return res
Example No. 14
 def __and__(self, other):  # Intersection.
     r = []
     for rng in other.ranges:
         r.extend(
             _intersect(rng, self.ranges, format_range=self.format_range))
     values = sh.combine_dicts(self.values, other.values)
     is_set = self.is_set or other.is_set
     return Ranges(r, values, is_set, self.all_values and other.all_values)
Example No. 15
 def get_range(format_range, ref, context=None):
     context = context or {}
     m = _re_range.match(ref).groupdict().items()
     m = {k: v for k, v in m if v is not None}
     if 'ref' in m:
         raise ValueError
     i = sh.combine_dicts(context, m)
     return dict(format_range(('name', 'n1', 'n2'), **i))
Example No. 16
def write_to_excel(dfs, output_template):
    """
    Writes DataFrames to Excel.

    :param dfs:
        DataFrames of vehicle output report.
    :type dfs: dict[str, pandas.DataFrame]

    :param output_template:
        Output template.
    :type output_template: str

    :return:
        Excel output file.
    :rtype: io.BytesIO
    """
    import pandas as pd
    log.debug('Writing into xl-file based on template(%s)...', output_template)
    writer, fd = _clone_excel(output_template)

    xlref, calculate_sheets, charts = [], sorted(writer.sheets), []
    for k, v in sorted(dfs.items(), key=_sort_sheets):
        if not k.startswith('graphs.'):
            down = True
            if k.endswith('pa'):
                kw = {'named_ranges': ('rows', ), 'index': True, 'k0': 1}
            elif k.endswith('ts'):
                kw = {'named_ranges': ('columns', ), 'index': False, 'k0': 1}
            elif k.endswith('proc_info'):
                down = False
                kw = {'named_ranges': ()}
            else:
                kw = {}

            xlref.extend(_write_sheets(writer, k, v, down=down, **kw))
        else:
            try:
                sheet = writer.book.add_worksheet(k)
            except AttributeError:
                sheet = writer.book.create_sheet(title=k)
            charts.append((sheet, v))

    for sheet, v in charts:
        _chart2excel(writer, sheet, v)

    if xlref:
        xlref = sorted(sh.combine_dicts(*[x[1] for x in xlref]).items())
        xlref = pd.DataFrame(xlref)
        xlref.set_index([0], inplace=True)
        _df2excel(writer,
                  'xlref',
                  xlref,
                  named_ranges=(),
                  index=True,
                  header=False)

    writer.save()
    return fd
Example No. 17
    def _err(model_id, model):
        gears = func(inputs=sh.combine_dicts(inp, {
            sgs: model_id,
            model_id: model
        }),
                     outputs=['gears'])['gears']

        eng = calculate_gear_box_speeds_in(gears, vel, vsr, sv)
        return calculate_error_coefficients(t_gears, gears, t_eng, eng, vel,
                                            sv)
Example No. 18
def fast_range2parts(**kw):
    inputs = sh.selector(_keys, kw, allow_miss=True)

    for func in (fast_range2parts_v1, fast_range2parts_v2, fast_range2parts_v3):
        try:
            return sh.combine_dicts(kw, base=func(**inputs))
        except TypeError:
            pass
    else:
        raise ValueError
Example No. 19
 def __or__(self, other):  # Union.
     base = self.ranges
     for r0 in other.ranges:
         stack = [r0]
         for b in base:
             s = stack.copy()
             stack = []
             for r in s:
                 stack.extend(_split(b, r, format_range=self.format_range))
         base += tuple(stack)
     values = sh.combine_dicts(self.values, other.values)
     return Ranges(base, values, True, self.all_values and other.all_values)
Example No. 20
 def process(self, match, context=None):
     d = super(Range, self).process(match)
     if len(d) <= 1 and 'indirect' not in d and 'ref' in d:
         try:
             from .function import Function
             if Function(self.source).name == d['ref']:
                 return {}
         except TokenError:
             pass
     if 'ref' in d:
         self.attr['is_reference'] = True
     return range2parts(None, **sh.combine_dicts(context or {}, d))
Example No. 21
def write_to_excel(data, output_file_name, template_file_name):
    import pandas as pd
    if template_file_name:
        log.debug('Writing into xl-file(%s) based on template(%s)...',
                  output_file_name, template_file_name)
        writer = clone_excel(template_file_name, output_file_name)

    else:
        log.debug('Writing into xl-file(%s)...', output_file_name)
        writer = pd.ExcelWriter(output_file_name, engine='xlsxwriter')

    xlref, calculate_sheets, charts = [], sorted(writer.sheets), []
    for k, v in sorted(data.items(), key=_sort_sheets):
        if not k.startswith('graphs.'):
            down = True
            if k.endswith('pa'):
                kw = {'named_ranges': ('rows', ), 'index': True, 'k0': 1}
            elif k.endswith('ts'):
                kw = {'named_ranges': ('columns', ), 'index': False, 'k0': 1}
            elif k.endswith('proc_info'):
                down = False
                kw = {'named_ranges': ()}
            else:
                kw = {}

            xlref.extend(_write_sheets(writer, k, v, down=down, **kw))
        else:
            try:
                sheet = writer.book.add_worksheet(k)
            except AttributeError:
                sheet = writer.book.create_sheet(title=k)
            charts.append((sheet, v))

    for sheet, v in charts:
        _chart2excel(writer, sheet, v)

    if xlref:
        xlref = sorted(sh.combine_dicts(*[x[1] for x in xlref]).items())
        xlref = pd.DataFrame(xlref)
        xlref.set_index([0], inplace=True)
        _df2excel(writer, 'xlref', xlref, 0, (), index=True, header=False)

    if calculate_sheets:
        import formulas
        xl_model = formulas.ExcelModel()
        context = xl_model.add_book(
            writer.book, {'excel': osp.basename(output_file_name)})[1]
        xl_model.pushes(*calculate_sheets,
                        context=context).finish().calculate()
        xl_model.write(xl_model.books)

    writer.save()
    log.info('Written into xl-file(%s)...', output_file_name)
Example No. 22
def _split(base, rng, intersect=None, format_range=range2parts):
    z = _have_intersect(base, rng)
    if not z:
        return rng,

    if intersect is not None:
        intersect.update(z)

    ranges = []
    rng = sh.selector(('excel', 'sheet', 'n1', 'n2', 'r1', 'r2'), rng)
    it = ('n1', 'n2', 1), ('n2', 'n1', -1), ('r1', 'r2', 1), ('r2', 'r1', -1)
    for i, j, n in it:
        if z[i] != rng[i]:
            if j[0] == 'r':
                r = sh.combine_dicts(rng, {j: str(int(z[i]) - n)})
            else:
                r = sh.combine_dicts(rng, {j: z[i] - n})
            r = dict(format_range(('name', 'n1', 'n2'), **r))
            ranges.append(r)
            rng[i] = z[i]

    return tuple(ranges)
Example No. 23
def define_wltp_base_model(wltp_base_model):
    """
    Defines WLTP base model.

    :param wltp_base_model:
        WLTP base model params.
    :type wltp_base_model: dict

    :return:
        WLTP base model.
    :rtype: dict
    """
    import wltp.model as wltp_mdl
    # noinspection PyProtectedMember
    return sh.combine_dicts(wltp_mdl._get_model_base(), wltp_base_model)
Example No. 24
 def _yield_refs(book, context=None):
     for n in book.defined_names.definedName:
         if n.value == '#REF!':
             continue
         ref, i = n.name.upper(), n.localSheetId
         rng = Ranges().push(n.value, context=context).ranges[0]['name']
         sheet_names = book.sheetnames
         if i is not None:
             sheet_names = sheet_names[i:i + 1]
         for sn in sheet_names:
             name = range2parts(
                 None, **sh.combine_dicts(context, {
                     'sheet': sn,
                     'ref': ref
                 }))
             yield name['name'], rng
Example No. 25
def _parse_values(data, default=None, where=''):
    default = default or {}
    for k, v in data.items():
        match = _re_params_name.match(k) if k is not None else None
        if not match and default.get('scope') == 'meta':
            match = _re_params_name.match('.'.join(
                filter(bool, ('meta', default.get('meta'), k))))
        if not match:
            log.warning("Parameter '%s' %s cannot be parsed!", k, where)
            continue
        elif _isempty(v):
            continue
        match = {i: j.lower() for i, j in match.groupdict().items() if j}

        for key in _parse_key(**sh.combine_dicts(default, match)):
            yield key, v
Example No. 26
def _convert_limits(lu, vsr, n_vsr, stop_velocity=dfl.values.stop_velocity):
    _r, _l, _u = np.array(
        sorted([vsr.get(k, 0)] + list(v) for k, v in lu.items())).T
    nk, nr = np.array(sorted(sh.combine_dicts(n_vsr, base={0: 0}).items())).T
    klu = sorted(
        zip(nk.astype(int), _interp(_r[2:], _l[2:], nr),
            _interp(_r[1:-1], _u[1:-1], nr)))
    res, n0, n1 = {}, min(k for k in n_vsr if k > 0), max(n_vsr)
    for k, l, u in klu:
        if k == 0:
            l, u = list(lu.get(k, [0, stop_velocity + 1]))
        else:
            if k == n0:
                l = lu.get(k, (stop_velocity, ))[0]
            if k == n1:
                u = lu.get(k, (None, dfl.INF))[1]
        res[k] = [l, u]
    return res
Example No. 27
def get_summary(solutions):
    """
    Extract summary data from model solutions.

    :param solutions:
        All model solutions.
    :type solutions: list[schedula.Solution]

    :return:
        Summary data.
    :rtype: list
    """
    return [sh.combine_dicts(
        dict(sh.stack_nested_keys(sol.get('summary', {}), depth=4)), base={
            'id': sol['vehicle_name'],
            'base': sol['input_file_name']
        }
    ) for sol in solutions]
Example No. 28
def _get_theoretical(profile):
    defaults = {
        'cycle_type': 'WLTP',
        'gear_box_type': 'manual',
        'wltp_class': 'class3b',
        'downscale_factor': 0
    }
    profile = {k: v for k, v in profile.items() if v}
    profile = sh.combine_dicts(defaults, profile)
    profile['cycle_type'] = profile['cycle_type'].upper()
    profile['wltp_class'] = profile['wltp_class'].lower()
    profile['gear_box_type'] = profile['gear_box_type'].lower()
    from co2mpas.model.physical.cycle import cycle
    res = cycle().dispatch(inputs=profile,
                           outputs=['times', 'velocities'],
                           shrink=True)
    data = sh.selector(['times', 'velocities'], res, output_type='list')
    return pd.DataFrame(data).T
Example No. 29
def make_simulation_plan(plan, timestamp, variation, flag, model=None):
    model, summary = model or batch.vehicle_processing_model(), {}
    run_base = model.get_node('run_base')[0].dsp
    run_modes = tuple(
        run_base.get_sub_dsp_from_workflow(
            ('data', 'vehicle_name'), check_inputs=False,
            graph=run_base.dmap).data_nodes) + ('start_time', 'vehicle_name')

    var = json.dumps(variation, sort_keys=True)
    o_cache, o_folder = flag['overwrite_cache'], flag['output_folder']
    modelconf = flag.get('modelconf', None)
    kw, bases = sh.combine_dicts(flag, {'run_base': True}), set()
    for (i, base_fpath, run), p in tqdm.tqdm(plan, disable=False):
        try:
            base = get_results(model, o_cache, base_fpath, timestamp, run, var,
                               o_folder, modelconf)
        except KeyError:
            log.warning('Base model "%s" of variation "%s" cannot be parsed!',
                        base_fpath, i)
            continue

        name = base['vehicle_name']
        if 'summary' in base and name not in bases:
            batch._add2summary(summary, base['summary'])
            bases.add(name)

        name = '{}-{}'.format(name, i)

        new_base, o = define_new_inputs(p, base)
        inputs = batch.prepare_data(new_base, {}, base_fpath, o_cache,
                                    o_folder, timestamp, False, modelconf)[0]
        inputs.update(sh.selector(set(base).difference(run_modes), base))
        inputs['vehicle_name'] = name
        inputs.update(kw)
        res = run_base.dispatch(inputs)
        batch.notify_result_listener(plan_listener, {'solution': res})

        s = filter_summary(p, o, res.get('summary', {}))
        base_keys = {
            'vehicle_name': (base_fpath, name, run),
        }
        batch._add2summary(summary, s, base_keys)

    return summary
Example No. 30
    def add_cell(self,
                 cell,
                 context,
                 references=None,
                 formula_references=None,
                 formula_ranges=None,
                 external_links=None):
        get_in = sh.get_nested_dicts
        if formula_references is None:
            formula_references = get_in(self.books, context['excel'], SHEETS,
                                        context['sheet'], 'formula_references')

        if formula_ranges is None:
            formula_ranges = get_in(self.books, context['excel'], SHEETS,
                                    context['sheet'], 'formula_ranges')

        if references is None:
            references = get_in(self.books, context['excel'], 'references')

        if external_links is None:
            external_links = get_in(self.books, context['excel'],
                                    'external_links')
        context = sh.combine_dicts(context,
                                   base={'external_links': external_links})
        crd = cell.coordinate
        crd = formula_references.get(crd, crd)
        try:
            cell = Cell(crd, cell.value, context=context).compile()
        except FormulaError:
            # The formula failed to compile, so fall back to the Excel NA() error value.
            cell = Cell(crd, "=NA()", context=context).compile()
        if cell.output in self.cells:
            return
        if cell.value is not sh.EMPTY:
            if any(not (cell.range - rng).ranges for rng in formula_ranges):
                return
        cell.update_inputs(references=references)

        if cell.add(self.dsp, context=context):
            self.cells[cell.output] = cell
            return cell
Example No. 31
 def test_combine_dicts(self):
     res = sh.combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2})
     self.assertEqual(res, {'a': 1, 'b': 2, 'c': 3})
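
Building on the test above, a short sketch of the remaining calling conventions seen throughout these examples (values are arbitrary): any number of dicts merge left to right, and base= seeds the result with defaults that the other dicts override.

import schedula as sh

# Left-to-right precedence over any number of dicts.
assert sh.combine_dicts({'a': 1}, {'b': 2}, {'a': 3}) == {'a': 3, 'b': 2}

# base provides the defaults; keys passed explicitly take precedence over it.
assert sh.combine_dicts({'a': 1}, base={'a': 0, 'c': 0}) == {'a': 1, 'c': 0}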