Code Example #1
File: excel.py  Project: ankostis/CO2MPAS-TA
def _parse_base_data(res,
                     match,
                     sheet,
                     sheet_name,
                     re_params_name=_re_params_name):
    r = {}
    defaults = {'usage': 'input', 'stage': 'calibration'}

    if 'type' not in match:
        match['type'] = 'pa' if 'cycle' not in match else 'ts'

    match = dsp_utl.combine_dicts(defaults, match)

    if match['type'] == 'pa':
        xl_ref = '#%s!B2:C_:["pipe", ["dict", "recurse"]]' % sheet_name
        data = lasso(xl_ref, sheet=sheet)
    else:
        # noinspection PyBroadException
        try:
            xl_ref = '#%s!A2(R):.3:RD:["df", {"header": 0}]' % sheet_name
            data = lasso(xl_ref, sheet=sheet)
        except Exception:
            return {}
        data.dropna(how='all', inplace=True)
        data.dropna(axis=1, how='all', inplace=True)
        mask = data.count(0) == len(data.index)
        # noinspection PyUnresolvedReferences
        drop = [k for k, v in mask.items() if not v]
        if drop:
            msg = 'Columns {} in sheet {} contain NaN.\n' \
                  'Please correct the inputs!'
            raise ValueError(msg.format(drop, sheet_name))

    for k, v in parse_values(data, match, re_params_name):
        co2_utl.get_nested_dicts(r, *k[:-1])[k[-1]] = v

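    # Back-fill 'times' into any target time-series that lacks it: first try
    # the same key path under this match's usage, then fall back to the first
    # parsed series found that carries a 'times' vector.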
    n = (match['scope'], 'target')
    if match['type'] == 'ts' and co2_utl.are_in_nested_dicts(r, *n):
        t = co2_utl.get_nested_dicts(r, *n)
        for k, v in co2_utl.stack_nested_keys(t, key=n, depth=2):
            if 'times' not in v:
                n = list(k + ('times', ))
                n[1] = match['usage']
                if co2_utl.are_in_nested_dicts(r, *n):
                    v['times'] = co2_utl.get_nested_dicts(r, *n)
                else:
                    for i, j in co2_utl.stack_nested_keys(r, depth=4):
                        if 'times' in j:
                            v['times'] = j['times']
                            break

    co2_utl.combine_nested_dicts(r, depth=5, base=res)
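
The xl-ref strings passed to lasso() above pack file, sheet, capture rectangle and output filters into one URL-like string. A minimal sketch of the anatomy, assuming a hypothetical workbook book.xlsx (the '::' range and '["df"]' filter follow the patterns used throughout these examples):

from pandalone import xleash

# '<file>#<sheet>!<first-edge>:<second-edge>:<filters>'
# '::' captures the whole sheet; the '["df"]' filter returns a DataFrame.
df = xleash.lasso('book.xlsx#Sheet1!::["df"]')

# A fixed rectangle with no filter returns the values as nested lists.
rows = xleash.lasso('book.xlsx#Sheet1!A1:C3')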
Code Example #2
File: excel.py  Project: ankostis/CO2MPAS-TA
def _parse_plan_data(plans,
                     match,
                     sheet,
                     sheet_name,
                     re_params_name=_re_params_name):
    # noinspection PyBroadException
    xl_ref = '#%s!A1(R):._:R:"recurse"'
    data = lasso(xl_ref % sheet_name, sheet=sheet)
    try:
        data = pd.DataFrame(data[1:], columns=data[0])
    except IndexError:
        return None
    if 'id' not in data:
        data['id'] = data.index + 1

    data.set_index(['id'], inplace=True)
    data.dropna(how='all', inplace=True)
    data.dropna(axis=1, how='all', inplace=True)

    plan = pd.DataFrame()
    defaults = {'usage': 'input', 'stage': 'calibration'}
    match = dsp_utl.combine_dicts(defaults, match)
    for k, v in parse_values(data, match, re_params_name):
        k = k[-1] if k[-1] in ('base', 'defaults') else '.'.join(k[1:])
        plan[k] = v

    plans.append(plan)
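
The key flattening above turns each parsed key tuple into a dotted plan column, except for the 'base'/'defaults' leaves. A worked example with a hypothetical key tuple:

# k as yielded by parse_values (hypothetical values for illustration)
k = ('plan', 'target', 'prediction', 'times')
col = k[-1] if k[-1] in ('base', 'defaults') else '.'.join(k[1:])
# col == 'target.prediction.times'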
Code Example #3
File: krig_.py  Project: stefanosts/reporting
import numpy as np
import pandas as pd
from datetime import datetime

from pandalone import xleash

# `shts`, `refs` and `KrigingCo2mpas` are defined elsewhere in this module.

def main():

    f = "DT_MT_par.xlsx"
    fin = "test_in.csv"  # "DT_MT_in.csv"
    fout = "DT_MT_out_Python.csv"

    dfins = pd.read_csv(fin)
    dfins = dfins.iloc[:1000, :]  # .ix was removed from pandas; .iloc is positional
    dfsplit = np.array_split(dfins, 1)  # 100)

    sh = shts[0]
    dFL = pd.DataFrame(columns=shts)
    startTime = datetime.now()
    for sh in shts:
        ysc = pd.DataFrame(xleash.lasso("%s#%s!%s"%(f, sh, refs['Ysc'])))
        ssc = pd.DataFrame(xleash.lasso("%s#%s!%s"%(f, sh, refs['SSc'])))
        theta = pd.DataFrame(xleash.lasso("%s#%s!%s"%(f, sh, refs['theta'])))
        beta = pd.DataFrame(xleash.lasso("%s#%s!%s"%(f, sh, refs['beta'])))
        gamma = pd.DataFrame(xleash.lasso("%s#%s!%s"%(f, sh, refs['gamma'])))
        s = pd.DataFrame(xleash.lasso("%s#%s!%s"%(f, sh, refs['S'])))

        dS = pd.Series(dtype=float)
        for df in dfsplit:
            r = KrigingCo2mpas(df, ysc, ssc, theta, beta, gamma, s)
            dS = pd.concat([dS, r])  # Series.append was removed in pandas 2.0

        dFL[sh] = dS
        print(datetime.now() - startTime)

    dFL.to_csv(fout, index=False)
Code Example #4
    def test_collect_tables(self, case):
        exp_ref_path, exp_ref_sheet = 'datasync.xlsx', 'Sheet1'
        (ref_xlref, *sync_xlrefs), exp_xlrefs = case
        required_labels = ('x', 'y1')
        tables = datasync.Tables(required_labels, _shfact)
        tables.collect_tables(ref_xlref, *sync_xlrefs)
        self.assertEqual(tables.ref_fpath, exp_ref_path)
        self.assertEqual(tables.ref_sh_name, exp_ref_sheet)
        self.assertEqual(len(tables.tables), len(exp_xlrefs))
        self.assertEqual(len(tables.headers), len(exp_xlrefs))
        for d, xlref in zip(tables.tables, exp_xlrefs):
            df = xleash.lasso('%s:"df"' % xlref, sheets_factory=_shfact)
            npt.assert_array_almost_equal(d, df.values, err_msg=xlref)
Code Example #5
File: datasync.py  Project: Loic-MJ/CO2MPAS-TA
    def _consume_next_xlref(self, xlref, lasso):
        """
        :param str xlref:
                an xlref that may not contain a hash (`#`); in that case it
                is taken as the *file-part* or as the *fragment-part*,
                depending on whether the previous lasso has a `url_file`.
        :param Lasso lasso:
                the previous lasso; its `url_file` & `sheet` are reused
                when missing from `xlref`
        """

        xlref = _guess_xlref_without_hash(xlref,
                                          bias_on_fragment=bool(
                                              lasso.url_file))
        lasso = xleash.lasso(xlref,
                             sheets_factory=self._sheets_factory,
                             url_file=lasso.url_file,
                             sheet=lasso.sheet,
                             return_lasso=True)
        values = lasso.values
        if values:  # Skip blank sheets.
            # TODO: Convert column monkeybiz into pure-pandas using xleash.
            str_row_indices = [
                i for i, r in enumerate(values) if any(
                    isinstance(v, str) for v in r)
            ]

            req_labels = IndexedSet(self.required_labels)
            for k in str_row_indices:
                if set(values[k]) >= req_labels:
                    break
            else:
                raise CmdException(
                    "Columns %r not found in table of sheet(%r) in book(%r)!" %
                    (self.required_labels, lasso.sheet._sheet.name,
                     lasso.sheet.book_fname))
            ix = values[k]
            i = max(str_row_indices, default=0) + 1

            h = pd.DataFrame(values[:i], columns=ix)
            self.headers.append((sheet_name(lasso), k, h))

            values = pd.DataFrame(values[i:], columns=ix)
            values.dropna(how='all', inplace=True)
            values.dropna(axis=1, how='any', inplace=True)
            if values.empty:
                log.warning("Empty table of sheet(%r) in book (%r)!" %
                            (lasso.sheet._sheet.name, lasso.sheet.book_fname))
            else:
                self.tables.append(values)

        return lasso
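
The docstring's hash-guessing rule is the subtle part: a bare fragment reuses the previous file, while a bare file starts a new book. A minimal sketch of what _guess_xlref_without_hash presumably does (reconstructed from the docstring, not the project's actual code):

def guess_xlref_without_hash(xlref, bias_on_fragment):
    # Already a full xl-ref; nothing to guess.
    if '#' in xlref:
        return xlref
    # With a previous `url_file` in play, treat the text as the
    # fragment-part; otherwise it names a new file.
    return '#' + xlref if bias_on_fragment else xlref + '#'

# guess_xlref_without_hash('Sheet2!A1:C3', bias_on_fragment=True)  -> '#Sheet2!A1:C3'
# guess_xlref_without_hash('data.xlsx', bias_on_fragment=False)    -> 'data.xlsx#'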
Code Example #6
File: report.py  Project: Loic-MJ/CO2MPAS-TA
    def _extract_dice_report_from_output(self, fpath):
        import pandas as pd
        from pandalone import xleash

        df = xleash.lasso(self.dice_report_xlref, url_file=fpath)
        if not isinstance(df, pd.DataFrame):
            raise ValueError(
                "The param '%s.%s' must resolve to a DataFrame, not type(%r): %s"
                % (type(self).__name__, self.dice_report_xlref, type(df), df))

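        # Normalize NaN cells to None before reading the vfid cell below.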
        df = df.where(pd.notnull(df), None)
        vfid = df.at[self.output_vfid_coords]

        return vfid, df
Code Example #7
    def extract_output_tables(self, fpath):
        from pandalone import xleash

        #fpath = pndlu.convpath(fpath)
        master_xlrefs = xleash.lasso(self.output_master_xlref, url_file=fpath)
        assert isinstance(master_xlrefs, Mapping), (
            "The `output_master_xlref(%s) must resolve to a dictionary, not type(%r): %s"
            % (self.output_master_xlref, type(master_xlrefs), master_xlrefs))

        tables = []
        for tab_key in self.output_table_keys:
            assert tab_key in master_xlrefs, (
                "The `output_table_key` %r were not found in *master-xlref* dictionary: %s"
                % (tab_key, master_xlrefs))

            tab_xlref = master_xlrefs[tab_key]
            tab = xleash.lasso(tab_xlref, url_file=fpath)
            assert isinstance(tab, pd.DataFrame), (
                "The `output_master_xlref` key (%r --> %r) must resolve to a DataFrame, not type(%r): %s"
                % (tab_key, tab_xlref, type(tab), tab))
            tables.append(tab)

        self.output_tables = tables
        return self.output_tables
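
The indirection here is two-level: the master xlref resolves to a mapping whose values are themselves xlrefs, each lassoed in turn against the same workbook. A hypothetical master mapping (keys and refs invented for illustration, reusing the ref patterns from Example #1):

master_xlrefs = {
    'summary': '#summary!A1(R):._:RD:["df"]',
    'times': '#ts!A2(R):.3:RD:["df", {"header": 0}]',
}
# Each value is lassoed again with url_file=fpath, so sheet-only refs
# resolve against the same workbook.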
Code Example #8
def _parse_sheet(match, sheet, sheet_name, res=None):
    if res is None:
        res = {}

    sh_type = _get_sheet_type(**match)

    # noinspection PyBroadException
    try:
        import pandalone.xleash as xleash
        data = xleash.lasso(_xl_ref[sh_type] % sheet_name, sheet=sheet)
    except Exception:
        return res

    if sh_type == 'pl':
        try:
            import pandas as pd
            data = pd.DataFrame(data[1:], columns=data[0])
        except IndexError:
            return None
        if 'id' not in data:
            data['id'] = data.index + 1

        data.set_index(['id'], inplace=True)
        data.dropna(how='all', inplace=True)
        data.dropna(axis=1, how='all', inplace=True)
    elif sh_type == 'ts':
        data.dropna(how='all', inplace=True)
        data.dropna(axis=1, how='all', inplace=True)
        mask = data.count(0) == len(data.index)
        # noinspection PyUnresolvedReferences
        drop = [k for k, v in mask.items() if not v]
        if drop:
            msg = 'Columns {} in sheet {} contain NaN.\n' \
                  'Please correct the inputs!'
            raise ValueError(msg.format(drop, sheet_name))
        data = data.to_dict('list')
    else:
        data = {k: v for k, v in data.items() if k}

    for k, v in _parse_values(data, match, "in sheet '%s'" % sheet_name):
        sh.get_nested_dicts(res, *k[:-1])[k[-1]] = v
    return res
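
For context, _xl_ref maps each sheet type to its capture template. A plausible reconstruction from the refs used in Examples #1 and #2 (treat the exact strings as assumptions):

_xl_ref = {
    'pa': '#%s!B2:C_:["pipe", ["dict", "recurse"]]',
    'ts': '#%s!A2(R):.3:RD:["df", {"header": 0}]',
    'pl': '#%s!A1(R):._:R:"recurse"',
}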
Code Example #9
File: CarsDB-accdb.py  Project: ankostis/wltp
# %%
vehdb.print_nodes(h5fname)

# %%
# a=xleash.lasso('VehData/calculation_parameter_all.15092019_prog_code_dev.xlsx#calculation_parameter_all!::["df"]')
# b=xleash.lasso('VehData/calculation_parameter_all.20092019.xlsx#calculation_parameter_all!::["df"]')
# bad_cols = (a == b).all()
# bad_cols[~bad_cols]

# %%
veh_inputs_excel = (
    Path("VehData/calculation_parameter_all.20092019.xlsx"),
    "calculation_parameter_all",
)
specs = xleash.lasso('%s#%s!::["df"]' % veh_inputs_excel)

wots_excel = (Path("VehData/TB_Pwot.20092019.xlsx"), "TB_Pwot")
pwots = xleash.lasso('%s#%s!::["df"]' % wots_excel)

# %% [markdown]
# ## PRE 20190728 COLUMNS
# ```
# vehicle_no  rated_power     v_max  ndv_6               no_of_gears  v_max_4  v_max_10  n_min_drive_set  n95_high   at_s
# comments    kerb_mass       ndv_1  ndv_7               ng_vmax      v_max_5  v_s_max   n_min_wot        n_max1     above_s
# pmr_km      test_mass       ndv_2  ndv_8               v_max_ext    v_max_6  n_vmax    f_dsc_req        n_max2     vmax_determined_by_n_lim
# pmr_tm      rated_speed     ndv_3  ndv_9               v_max_1      v_max_7  f0        Pres_130         n_max3
# IDclass     idling_speed    ndv_4  ndv_10              v_max_2      v_max_8  f1        Pres_130_Prated  n_max_wot
# class       v_max_declared  ndv_5  v_max_transmission  v_max_3      v_max_9  f2        n95_low          below_s
#
# Index(['no_engine', 'n', 'Pwot', 'Twot', 'Pwot_norm', 'Twot_norm', 'SM', 'ASM',
Code Example #10
    def extract_input_params(self, fpath):
        from pandalone import xleash

        #fpath = pndlu.convpath(fpath)
        self.input_params = xleash.lasso(self.input_xlref, url_file=fpath)
        return self.input_params
Code Example #11
File: batch.py  Project: ncolloel/CO2MPAS-TA
def prepare_data(raw_data, variation, input_file_name, overwrite_cache,
                 output_folder, timestamp, type_approval_mode, modelconf):
    """
    Prepare the data to be processed.

    :param raw_data:
        Raw data from the input file.
    :type raw_data: dict

    :param variation:
        Variations to be applied.
    :type variation: dict

    :param input_file_name:
        Input file name.
    :type input_file_name: str

    :param overwrite_cache:
        Overwrite saved cache?
    :type overwrite_cache: bool

    :param output_folder:
        Output folder.
    :type output_folder: str

    :param timestamp:
        Run timestamp.
    :type timestamp: str

    :param type_approval_mode:
        Is the run launched for type-approval (TA)?
    :type type_approval_mode: bool

    :param modelconf:
        Path of modelconf that has modified the defaults.
    :type modelconf: str

    :return:
        Prepared data.
    :rtype: dict
    """
    has_plan = 'plan' in raw_data and (not raw_data['plan'].empty)
    match = {
        'scope': 'plan' if has_plan else 'base',
    }
    r = {}
    sheets_factory = xleash.SheetsFactory()
    from co2mpas.io import check_xlasso
    for k, v in excel._parse_values(variation, match, "in variations"):
        if isinstance(v, str) and check_xlasso(v):
            v = xleash.lasso(v, sheets_factory, url_file=input_file_name)
        dsp_utl.get_nested_dicts(r, *k[:-1])[k[-1]] = v

    if 'plan' in r:
        if has_plan:
            plan = raw_data['plan'].copy()
            for k, v in dsp_utl.stack_nested_keys(r['plan'], 4):
                plan['.'.join(k)] = v
        else:
            gen = dsp_utl.stack_nested_keys(r['plan'], 4)
            plan = pd.DataFrame([{'.'.join(k): v for k, v in gen}])
            excel._add_index_plan(plan, input_file_name)

        r['plan'] = plan
        has_plan = True

    if 'base' in r:
        r['base'] = dsp_utl.combine_nested_dicts(raw_data.get('base', {}),
                                                 r['base'],
                                                 depth=4)

    if 'flag' in r:
        r['flag'] = dsp_utl.combine_nested_dicts(raw_data.get('flag', {}),
                                                 r['flag'],
                                                 depth=1)

    data = dsp_utl.combine_dicts(raw_data, r)

    if type_approval_mode:
        variation, has_plan = {}, False
        if not schema._ta_mode(data):
            return {}, pd.DataFrame([])

    flag = data.get('flag', {}).copy()

    if 'run_base' not in flag:
        flag['run_base'] = not has_plan

    if 'run_plan' not in flag:
        flag['run_plan'] = has_plan

    flag['type_approval_mode'] = type_approval_mode
    flag['output_folder'] = output_folder
    flag['overwrite_cache'] = overwrite_cache
    if modelconf:
        flag['modelconf'] = modelconf

    if timestamp is not None:
        flag['timestamp'] = timestamp

    flag = schema.validate_flags(flag)

    if flag is dsp_utl.NONE:
        return {}, pd.DataFrame([])

    schema.check_data_version(flag)

    res = {
        'flag': flag,
        'variation': variation,
        'input_file_name': input_file_name,
    }
    res = dsp_utl.combine_dicts(flag, res)
    base = dsp_utl.combine_dicts(res, {'data': data.get('base', {})})
    plan = dsp_utl.combine_dicts(res,
                                 {'data': data.get('plan', pd.DataFrame([]))})

    return base, plan
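
The variation-resolution loop near the top is the xleash hook: any variation value that parses as an xl-ref is materialized from the input workbook. A minimal sketch of that step in isolation (check_xlasso is imported from co2mpas.io exactly as above; the helper name resolve_variation is invented):

from pandalone import xleash

def resolve_variation(value, input_file_name, sheets_factory=None):
    from co2mpas.io import check_xlasso
    if isinstance(value, str) and check_xlasso(value):
        # Sheet-only refs resolve against the input workbook itself.
        value = xleash.lasso(value, sheets_factory, url_file=input_file_name)
    return value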