def compareZahlungen2020():
    """Compare the 'Zahlungen_2020' sheets of the local and the web ODS file.

    Both sheets are assumed to be sorted ascending by 'Betrag'; rows present
    in only one of the two files are reported on stdout.
    """
    sheetname = "Zahlungen_2020"
    path1 = "/home/martin/Projects/python/ImmoControlCenter/Ein_Aus_2020_local.ods"
    path2 = "/home/martin/Projects/python/ImmoControlCenter/Ein_Aus_2020_web.ods"
    df_loc = read_ods(path1, sheetname)
    df_web = read_ods(path2, sheetname)
    BETRAG: int = 3  # 0-based column index of "Betrag" in the web sheet
    # row_web = df_web.iloc[0]
    # betrag = df_web.iloc[0, 3]
    # print( row_web )
    rweb: int = 0  # current row index in df_web
    rloc: int = -1  # current row index in df_loc
    betrag_web: float = -999999
    for index, row in df_loc.iterrows():
        rloc += 1
        d1 = row.to_dict()
        betrag_loc = d1["Betrag"]

        # advance through the web sheet until its amount catches up with the local one
        while betrag_web < betrag_loc:
            betrag_web = df_web.iloc[rweb, BETRAG]
            if betrag_web < betrag_loc:
                print(
                    "Zeile fehlt in lokaler Liste: Zeile_web = %d, betrag_web = %.2f"
                    % (rweb + 2, betrag_web))
                rweb += 1

        if betrag_web == betrag_loc:
            rweb += 1
        else:  # betrag_web > betrag_loc
            print("Zeile fehlt in web-Liste: Zeile_loc = %d, Wert_loc = %.2f" %
                  (rloc + 2, betrag_loc))
    def test_wrong_id_type(self):

        path = rsc / header_file

        with pytest.raises(ValueError) as e_info:
            read_ods(path, 1.0)
        assert e_info.match("Sheet id has to be either `str` or `int`")

    def test_non_existent_sheet(self):

        path = rsc / header_file
        sheet_name = "No_Sheet"

        with pytest.raises(KeyError) as e_info:
            read_ods(path, sheet_name)
        assert e_info.match(f"There is no sheet named {sheet_name}")
Example #4
    def static_from_ods(self, url_or_path):
        from pandas_ods_reader import read_ods
        path = self.get_url_or_path(url_or_path)

        # nodes are in first sheet, edges next
        self.df_nodes = read_ods(path, 1).set_index('id')
        self.df_edges = read_ods(path, 2)

        self.df2nx()
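The df2nx() helper is not part of this snippet. Below is a minimal sketch of what such a node/edge-frame-to-graph conversion could look like; the use of networkx and the 'source'/'target' column names in the edge sheet are assumptions, not taken from the original code.

import networkx as nx
import pandas as pd

def df2nx_sketch(df_nodes: pd.DataFrame, df_edges: pd.DataFrame) -> nx.Graph:
    """Hypothetical stand-in for df2nx(): build a graph from two data frames.

    Assumes df_edges has 'source' and 'target' columns holding node ids and
    that df_nodes is indexed by 'id'; the node columns become node attributes.
    """
    g = nx.from_pandas_edgelist(df_edges, source="source", target="target")
    nx.set_node_attributes(g, df_nodes.to_dict(orient="index"))
    return g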
Example #5
    def _readfile(self):
        ''' Load the lap-time and key-vehicle-info sheets into data frames. '''
        # load specific sheet, and remove first two rows
        sheet_name = 'By Class (Lap Time)'
        tmp = read_ods(self.fn, sheet_name, columns=["overall", "rank", "class", "vehicle"])
        self.df_lap = tmp.drop([0,1])

        # load specific sheet, and remove some columns, and rows
        sheet_name = 'Key Vehicle Info'
        tmp = read_ods(self.fn, sheet_name, columns=['classA','name','C','D',
            'Manufacturer','F','G','H','I','J','K','Retailer','Price',
            'N','O','P','Q','R','Date','Year','DLC'])
        tmp.drop(columns=['C','D','F','G','H','I','J','K','N','O','P','Q','R'],
                inplace=True)
        self.df_key = tmp.drop(index=[0, 1])
Example #6
def handler(q=False):
    if q is False:
        return False
    q = json.loads(q)
    filename = q['attachment']
    try:
        ods_array = np.frombuffer(binascii.a2b_base64(q['data']), np.uint8)
    except Exception as e:
        print(e)
        err = "Couldn't fetch attachment (JSON 'data' is empty). Are you using the 'Query enrichment' action?"
        misperrors['error'] = err
        print(err)
        return misperrors

    ods_content = ""
    ods_file = io.BytesIO(ods_array)
    doc = ezodf.opendoc(ods_file)
    num_sheets = len(doc.sheets)
    try:
        for i in range(0, num_sheets):
            ods = pandas_ods_reader.read_ods(ods_file, i, headers=False)
            ods_content = ods_content + "\n" + ods.to_string(max_rows=None)
        print(ods_content)
        return {'results': [{'types': ['freetext'], 'values': ods_content, 'comment': ".ods-to-text from file " + filename},
                            {'types': ['text'], 'values': ods_content, 'comment': ".ods-to-text from file " + filename}]}
    except Exception as e:
        print(e)
        err = "Couldn't analyze file as .ods. Error was: " + str(e)
        misperrors['error'] = err
        return misperrors
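handler() expects the MISP enrichment query as a JSON string carrying the attachment name and the base64-encoded file content. A minimal sketch of a local call follows; the file name "sample.ods" is a placeholder, not part of the original module.

import base64
import json

# Hypothetical local invocation of handler(); "sample.ods" is a placeholder.
with open("sample.ods", "rb") as f:
    query = json.dumps({
        "attachment": "sample.ods",
        "data": base64.b64encode(f.read()).decode(),
    })
print(handler(query))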
Example #7
def test2():
    path = "../testdata/Depotstand_20220330.ods"
    # load a sheet based on its index (1 based)
    sheet_idx = 1
    df = read_ods(path, sheet_idx)
    popotable = getDKBAnleihen(df)
    popotable.print()
    def load_data(self):
        '''
        Load your data by specifying the path
        '''
        sheet_idx = 1
        self.data_frame = read_ods('input.ods', sheet_idx)
        self.data = self.data_frame['LeçonsApprises']
        return self.data
Example #9
def contenuSheetSecondaire(path, branche, nomSheet):
    detailBranche = etree.SubElement(branche, "Details")

    df = read_ods(path, nomSheet)

    for j in range(len(df)):  # iterate over the rows of the sheet
        separationInterieurDesId = etree.SubElement(detailBranche, "Lignes")
        obtenirValeurCellule(path, nomSheet, separationInterieurDesId, j, 0, 2)
    def test_no_header_file_with_cols(self):

        path = rsc / no_header_file
        columns = ["A", "B", "C", "D", "E"]
        df = read_ods(path, 1, headers=False, columns=columns)

        assert list(df.columns) == columns
        assert len(df) == 10
    def test_duplicated_column_names(self):

        path = rsc / duplicated_column_names_file
        df = read_ods(path, 1)

        assert isinstance(df, pd.DataFrame)
        assert len(df.columns) == 4
        assert "website.1" in df.columns
    def test_header_file_col_len(self):

        path = rsc / col_len_file
        df = read_ods(path, 1)

        assert isinstance(df, pd.DataFrame)
        assert len(df) == 10
        assert (len(df.columns) == 5)
    def test_header_file_with_str(self):

        path = rsc / header_file
        df = read_ods(path, "Sheet1")

        assert isinstance(df, pd.DataFrame)
        assert len(df) == 10
        assert (len(df.columns) == 5)
Example #14
def obtenirValeurCellule(path, sheet_name, branche, commencementNumeroLigne, commencementNumeroColonne, arret):
    df = read_ods(path, sheet_name)

    for i in range(len(df.columns) - arret):
        # remove spaces from the column name so it can be used as an XML tag
        nom = df.columns[i + commencementNumeroColonne].replace(" ", "")
        valeur = df.iloc[commencementNumeroLigne, i + commencementNumeroColonne]
        if isinstance(valeur, float):
            etree.SubElement(branche, unidecode.unidecode(nom)).text = str(valeur)
        else:
            etree.SubElement(branche, unidecode.unidecode(nom)).text = valeur
    def test_header_file_with_cols(self):

        path = rsc / header_file
        columns = ["One", "Two", "Three", "Four", "Five"]
        df = read_ods(path, "Sheet1", columns=columns)

        assert list(df.columns) == columns
        assert len(df) == 10
        assert (len(df.columns) == 5)
    def test_no_header_file_no_cols(self):

        path = rsc / no_header_file
        df = read_ods(path, 1, headers=False)

        assert list(df.columns) == [
            f"column.{i}" for i in range(len(df.columns))]
        assert len(df) == 10
        assert (len(df.columns) == 5)
Example #17
def creationFils(path, branche):
    sheet_name = "Classes"
    df = read_ods(path, sheet_name)
    for j in range(len(df)-1):
        # tag name without accents (via unidecode below), commas, spaces or apostrophes
        nom = string.capwords(df.iloc[1 + j, 1]).replace(",", "").replace(" ", "").replace("'", "")
        nomIdentifiant = df.iloc[1 + j, 0]  # the id, e.g. RC0000
        fils = etree.SubElement(branche, unidecode.unidecode(nom), ID=nomIdentifiant)  # sub-branch of root
        obtenirValeurCellule(path, "Classes", fils, j + 1, 1, 1)
        contenuSheetSecondaire(path, fils, nomIdentifiant)
    def test_missing_header(self):

        path = rsc / missing_header_file
        df = read_ods(path, 1)

        assert isinstance(df, pd.DataFrame)
        assert len(df) == 10
        assert (len(df.columns) == 5)

        assert df.columns[2] == "unnamed.1"
Example #19
def creationFils(path, branche):
    sheet_name = "Classes"
    df = read_ods(path, sheet_name)

    for j in range(5):
        # tag name without accents (via unidecode below), commas, spaces or apostrophes
        nom = string.capwords(df.iloc[2 + j, 1]).replace(",", "").replace(" ", "").replace("'", "")
        nomIdentifiant = df.iloc[2 + j, 0]
        fils = etree.SubElement(branche, unidecode.unidecode(nom), IDREF=nomIdentifiant)
        obtenirValeurCellule(path, "Classes", fils, j + 2, 1, 1)
        contenuSheetSecondaire(path, fils, nomIdentifiant)
def provideVerwaltung():
    sheet_name = "Wohnung"
    df = read_ods(stammdatenpath, sheet_name)
    for index, row in df.iterrows():
        d = row.to_dict()
        d["von"] = "2019-01-01"
        d["bis"] = ""
        if not d["vw_id"]: continue
        db.insertVerwaltung(d, False)
    db.commit()
Example #21
    def data_frame(self):
        if self._data_frame is None:
            with NamedTemporaryFile(mode='wb', suffix=".ods") as f:
                req = requests.get(self._get_url())
                req.raise_for_status()
                f.write(req.content)
                f.flush()

                self._data_frame = read_ods(f.name, self._page)

        return self._data_frame
def provideSollmiete():
    sheetname = "Mietverhaeltnis"
    df = read_ods(stammdatenpath, sheetname)
    for index, row in df.iterrows():
        d = row.to_dict()
        d["mv_id"] = create_mv_id(d["name"], d["vorname"])
        d["bis"] = ""
        if math.isnan(d["netto"]): d["netto"] = 0.0
        if math.isnan(d["nkv"]): d["nkv"] = 0.0
        db.insertSollmiete(d, False)
    db.commit()
def provideMietobjekte():
    sheetname = "Wohnung"
    df = read_ods(stammdatenpath, sheetname)
    for index, row in df.iterrows():
        d = row.to_dict()
        d["master_id"] = 0
        d["aktiv"] = 1
        d["container_nr"] = ""
        if math.isnan(d["qm"]): d["qm"] = 0
        db.insertMietobjekt(d, False)
        print(d)
    db.commit()
def getDataFrame(database):
    if database == "default":
        dataframeSource = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "BlindMemo.ods"))
    else:
        dataframeSource = database  # use the given spreadsheet path
    df = read_ods(dataframeSource, 1)
    dfEdges = df[df['Type'] == 'Edge']
    dfCorners = df[df['Type'] == 'Corner']
    #print(dfEdges)
    #print(dfCorners)
    return dfCorners, dfEdges
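getDataFrame() splits the spreadsheet rows into corners and edges via the 'Type' column. A minimal usage sketch, relying only on the "default" branch shown above:

# Hypothetical usage: load the bundled BlindMemo.ods and inspect both pieces.
dfCorners, dfEdges = getDataFrame("default")
print(len(dfCorners), "corner rows,", len(dfEdges), "edge rows")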
def provideMiete2020():
    sheet_name = "Miete_2020"
    df = read_ods(einaus20path, sheet_name)
    for index, row in df.iterrows():
        d = row.to_dict()
        print("processing mietobjekt ", d["mietobjekt_id"])
        for m in range(1, 11):  # only filled through October
            monat = mon_dbnames[m - 1]
            value = d[monat.title()]
            value = 0 if math.isnan(value) else value
            ### obsolete db.updateMtlEinAus( d["mietobjekt_id"], "miete", 2020, m, value )
    db.commit()
Example #26
def creationRacine(path, nbSheet, sheetNames):
    singleSheetName = "Classes"
   
    root = etree.Element("root")

    df = read_ods(path, singleSheetName)

    #obtenirValeurCellule(path, singleSheetName, racine, 1, 1, 1)
    creationFils(path, root)
    #contenuSheetSecondaire(path, root, nbSheet, sheetNames)

    tree = etree.ElementTree(root)
    return tree
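creationRacine() returns an ElementTree built from the "Classes" sheet. Below is a minimal sketch of serializing that tree; the input and output file names are placeholders, and the two unused parameters are passed as None.

# Hypothetical usage: build the XML tree from an .ods file and write it out.
tree = creationRacine("classes.ods", None, None)
tree.write("classes.xml", encoding="utf-8", xml_declaration=True)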
    def test_mixed_dtypes(self):

        path = rsc / mixed_dtypes_file
        df = read_ods(path, 1)

        assert isinstance(df, pd.DataFrame)
        assert len(df) == 10
        assert (len(df.columns) == 5)

        type_list = [float, object, float, float, object]
        assert df.dtypes.tolist() == type_list
        col_b_types = [type(v) for v in df.B.values]
        assert str in col_b_types and float in col_b_types
Example #28
def main():
    hp = Hyperparameters()

    for gender in ['females', 'males']:
        print(gender)
        data = np.load(hp.data_pp_dir + 'data_arrays_' + gender + '.npz')
        df = feather.read_dataframe(hp.data_pp_dir + 'df_index_person_' +
                                    gender + '.feather')
        df_geo = feather.read_dataframe(hp.data_dir +
                                        'Py_VARIANZ_2012_v3-1_GEO.feather')[[
                                            'VSIMPLE_INDEX_MASTER',
                                            'MB2020_code'
                                        ]]
        df_mb_sa2 = read_ods(hp.data_dir + 'MB_SA2.ods',
                             1).rename(columns={
                                 'MB2020_V1_': 'MB2020_code'
                             }).astype(int)
        df_geo = df_geo.merge(df_mb_sa2, how='left',
                              on='MB2020_code').drop(['MB2020_code'], axis=1)
        df = df.merge(df_geo, how='left', on='VSIMPLE_INDEX_MASTER')

        # load predicted risk
        df['RISK_PERC'] = feather.read_dataframe(hp.results_dir + 'df_cml_' +
                                                 gender +
                                                 '.feather')['RISK_PERC']

        # median risk
        print('Median risk: {:.3} IQR: [{:.3}, {:.3}]'.format(
            np.percentile(df['RISK_PERC'].values, 50),
            np.percentile(df['RISK_PERC'].values, 25),
            np.percentile(df['RISK_PERC'].values, 75)))

        # set SA2s with less than 5 people to NaN
        df.loc[df.groupby('SA22020_V1')['VSIMPLE_INDEX_MASTER'].
               transform('nunique') < 5, 'RISK_PERC'] = np.nan

        # get median risk by SA2
        df = df.groupby('SA22020_V1')['RISK_PERC'].median().reset_index()

        # save
        df.to_csv(hp.results_dir + 'df_sa2_' + gender + '.csv')
        if gender == 'females':
            df_females = df
        else:
            df_males = df

    df = df_females.merge(df_males, on='SA22020_V1', how='inner').dropna()
    corr_coeff, lcl, ucl = corr(df['RISK_PERC_x'].values,
                                df['RISK_PERC_y'].values)
    print('Pearsons correlation: {:.3} [{:.3}, {:.3}]'.format(
        corr_coeff, lcl, ucl))
def load_inventory():
    """method to open the inventory ods spreadsheet and create inventory
    objects from each row
    """
    path = "inventory.ods"

    # # load a sheet based on its index (1 based)
    # sheet_idx = 1
    # df = read_ods(path, sheet_idx)

    # # load a sheet based on its name
    # sheet_name = "Sheet1"
    # df = read_ods(path, sheet_name)

    # # load a file that does not contain a header row
    # # if no columns are provided, they will be numbered
    # df = read_ods(path, 1, headers=False)

    # load a file and provide custom column names
    # if headers is True (the default), the header row will be overwritten
    df = read_ods(path,
                  1,
                  columns=[
                      "user_id", "inv_id", "inv_name", "inv_type",
                      "description", "manufacturer", "price",
                      "count_per_package", "size", "picture_path", "keywords"
                  ])

    #print(len(df))
    i = 1
    while i < len(df):

        #pdb.set_trace()
        inventory_item = Inventory(
            inv_id=df.loc[i].inv_id,
            user_id=int(df.loc[i].user_id),
            inv_name=df.loc[i].inv_name,
            inv_type=df.loc[i].inv_type,
            description=df.loc[i].description,
            price=df.loc[i].price,
            count_per_package=df.loc[i].count_per_package,
            manufacturer=df.loc[i].manufacturer,
            size=df.loc[i].size,
            picture_path=df.loc[i].picture_path,
            keywords=df.loc[i].keywords)
        i += 1
        print(inventory_item)
        # Add the User object to the session so it will be stored.
        db.session.add(inventory_item)

    db.session.commit()
def main():
    df = read_ods(path, sheet_name)

    for i in range(lessons_number):
        col_name = "Ders{}".format(i)
        col = df[col_name]
        col_len = len(col)

        for j in range(col_len):
            if df[col_name][j] is not None:
                sort_by_lessons(df[col_name][j], i, j, df)

    print(lessons)
    plot()