# import libraries
import pandas as pd
import os
import numpy as np
from dbfread import DBF
from pandas import DataFrame
# get a list of tables to iterate through
path = '//166.2.126.25/teui1/4_Derek/Salmon_Challis_LTA_Mapping/Zonal_Stats'
out_path = '//166.2.126.25/teui1/4_Derek/Salmon_Challis_LTA_Mapping/Zonal_Stats/'
for root, dirs, files in os.walk(path):
    for file in files:
        if file.endswith(".dbf"):
            print(file)
            # convert the dbf to pandas dataframe
            tempTable = DBF(os.path.join(root, file))
            tempFrame = DataFrame(iter(tempTable))
            file_name = os.path.join(root, file)
            data = tempFrame  #pd.read_csv(file_name)
            df = data.iloc[:, 1:]
            # convert each remaining column from square metres to acres
            for column in df.columns:
                #print(tempFrame[column])
                df.loc[:, column] /= 4046.86
                #print(tempFrame[column])
            # write the converted table once, after every column has been divided
            out_name = file.split('.')[0]
            df.to_csv(path_or_buf=out_path + out_name + '_acres.csv',
                      sep=',')

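# A minimal vectorized sketch of the same conversion, assuming every retained
# zonal-stats column is a numeric area in square metres (1 acre = 4046.86 m^2).
# DBF and DataFrame are the classes imported at the top of this script;
# table_path and csv_path are placeholder arguments:
def dbf_area_to_acres_csv(table_path, csv_path):
    frame = DataFrame(iter(DBF(table_path)))
    acres = frame.iloc[:, 1:] / 4046.86
    acres.to_csv(csv_path, sep=',')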
Example #2
0
    def import_account(self, file_dir, files, journal_data):
        """Import accounts from *_acf*.dbf files.
        The data in those files are the type, name, code and currency of the
        account as well as whether it is used as a default central account for
        partners or taxes.
        :return: (account_data, account_central, account_deprecated_ids, account_tax)
            account_data is a dictionary whose keys are the Winbooks account
                references and the values are the account ids in Odoo.
            account_central is a dictionary whose keys are the Winbooks central
                account references and the values are the account ids in Odoo.
            account_deprecated_ids is a recordset of account that need to be
                deprecated after the import.
            account_tax is a dictionary whose keys are the Winbooks account
                references and the values are the Winbooks tax references.
        """
        def manage_centralid(account, centralid):
            "Set account to being a central account"
            property_name = None
            account_central[centralid] = account.id
            if centralid == 'S1':
                property_name = 'property_account_payable_id'
                model_name = 'res.partner'
            if centralid == 'C1':
                property_name = 'property_account_receivable_id'
                model_name = 'res.partner'
            if centralid == 'V01':
                property_name = 'property_tax_receivable_account_id'
                model_name = 'account.tax.group'
            if centralid == 'V03':
                property_name = 'property_tax_payable_account_id'
                model_name = 'account.tax.group'
            if property_name:
                self.env['ir.property']._set_default(property_name, model_name, account, self.env.company)

        _logger.info("Import Accounts")
        account_data = {}
        account_central = {}
        account_tax = {}
        recs = []
        grouped = collections.defaultdict(list)
        AccountAccount = self.env['account.account']
        ResCurrency = self.env['res.currency']
        AccountGroup = self.env['account.group']
        account_types = [
            {'min': 100, 'max': 160, 'id': 'account.data_account_type_equity'},
            {'min': 160, 'max': 200, 'id': 'account.data_account_type_non_current_liabilities'},
            {'min': 200, 'max': 280, 'id': 'account.data_account_type_non_current_assets'},
            {'min': 280, 'max': 290, 'id': 'account.data_account_type_fixed_assets'},
            {'min': 290, 'max': 400, 'id': 'account.data_account_type_current_assets'},
            {'min': 400, 'max': 401, 'id': 'account.data_account_type_receivable', 'reconcile': True},
            {'min': 401, 'max': 420, 'id': 'account.data_account_type_current_assets'},
            {'min': 420, 'max': 440, 'id': 'account.data_account_type_current_liabilities'},
            {'min': 440, 'max': 441, 'id': 'account.data_account_type_payable', 'reconcile': True},
            {'min': 441, 'max': 490, 'id': 'account.data_account_type_current_liabilities'},
            {'min': 490, 'max': 492, 'id': 'account.data_account_type_current_assets'},
            {'min': 492, 'max': 500, 'id': 'account.data_account_type_current_liabilities'},
            {'min': 500, 'max': 600, 'id': 'account.data_account_type_liquidity'},
            {'min': 600, 'max': 700, 'id': 'account.data_account_type_expenses'},
            {'min': 700, 'max': 822, 'id': 'account.data_account_type_revenue'},
            {'min': 822, 'max': 860, 'id': 'account.data_account_type_expenses'},
        ]
        for file_name in files:
            for rec in DBF(join(file_dir, file_name), encoding='latin').records:
                recs.append(rec)
        for item in recs:
            grouped[item.get('TYPE')].append(item)
        rec_number_list = []
        account_data_list = []
        journal_centered_list = []
        is_deprecated_list = []
        account_deprecated_ids = self.env['account.account']
        for key, val in grouped.items():
            if key == '3':  # 3=general account, 9=title account
                for rec in val:
                    account = AccountAccount.search(
                        [('code', '=', rec.get('NUMBER')), ('company_id', '=', self.env.company.id)], limit=1)
                    if account:
                        account_data[rec.get('NUMBER')] = account.id
                        rec['CENTRALID'] and manage_centralid(account, rec['CENTRALID'])
                    if not account and rec.get('NUMBER') not in rec_number_list:
                        data = {
                            'code': rec.get('NUMBER'),
                            'name': rec.get('NAME11'),
                            'group_id': AccountGroup.search([('code_prefix_start', '=', rec.get('CATEGORY'))], limit=1).id,
                            'currency_id': ResCurrency.search([('name', '=', rec.get('CURRENCY'))], limit=1).id
                        }
                        if rec.get('VATCODE'):
                            account_tax[rec.get('NUMBER')] = rec.get('VATCODE')
                        try:
                            account_code = int(rec.get('NUMBER')[:3])
                        except Exception:
                            _logger.warning(_('%s is not a valid account number for %s.') % (rec.get('NUMBER'), rec.get('NAME11')))
                            account_code = 300  # set Current Asset by default for deprecated accounts
                        for account_type in account_types:
                            if account_code in range(account_type['min'], account_type['max']):
                                data.update({
                                    'user_type_id': self.env.ref(account_type['id']).id,
                                    'reconcile': account_type.get('reconcile', False)
                                })
                                break
                        # fallback for accounts not in range(100000,860000)
                        if not data.get('user_type_id'):
                            data['user_type_id'] = self.env.ref('account.data_account_type_other_income').id
                        account_data_list.append(data)
                        rec_number_list.append(rec.get('NUMBER'))
                        journal_centered_list.append(rec.get('CENTRALID'))
                        is_deprecated_list.append(rec.get('ISLOCKED'))

                        if len(account_data_list) % 100 == 0:
                            _logger.info("Advancement: {}".format(len(account_data_list)))
        account_ids = AccountAccount.create(account_data_list)
        for account, rec_number, journal_centred, is_deprecated in zip(account_ids, rec_number_list, journal_centered_list, is_deprecated_list):
            account_data[rec_number] = account.id
            # create the ir.property if this is marked as a default account for something
            journal_centred and manage_centralid(account, journal_centred)
            # we can't deprecate the account now as we still need to add lines with this account
            # keep the list in memory so that we can deprecate later
            if is_deprecated:
                account_deprecated_ids += account
        return account_data, account_central, account_deprecated_ids, account_tax
Example #3
0
    def import_vat(self, file_dir, files, account_central):
        """Import the taxes from *codevat.dbf files.
        The data in those files are the amount, type, price-include flag, account and tags
        of the taxes.
        :return: a dictionary whose keys are the Winbooks taxes references and
        the values are the taxes ids in Odoo.
        """
        _logger.info("Import VAT")
        vatcode_data = {}
        treelib = {}
        AccountTax = self.env['account.tax']
        tags_cache = {}

        def get_tags(string):
            "Split the tags, create if it doesn't exist and return m2m command for creation"
            tag_ids = self.env['account.account.tag']
            if not string:
                return tag_ids
            indexes = [i for i, x in enumerate(string) if x in ('+', '-')] + [len(string)]
            for i in range(len(indexes) - 1):
                tag_name = string[indexes[i]: indexes[i + 1]]
                tag_id = tags_cache.get(tag_name, False)
                if not tag_id:
                    tag_id = self.env['account.account.tag'].search([('name', '=', tag_name), ('applicability', '=', 'taxes')])
                    tags_cache[tag_name] = tag_id
                if not tag_id:
                    tag_id = self.env['account.account.tag'].create({'name': tag_name, 'applicability': 'taxes', 'country_id': self.env.company.country_id.id})
                tag_ids += tag_id
            return [(4, id, 0) for id in tag_ids.ids]

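        # Illustrative (hypothetical) behaviour of get_tags: a Winbooks tag string such as
        # '+49-59' is split at each '+'/'-' into the tags '+49' and '-59'; each tag is looked
        # up (or created with applicability 'taxes') and returned as (4, tag_id, 0) link commands.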
        data_list = []
        code_list = []
        for file_name in files:
            for rec in DBF(join(file_dir, file_name), encoding='latin').records:
                treelib[rec.get('TREELEVEL')] = rec.get('TREELIB1')
                if not rec.get('USRCODE1'):
                    continue
                tax_name = " ".join([treelib[x] for x in [rec.get('TREELEVEL')[:i] for i in range(2, len(rec.get('TREELEVEL')) + 1, 2)]])
                tax = AccountTax.search([('company_id', '=', self.env.company.id), ('name', '=', tax_name),
                                         ('type_tax_use', '=', 'sale' if rec.get('CODE')[0] == '2' else 'purchase')], limit=1)
                if tax.amount != (rec.get('RATE') if rec.get('TAXFORM') else 0.0):
                    tax.amount = rec.get('RATE') if rec.get('TAXFORM') else 0.0
                if tax:
                    vatcode_data[rec.get('CODE')] = tax.id
                else:
                    data = {
                        'amount_type': 'percent',
                        'name': tax_name,
                        'company_id': self.env.company.id,
                        'amount': rec.get('RATE') if rec.get('TAXFORM') else 0.0,
                        'type_tax_use': 'sale' if rec.get('CODE')[0] == '2' else 'purchase',
                        'price_include': False if rec.get('TAXFORM') or rec.get('BASFORM') == 'BAL' else True,
                        'refund_repartition_line_ids': [
                            (0, 0, {'repartition_type': 'base', 'factor_percent': 100.0, 'tag_ids': get_tags(rec.get('BASE_CN')), 'company_id': self.env.company.id}),
                            (0, 0, {'repartition_type': 'tax', 'factor_percent': 100.0, 'tag_ids': get_tags(rec.get('TAX_CN')), 'company_id': self.env.company.id, 'account_id': account_central.get(rec.get('ACCCN1'), False)}),
                        ],
                        'invoice_repartition_line_ids': [
                            (0, 0, {'repartition_type': 'base', 'factor_percent': 100.0, 'tag_ids': get_tags(rec.get('BASE_INV')), 'company_id': self.env.company.id}),
                            (0, 0, {'repartition_type': 'tax', 'factor_percent': 100.0, 'tag_ids': get_tags(rec.get('TAX_INV')), 'company_id': self.env.company.id, 'account_id': account_central.get(rec.get('ACCINV1'), False)}),
                        ],
                    }
                    if rec.get('ACCCN2'):
                        data['refund_repartition_line_ids'] += [(0, 0, {'repartition_type': 'tax', 'factor_percent': -100.0, 'tag_ids': [], 'company_id': self.env.company.id, 'account_id': account_central.get(rec.get('ACCCN2'), False)})]
                    if rec.get('ACCINV2'):
                        data['invoice_repartition_line_ids'] += [(0, 0, {'repartition_type': 'tax', 'factor_percent': -100.0, 'tag_ids': [], 'company_id': self.env.company.id, 'account_id': account_central.get(rec.get('ACCINV2'), False)})]
                    data_list.append(data)
                    code_list.append(rec.get('CODE'))

                    if len(data_list) % 100 == 0:
                        _logger.info("Advancement: {}".format(len(data_list)))
        tax_ids = AccountTax.create(data_list)
        for tax_id, code in zip(tax_ids, code_list):
            vatcode_data[code] = tax_id.id
        return vatcode_data
Example #4
0
in a dictionary
"""
#start


class MyFieldParser(FieldParser):
    def parse(self, field, data):
        try:
            return FieldParser.parse(self, field, data)
        except ValueError:
            return InvalidValue(data)


for f1 in files:
    print(f1)
    dbf = DBF(f1, parserclass=MyFieldParser)
    for i, record in enumerate(dbf):
        for name, value in record.items():
            if isinstance(value, InvalidValue):
                print('records[{}][{!r}] == {!r}'.format(i, name, value))

    frame_global = DataFrame(iter(dbf))
    di = {}

    try:
        for i in range(np.shape(frame_global['FLOW_M'].tolist())[0]):
            if i % 10000 == 0:
                print(i)
            di[frame_global['DATA'][i],
               frame_global['HORA_I'][i]] = frame_global['FLOW_M'][i]
def isNum(x):
    try:
        float(x)
        return True
    except ValueError:
        return False


#Kv should be in rpms/V
#current should be in amps
#resistance should be in ohms
#weight should be in ounces

connection = sql.connect("components.db")
motoCalcFile = DBF("Motors/MOTOR8.DBF")
cursor = connection.cursor()
cursor.execute("drop table Motors")  # The database is refreshed every time

cursor.execute("""CREATE TABLE Motors (id INTEGER PRIMARY KEY, 
                                      name VARCHAR(40), 
                                      kv FLOAT, 
                                      gear_ratio FLOAT default 1.0, 
                                      resistance FLOAT, 
                                      no_load_current FLOAT default 0.0,
                                      weight FLOAT default -1.0);""")

print("Reading MotoCalc Database")
for record in motoCalcFile:

    print(record)
Example #6
0
        for j in os.listdir(n_path): 
            if "dbf" in j:dbf_path.append(str(n_path + "/" +j))


# We will next read through dbf files and record everything in python lists. As shown below, this data includes 22 columns. 

# In[2]:

# open shapefiles and save the data into python lists
from dbfread import DBF
import pandas as pd
records = []
for n, i in enumerate(dbf_path): 
        idr = i.split("/")[-1][:-4]
        if not "pt" in idr:continue
        for record in DBF(i):
            record.update({"id":idr})
            records.append(record)
columns = []            
for n,r in enumerate(records):
    if n==1: 
        for j in r: columns.append(j)
print columns


# In[3]:

# convert the lists to Pandas DataFrame. We have 22 columns and 128930 rows
recs = []
for n,r in enumerate(records): 
        lst = []
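# A shorter sketch of the same list-to-DataFrame conversion, assuming `records` is the
# list of dicts built above (df_points is a placeholder name); pandas builds the 22
# columns directly from the dict keys:
df_points = pd.DataFrame(records, columns=columns)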
Example #7
0
def make_maps(force_download=False):

    CID_URL = 'http://www.datasus.gov.br/cid10/V2008/downloads/CID10CSV.zip'
    local_zipfile = download_zip(CID_URL, force_download)
    with ZipFile(local_zipfile) as zfile:
        l = zfile.namelist()

        with zfile.open('CID-10-SUBCATEGORIAS.CSV') as cid_file:
            cid_map = pd.read_csv(cid_file,
                                  sep=';',
                                  encoding='iso-8859-1',
                                  usecols=['SUBCAT', 'DESCRICAO'],
                                  index_col='SUBCAT',
                                  squeeze=True)
            cid_map = (cid_map.index + ': ' + cid_map).str.replace(', ', ' - ')

    TAB_SIH_URL = 'ftp://ftp.datasus.gov.br/dissemin/publicos/SIHSUS/200801_/Auxiliar/TAB_SIH.zip'
    local_zipfile = download_zip(TAB_SIH_URL, force_download)
    with ZipFile(local_zipfile) as zfile:
        l = zfile.namelist()

        # br_municip.cnv
        with zfile.open('br_municip.cnv') as file:
            mun_map = pd.read_table(file,
                                    sep=b'\s{2,}',
                                    engine='python',
                                    encoding='iso-8859-1',
                                    skiprows=1,
                                    header=None,
                                    skipfooter=3).applymap(func_decode)
            mun_map[1] = mun_map[1].str.split(' ', 1).str.get(1)
            mun_map = pd.Series(mun_map[1].tolist(), index=mun_map[2].tolist())
            for i, mun in mun_map[mun_map.str.startswith('Município')].items():
                for i2 in i.split(','):
                    mun_map[str(i2).zfill(6)] = mun
                mun_map.drop(i, inplace=True)
            mun_map = mun_map.to_dict()
            for i in ['520000', '529999']:
                mun_map[i] = 'Município ignorado - GO'

            list_brasilia = []
            for i in range(530000, 530009 + 1):
                list_brasilia.append(str(i))
            for i in range(530011, 539999 + 1):
                list_brasilia.append(str(i))
            list_brasilia.append('530010')

            for i in list_brasilia:
                mun_map[i] = 'Brasília'

            list_ign = []
            for i in range(1, 9999):
                list_ign.append(str(i).zfill(6))

            list_ign = list_ign + ['000000', '999999']
            for i in list_ign:
                mun_map[i] = 'Ignorado ou exterior'

        # CARATEND.CNV
        with zfile.open('CARATEND.CNV') as file:
            caratend_map = pd.read_table(file,
                                         sep=b'\s{2,}',
                                         engine='python',
                                         encoding='iso-8859-1',
                                         skiprows=1,
                                         header=None).applymap(func_decode)
            caratend_map = {
                '01': 'Eletivo',
                '02': 'Urgência',
                '03': 'Acidente no local trabalho ou a serv da empresa',
                '04': 'Acidente no trajeto para o trabalho',
                '05': 'Outros tipo de acidente de trânsito',
                '06': 'Out tp lesões e envenen por agent quím físicos'
            }
            #done by hand because the encoding and decode didn't work

        # COMPLEX2.CNV
        with zfile.open('COMPLEX2.CNV') as file:
            comp_map = pd.read_table(file,
                                     sep=b'\s{2,}',
                                     engine='python',
                                     encoding='iso-8859-1',
                                     skiprows=1,
                                     header=None).applymap(func_decode)
            comp_map = {
                '00': 'Não se aplica',
                '01': 'Atenção básica',
                '02': 'Média complexidade',
                '03': 'Alta complexidade',
                '99': 'Não se aplica'
            }
            #done by hand because the encoding and decode didn't work

        # FINANC.CNV
        with zfile.open('FINANC.CNV') as file:
            df = pd.read_table(file,
                               sep=b'\s{2,}',
                               engine='python',
                               encoding='iso-8859-1',
                               skiprows=1,
                               header=None).applymap(func_decode)
            temp = pd.DataFrame([{
                1: 'Não discriminado',
                2: '00'
            }, {
                1: 'Não discriminado',
                2: '99'
            }, {
                1: '04 Fundo de Ações Estratégicas e Compensações FAEC',
                2: '04'
            }])
            df = pd.concat([df, temp],
                           ignore_index=True).drop(0, axis=1).drop([0, 3])
            financ_map = pd.Series(df[1].tolist(), index=df[2].tolist())

        # ETNIA
        with zfile.open('etnia.cnv') as et_file:
            df_et = pd.read_table(et_file,
                                  sep=b'\s{2,}',
                                  engine='python',
                                  encoding='iso-8859-1',
                                  skiprows=1,
                                  header=None).applymap(func_decode)
            ni = pd.DataFrame([{
                1: 'NÃO INFORMADO',
                2: '0000'
            }, {
                1: 'NÃO INFORMADO',
                2: '9999'
            }])
            df_et = pd.concat([df_et, ni],
                              ignore_index=True).drop(0, axis=1).drop(0)
            et_map = pd.Series(df_et[1].tolist(), df_et[2].tolist())

        # SEX
        with zfile.open('SEXO.CNV') as sex_file:
            df = pd.read_table(sex_file,
                               sep=b'\s{2,}',
                               engine='python',
                               skiprows=1,
                               header=None).applymap(func_decode)
            sex_map = {
                df.loc[0, 2][0]: df.loc[0, 1],
                df.loc[0, 2][2]: df.loc[0, 1],
                df.loc[1, 2]: df.loc[1, 1],
                df.loc[2, 2][0]: df.loc[2, 1],
                df.loc[2, 2][2]: df.loc[2, 1]
            }

        # UF
        with zfile.open('br_ufsigla.cnv') as uf:
            df = pd.read_table(
                uf, sep=b'\s{2,}', engine='python', skiprows=1,
                header=None).applymap(func_decode).dropna(axis=1)
            pe = pd.DataFrame([{1: 'PE', 2: '20'}, {1: 'PE', 2: '26'}])
            df = pd.concat([df, pe], ignore_index=True)
            df.drop(12, inplace=True)
            uf_map = pd.Series(df[1].tolist(), df[2].tolist())

        # CNES
        list_df = []
        list_cnes = [item for item in l if re.search(r'\S+CNES\S+', item)]
        for file in list_cnes:
            zfile.extract(file)
            dbf = DBF(file, encoding='utf-8')
            cnes_temp = pd.DataFrame(list(dbf))
            list_df.append(cnes_temp)
            os.unlink(file)
        cnes_br = pd.concat(list_df, sort=False).drop_duplicates()
        cnes_br['UF_ZI'] = cnes_br.UF_ZI.map(uf_map)
        cnes_map = pd.Series(cnes_br.NOMEFANT.tolist(),
                             index=cnes_br.CNES.tolist())

        # CBO.dbf
        file = 'CBO.dbf'
        zfile.extract(file)
        dbf = DBF(file, encoding='iso-8859-1')
        cbo_map = pd.DataFrame(list(dbf))
        cbo_map = pd.Series(cbo_map.DS_CBO.tolist(),
                            index=cbo_map.CBO.tolist())
        os.unlink(file)


#     # group_cid_map
#     group_cid_map = {}
#     for key in MAP_CIDS:
#         for v in MAP_CIDS[key]:
#             group_cid_map[v] = key

    return sex_map, cnes_map, uf_map, cid_map, et_map, financ_map, cbo_map, comp_map, mun_map, caratend_map
Example #8
0
import os
from ftplib import FTP
from zipfile import ZipFile

import pandas as pd
from dbfread import DBF


def download_table_dbf(file_name):
    """
    Downloads an auxiliary SIA data file in "dbf" format, or the "zip" folder that
    contains it (if that "zip" folder has not already been downloaded), then reads it
    as a pandas DataFrame and finally deletes the downloaded file.

    Parameters
    ----------
    file_name: str
        Name of the "dbf" file

    Returns
    -------
    df: pandas DataFrame
        DataFrame containing the data of an auxiliary data file originally in "dbf" format
    """

    if ((file_name == 'CADMUN') or (file_name == 'TABUF')):
        fname = file_name + '.DBF'
        ftp = FTP('ftp.datasus.gov.br')
        ftp.login()
        ftp.cwd('/dissemin/publicos/SIM/CID10/TABELAS/')
        ftp.retrbinary(f'RETR {fname}', open(fname, 'wb').write)

    elif file_name == 'rl_municip_regsaud':
        folder = 'base_territorial.zip'
        ftp = FTP('ftp.datasus.gov.br')
        ftp.login()
        ftp.cwd('/territorio/tabelas/')
        ftp.retrbinary(f'RETR {folder}', open(folder, 'wb').write)
        zip = ZipFile(folder, 'r')
        fname = file_name + '.dbf'
        zip.extract(fname)

    else:
        folder = 'TAB_SIA.zip'
        if not os.path.isfile(folder):
            ftp = FTP('ftp.datasus.gov.br')
            ftp.login()
            ftp.cwd('/dissemin/publicos/SIASUS/200801_/Auxiliar/')
            ftp.retrbinary(f'RETR {folder}', open(folder, 'wb').write)
        zip = ZipFile(folder, 'r')
        try:
            fname = file_name + '.DBF'
            zip.extract('TAB_DBF/' + fname)
        except KeyError:
            try:
                fname = file_name + '.dbf'
                zip.extract('TAB_DBF/' + fname)
            except KeyError:
                raise Exception(f'Could not access {file_name}.')

    if ((file_name == 'CADMUN') or (file_name == 'TABUF')
            or (file_name == 'rl_municip_regsaud')):
        dbf = DBF(fname)

    else:
        dbf = DBF('TAB_DBF/' + fname, encoding='iso-8859-1')

    df = pd.DataFrame(iter(dbf))

    if ((file_name == 'CADMUN') or (file_name == 'TABUF')
            or (file_name == 'rl_municip_regsaud')):
        os.unlink(fname)

    else:
        os.unlink('TAB_DBF/' + fname)

    return df
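
# A minimal usage sketch, assuming the function above is importable, the DATASUS FTP
# server is reachable and 'TABUF' is one of the auxiliary tables it handles:
if __name__ == '__main__':
    df_uf = download_table_dbf('TABUF')
    print(df_uf.head())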
Example #9
0
import sys
import csv
from dbfread import DBF

fname = sys.argv[-1]
assert fname.lower().endswith('.dbf')
outname = fname.lower()[:-4]+".csv"
print("Converting",fname,"to",outname,".")

table = DBF(fname)

with open(outname,'w',newline='') as f:
	writer = csv.writer(f)
	writer.writerow(table.field_names)
	for record in table:
		writer.writerow(list(record.values()))
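
# Usage sketch (assumption: the script above is saved as dbf2csv.py):
#   python dbf2csv.py path/to/table.dbf   ->  writes path/to/table.csv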
Example #10
0
# In[21]:


import dbfread
import pandas as pd


# In[22]:


from dbfread import DBF


# In[23]:


tab = DBF("/Users/margaretmccall/Downloads/Assignment Result 4/FAF4DATA_V43.DBF")


# In[27]:


frame= pd.DataFrame(iter(tab))


# In[28]:


frame.head()


# In[30]:
Example #11
0
from dbfread import DBF
import re
import config

client = config.client

query = client.query("SELECT FROM Carrera")

#for result in query:
#	print result

table = DBF('KI9119B.dbf', load=True, encoding="latin-1")

table2 = DBF('KI9119A.dbf', load=True, encoding="latin-1")
#print list(set(table.field_names) - set(table2.field_names))
#print table.field_names
#print table2.field_names
r = re.compile("S\d")
fields = filter(r.match, table.field_names)
#print fields
#DELETING ALL RECORDS
#client.command("TRUNCATE CLASS Institucion UNSAFE")
#client.command("TRUNCATE CLASS Empleado UNSAFE")
#client.command("TRUNCATE CLASS Responsable UNSAFE")
#client.command("TRUNCATE CLASS Direccion UNSAFE")
#client.command("TRUNCATE CLASS Resultados911 UNSAFE")
#DELETING ALL RECORDS
#Check fields
info_general = [
    'CARRERA', 'NOMBRECAR', 'NOMCARSINA', 'MODALIDAD', 'NOMMOD', 'PLAN_EST',
    'DURACION', 'ESTRUCT', 'ESTATUS', 'MAT_2015', 'TIPO_SUB', 'SUBSISTEMA',
from dbfread import DBF
import csv
import sys
import json
from random import randrange
import random

# Script to generate a geoJSON with all municipalities of BL
# Data sources:
# Municipality list: https://www.bfs.admin.ch/bfs/de/home/grundlagen/agvch.assetdetail.11467406.html
# Geodata: SwissBoundaries3D

# not the best code ;) but it works
# (c) 2020, RadioSYS GmbH, Oliver Wisler

os = DBF('./data/PLZO_OS.dbf')
osvb = DBF('./data/PLZO_OS.dbf')
osname = DBF('./data/PLZO_OSNAME.dbf', encoding='utf-8')
osnamepos = DBF('./data/PLZO_OSNAMEPOS.dbf', encoding='utf-8')
plz = DBF('./data/PLZO_PLZ.dbf')
print(plz.field_names)
print(osvb.field_names)
print(os.field_names)
print(osname.field_names)
print(osnamepos.field_names)

sHoheitsgebiet = DBF('./data/swissBOUNDARIES3D_1_3_TLM_HOHEITSGEBIET.dbf',
                     encoding='utf-8')
print(sHoheitsgebiet.field_names)

print('Lengths os:%s osname:%s osnamepos:%s plz:%s' %
Example #13
0
from dbfread import DBF

count = 0
sites = []
print list(DBF('../ttms/ttms.dbf'))[0].keys()
for record in DBF('../ttms/ttms.dbf'):

    # print record.keys()
    # print record['LOCATION']
    # print record['SECTION_']
    # print record['Sitetype']
    # if 'YEAR_' in record:
    # 	print record['YEAR_']
    # print record['COMM']

    cosite = record['Cosite']
    if cosite[:2] == '75':
        # print record['YEAR_'], cosite
        print record.values()

    # count += 1
    # if count == 100:
    # 	break
Example #14
0
def WT_processing_pipeline(path,
                           pipe_WT_TBD,
                           assign_types=False,
                           makecopy=True):
    # copy dbf to memory in pandas dataframe

    tic = time()

    dbf_ref = DBF(path, ignore_missing_memofile=True)
    references_dataFrame = DataFrame(iter(dbf_ref))

    if assign_types:
        references_dataFrame = assign_GW_ups_downs(references_dataFrame,
                                                   pipe_WT_TBD)

    references_dataFrame['WT_ASS'] = 0.0
    #pipe_WT_TBD = []
    GW_WT_list = []
    GW_index_list = []
    sleeve = False
    assigned_value_list = []
    clamp = False

    for index, row in references_dataFrame.iterrows():

        # Check for sleeves, tees, installations
        if row['TYPE'] == "AS_Sleeve":
            sleeve = True

        if row['TYPE'] == "AE_Sleeve":
            sleeve = False

        if row['TYPE'] == "Clamp":
            clamp = True

        # Check for end of a GWs section and apply WT
        if row['TYPE'] == "GW_wt_dwn" or row['TYPE'] == "GW_wt_up" or row[
                'TYPE'] == "AE_Receiv":
            GW_WT_list_mean = np.mean(GW_WT_list)
            GW_WT_list_median = np.median(GW_WT_list)

            # Compare WTs to list if it exists
            assigned_value = compare_GW_WTs(pipe_WT_TBD, GW_WT_list_median)

            # track assigned values
            assigned_value_list.append(assigned_value)

            index_wta = references_dataFrame.columns.get_loc("WT_ASS")
            # Assign GW WT
            for GW in GW_index_list:

                references_dataFrame.iat[GW, index_wta] = assigned_value
                #references_dataFrame.iat[GW,5] = 55

            # start new lists
            GW_WT_list = []
            GW_index_list = []

            if row['TYPE'] != "AE_Receiv":
                # populate with last gw up or down
                GW_WT_list.append(row['WT_CALC'])
                GW_index_list.append(index)

        # Create a list of a section of GWs
        if row['TYPE'] == "GirthWeld":

            # if a sleeve or clamp is present, do not append the calculated GW WT value, just the index position
            if sleeve or clamp:
                GW_index_list.append(index)
                clamp = False
            else:
                GW_WT_list.append(row['WT_CALC'])
                GW_index_list.append(index)

    if makecopy:
        table_copy = make_dbf_copy(path)
    else:
        # overwriting references db
        table_copy = path

    write_to_table(table_copy, references_dataFrame)

    toc = time()

    print("Task took {0:.{1}f} seconds".format(toc - tic, 1))
Example #15
0
"""
Add custom field parsing by subclassing FieldParser.
"""

from dbfread import DBF, FieldParser

class CustomFieldParser(FieldParser):
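    # dbfread's FieldParser looks up a parse method per DBF field-type letter,
    # so overriding parseC() changes how 'C' (character) fields are decoded.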
    def parseC(self, field, data):
        # Return strings reversed.
        return data.rstrip(' 0').decode()[::-1]

for record in DBF('files/people.dbf', parserclass=CustomFieldParser):
    print(record['NAME'])
Example #16
0
"""
Return records as objects with fields as attributes.
"""
from dbfread import DBF

class Record(object):
    def __init__(self, items):
        for name, value in items:
            setattr(self, name, value)

for record in DBF('files/people.dbf', recfactory=Record, lowernames=True):
    print(record.name, 'was born on', record.birthdate)
Example #17
0
asm.setid("animal", 100)
asm.setid("owner", 100)
asm.setid("ownerdonation", 100)
asm.setid("ownerlicence", 100)
asm.setid("adoption", 100)
asm.setid("animalcontrol", 100)

print "\\set ON_ERROR_STOP\nBEGIN;"
print "DELETE FROM animal WHERE ID >= 100;"
print "DELETE FROM animalcontrol WHERE ID >= 100;"
print "DELETE FROM owner WHERE ID >= 100;"
print "DELETE FROM ownerdonation WHERE ID >= 100;"
print "DELETE FROM ownerlicence WHERE ID >= 100;"
print "DELETE FROM adoption WHERE ID >= 100;"

for p in DBF("%s/NAMES.DBF" % PATH):
    o = asm.Owner()
    owners.append(o)
    ppo[p["ID"]] = o
    o.OwnerForeNames = p["F_NAME"]
    o.OwnerSurname = p["L_NAME"]
    o.OwnerAddress = "%s %s\n%s" % (p["ADR_ST_NUM"], p["ADR_ST_NAM"], p["ADR_LINE2"])
    o.OwnerTown = p["CITY"]
    o.OwnerCounty = p["STATE"]
    o.OwnerPostcode = p["ZIP"]
    o.HomeTelephone = p["H_PHONE"]
    o.WorkTelephone = p["W_PHONE"]
    comments = "ID: %s" % p["ID"]
    comments += "\n%s" % asm.nulltostr(p["NAMES_TXT"])
    o.Comments = comments
import pymysql
from dbfread import DBF


def dbf_to_mysql_ValuationFile(filename):

    # set mysql connection
    conn = pymysql.connect(host='10.20.12.7', port=3306, user='******', passwd='HJam2017!', db='RiskMeasurement', use_unicode=True, charset="utf8")
    cur = conn.cursor()

    # load dbf and insert to db
    dbf_file = filename
    product = dbf_file[-12:-4]
    table = DBF(filename=dbf_file, load=True, encoding="gb2312")

    # get record
    for row in table.records:
        temp_row_list = []

        # filter empty lines
        if row.get('A0') == '' or row.get('Ffdate') == '':
            continue

        # get values for each rows
        for key in row:
            x = row.get(key)
            if not isinstance(x, float):
                value1 = str(x).encode('utf-8').decode('utf-8')
                value = "'%s'" % value1
            else:
                value = x
            temp_row_list.append(value)

        # mark product id
        product_id = "'%s'" % str(product).encode('utf-8').decode('utf-8')
        temp_row_list.append(product_id)

        # replace None with 0.0/0 for float/int field
        ## define a method first
        def replace_none_value(values, startpoint, endpoint, givenvalue):
            s = int(startpoint)
            if endpoint == 'null':  # single item
                if values[s] == "'None'":
                    values[s] = givenvalue
                return values
            else:
                e = int(endpoint)
                for i in values[s:e]:  # multiple items
                    if i == "'None'":
                        values[values.index(i)] = givenvalue
                return values
        ## execute the method
        row_list = replace_none_value(temp_row_list,3,7,0.0)
        row_list = replace_none_value(temp_row_list,12,17,0.0)
        row_list = replace_none_value(temp_row_list,-2,'null',0)

        # format list to tuple
        insert_values = tuple(row_list)
        print(row_list)
        print(insert_values)

        # compile sql statement
        insert_statement = """insert into Valuation values(%s,%s,%s,%f,%f,%f,%f,%s,%s,%s,%s,%s,%f,%f,%f,%f,%f,%s,%s)""" % insert_values

        # execute sql
        insert_result = cur.execute(insert_statement)
        conn.commit()
        print(insert_result)

    # close mysql connection
    cur.close()
    conn.close()
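
# Usage sketch (hypothetical path; the function derives the product id from the 8
# characters before ".dbf", i.e. product = dbf_file[-12:-4]):
# dbf_to_mysql_ValuationFile('/data/valuation/PRODUCT1.dbf')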
Example #19
0
	return i_query


if load.lower()=='y':
	files =  os.listdir('./data')
	files = [x for x in files if not x.startswith('.')]
	files = [x for x in files if x.lower().endswith('.dbf')]
	files.sort()

	print(f"\nA total of {len(files)} tables will be loaded...\n")
	time.sleep(2)
	
	#loading tables into database
	for table in tqdm(files):
		#print(f'Loading {table}...')
		dbf = DBF(f'data/{table}')
		df = pd.DataFrame(iter(dbf))
		if not df.empty:
			df.to_sql(name=table[:-4], con=conn, if_exists = 'replace', index=False)
	
	#creating table that gives you an over view of the database
	table_overview_ct="""
	CREATE TABLE IF NOT EXISTS TABLE_OVERVIEW (
		TABLE_NAME	TEXT,
		COLUMN_NAME	TEXT,
		DATA_TYPE	TEXT,
		ROW_COUNT 	INT
	);
	"""
	conn.execute(table_overview_ct)
	
Example #20
0
asm.setid("animal", 100)
asm.setid("owner", 100)
asm.setid("ownerlicence", 100)
asm.setid("ownerdonation", 100)
asm.setid("animalcontrol", 100)

print "\\set ON_ERROR_STOP\nBEGIN;"
print "DELETE FROM animal WHERE ID >= 100;"
print "DELETE FROM animalcontrol WHERE ID >= 100;"
print "DELETE FROM owner WHERE ID >= 100;"
print "DELETE FROM ownerlicence WHERE ID >= 100;"
print "DELETE FROM ownerdonation WHERE ID >= 100;"

# pre-load address and residences mappings
addresses = {}
for a in DBF("%s/ADDRESS.DBF" % PATH):
    addresses[a["ADDRESSNO"]] = a

residences = {}
for r in DBF("%s/RESIDENC.DBF" % PATH):
    residences[r["PERSONNO"]] = r

for p in DBF("%s/PERSON.DBF" % PATH):
    if p["PERSONNO"] == "New": continue
    o = asm.Owner()
    owners.append(o)
    ppo[p["PERSONNO"]] = o
    o.OwnerForeNames = p["FIRSTNAME"]
    o.OwnerSurname = p["LASTNAME"]
    if p["AGENCY"] != "" and o.OwnerSurname == "":
        o.OwnerSurname = p["AGENCY"]
Example #21
0
from dbfread import DBF
import psycopg2

connection = psycopg2.connect(user="******",
                              password="******",
                              host="127.0.0.1",
                              port="5432",
                              database="astz")

all_cnt = 0
all_rows = 0
errors = []

for record in DBF('/home/bastion/MD_MAINZ-20190827/MD_mainz.DBF'):
    all_rows += 1
    if all_rows < 942877 + 1:
        if all_rows % 100000 == 0:
            print(all_rows)
        continue
    real_keys = []
    values = []
    for key in record.keys():
        val = record[key]
        if key == 'IS':
            key = 'IS_'
        real_keys.append(key)
        values.append(val)

    cursor = connection.cursor()
    try:
        cursor.execute(
Example #22
0
 def __insert_hospital(self, file, request, tip_file):
     if tip_file == 1:
         self.OPER = False
         temp_oper.objects.filter(user=request.user.id).all().delete()
         for rec in DBF(file,
                        char_decode_errors="ignore",
                        encoding="utf-8",
                        lowernames=True):
             dict_rec_r = dict(rec)
             kodan = str(dict_rec_r['kodan']).strip(' ')
             kodan = ''.join(kodan)
             try:
                 temp_oper.objects.create(kod_op=dict_rec_r['kod_op'],
                                          dato=dict_rec_r['dato'],
                                          goc_o=dict_rec_r['goc_o'],
                                          py=dict_rec_r['py'],
                                          kodx=dict_rec_r['kodx'],
                                          kodxa=dict_rec_r['kodxa'],
                                          kodxa1=dict_rec_r['kodxa1'],
                                          obz=dict_rec_r['obz'],
                                          kodan=kodan,
                                          pr_osob=dict_rec_r['pr_osob'],
                                          k_mm=dict_rec_r['k_mm'],
                                          nib=dict_rec_r['nib'],
                                          user=request.user.id)
                 self.OPER = True
             except KeyError:
                 pass
     else:
         self.SLUCH = False
         temp_sluch.objects.filter(user=request.user.id).all().delete()
         for rec in DBF(file,
                        char_decode_errors="ignore",
                        encoding="cp866",
                        lowernames=True):
             dict_rec_r = dict(rec)
             try:
                 tm_let = dict_rec_r[
                     'tm_let'] if dict_rec_r['tm_let'] != '' else None
                 temp_sluch.objects.create(
                     fam=dict_rec_r['fam'],
                     im=dict_rec_r['im'],
                     ot=dict_rec_r['ot'],
                     pol=dict_rec_r['pol'],
                     datr=dict_rec_r['datr'],
                     udl=dict_rec_r['udl'],
                     s_pasp=dict_rec_r['s_pasp'],
                     n_pasp=dict_rec_r['n_pasp'],
                     ss=dict_rec_r['ss'],
                     c_oksm=dict_rec_r['c_oksm'],
                     adr=dict_rec_r['adr'],
                     m_roj=dict_rec_r['m_roj'],
                     cod_adr=dict_rec_r['cod_adr'],
                     cj=dict_rec_r['cj'],
                     v_lgoty=dict_rec_r['v_lgoty'],
                     in_t=dict_rec_r['in_t'],
                     rab=dict_rec_r['rab'],
                     r_n=dict_rec_r['r_n'],
                     prof=dict_rec_r['prof'],
                     vec=dict_rec_r['vec'],
                     nib=dict_rec_r['nib'],
                     datp=dict_rec_r['datp'],
                     datv=dict_rec_r['datv'],
                     goc=dict_rec_r['goc'],
                     prpg=dict_rec_r['prpg'],
                     vrez=dict_rec_r['vrez'],
                     lpy=dict_rec_r['lpy'],
                     ws=dict_rec_r['ws'],
                     tm_otd=dict_rec_r['tm_otd'],
                     otd=dict_rec_r['otd'],
                     prof_k=dict_rec_r['prof_k'],
                     icx=dict_rec_r['icx'],
                     dsny=dict_rec_r['dsny'],
                     dsk=dict_rec_r['dsk'],
                     dskz=dict_rec_r['dskz'],
                     dsc=dict_rec_r['dsc'],
                     ds_osl=dict_rec_r['ds_osl'],
                     dson=dict_rec_r['dson'],
                     ksg_osn=dict_rec_r['ksg_osn'],
                     ksg_sop=dict_rec_r['ksg_sop'],
                     vid_hmp=dict_rec_r['vid_hmp'],
                     metod_hmp=dict_rec_r['metod_hmp'],
                     trs=dict_rec_r['trs'],
                     tm_let=tm_let,
                     pri=dict_rec_r['pri'],
                     ds_let=dict_rec_r['ds_let'],
                     wskr=dict_rec_r['wskr'],
                     dspat=dict_rec_r['dspat'],
                     rasxp=dict_rec_r['rasxp'],
                     otd_y=dict_rec_r['otd_y'],
                     vds=dict_rec_r['vds'],
                     sctp=dict_rec_r['sctp'],
                     nctp=dict_rec_r['nctp'],
                     t_pol=dict_rec_r['t_pol'],
                     ctkom=dict_rec_r['ctkom'],
                     ksg_ts=dict_rec_r['ksg_ts'],
                     t_trv=dict_rec_r['t_trv'],
                     details=dict_rec_r['details'],
                     trav_ns=dict_rec_r['trav_ns'],
                     pmg=dict_rec_r['pmg'],
                     user=request.user.id)
                 self.SLUCH = True
             except KeyError:
                 continue
Example #23
0
# @File : dbf_cut.py
# @desc : cut big dbf file to small excel files

import xlsxwriter
from dbfread import DBF

print("""
用于dbf文件自动下载失败
需要切割大dbf文件为xlsx
上传后用备用抓取下载入库
""")

MAX = 5000
FILE_list = ['bond634.dbf', 'BOND634_O.dbf']  #input('file: ') # file names
for FILE in FILE_list:
    table = DBF(filename=FILE, encoding='GBK')


def write_to_excel(data: [[]], path: str):
    """
    Args:
        data: [[]] double list
        path: export file path

    """
    workbook = xlsxwriter.Workbook(path)
    worksheet = workbook.add_worksheet('sheet1')
    x = 0
    for d in data:
        y = 0
        for value in d:
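# A minimal self-contained sketch of the row-writing step this helper performs,
# assuming `data` is a list of row lists; write_chunk is an illustrative name,
# not part of the original code:
import xlsxwriter

def write_chunk(rows, path):
    workbook = xlsxwriter.Workbook(path)
    worksheet = workbook.add_worksheet('sheet1')
    for x, row in enumerate(rows):
        for y, value in enumerate(row):
            worksheet.write(x, y, value)
    workbook.close()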
Example #24
0
folder_geoparks = 'geoparks_world/'
folder_geoparks_EA = 'geoparks_asia_europe/'

#%% data World
#change directory
path = disk + folder + folder_world
os.chdir(path)

fileList = glob.glob("*.dbf")

#create empty dataframe
data_world = pd.DataFrame()

for file in glob.glob("*.dbf"):
    # for index, file in enumerate(fileList):
    data = pd.DataFrame(iter(DBF((path + file), load=True)))
    data['name'] = file.replace(".dbf", "")
    data_world = pd.concat([data_world, data], axis=0, sort=True)
    data_world.replace(99999, np.nan, inplace=True)  # replace 99999 with NaN

data_worldM = pd.melt(data_world, id_vars=["name"])

#%% data Europe
path = disk + folder + folder_europe
os.chdir(path)

fileList = glob.glob("*.dbf")

#create empty dataframe
data_europe = pd.DataFrame()
in_dbf = r"I:\Projects\Darren\PPA_V2_GIS\SACSIM Model Data\trans.link.all_{}.dbf".format(
    year)

out_csv = r'I:\Projects\Darren\PPA_V2_GIS\SACSIM Model Data\transit_linkvol_{}.csv'.format(
    year)

col_anode = 'A'
col_bnode = 'B'

groupby_cons_col = '{}_{}'.format(col_anode, col_bnode)

val_cols = ['VOL', 'REV_VOL']
# ---------------------------------------------------------

dbf = DBF(in_dbf)
df_data = pd.DataFrame(dbf)

input_cols = [col_anode, col_bnode] + val_cols

df_data = df_data[input_cols]

df_data[groupby_cons_col] = df_data[col_anode].map(
    str) + '_' + df_data[col_bnode].map(str)

#df_data[grouby_cons_col] = df_data.swifter.apply(lambda x: '{}_{}'.format(x[group_cols[0]], x[group_cols[1]]), axis = 1)

val_cols.append(groupby_cons_col)

df_summed = df_data[val_cols].groupby([groupby_cons_col]).sum().reset_index()
df_summed[val_cols].to_csv(out_csv, index=False)
Example #26
0
from dbfread import DBF
import re
import datetime
import config

client = config.client
nivel = ["secundaria"]
subnivel = ["general"]
table = DBF('SECUNI16.dbf', load=True, encoding="latin-1")
#print table.field_names
#print table.records[0]
r = re.compile("V\d")
fields = filter(r.match, table.field_names)
#print fields
info_general = [
    'CLAVECCT', 'N_CLAVECCT', 'DIRSERVREG', 'UNIDADRESP', 'PROGRAMA',
    'SUBPROG', 'RENGLON', 'N_RENGLON', 'PERIODO', 'MOTIVO', 'DISPON'
]
turno = ['TURNO']
ubicacion = [
    'N_ENTIDAD', 'MUNICIPIO', 'N_MUNICIPI', 'LOCALIDAD', 'N_LOCALIDA',
    'DOMICILIO', 'ZONAESCOLA', 'SECTOR'
]
sostenimiento = ['SOSTENIMIE']
dependencia_normativa = ['DEPNORMTVA', 'DEPADMVA']
servicio = ['SERVICIO']
renombres = {
    u'CLAVECCT': 'clave',
    u'N_CLAVECCT': 'nombre',
    u'TURNO': 'turno',
    u'N_ENTIDAD': 'nombre_entidad',
Example #27
0
    def _import_move(self, file_dir, files, scanfiles, account_data, account_central, journal_data, partner_data, vatcode_data, param_data):
        """Import the journal entries from *_act*.dfb and @scandbk.zip files.
        The data in *_act*.dfb files are related to the moves and the data in
        @scandbk.zip files are the attachments.
        """
        _logger.info("Import Moves")
        recs = []
        ResCurrency = self.env['res.currency']
        IrAttachment = self.env['ir.attachment']
        suspense_account = self.env['account.account'].search([('code', '=', self.suspense_code)], limit=1)
        if not self.only_open and not suspense_account:
            raise UserError(_("The code for the Suspense Account you entered doesn't match any account"))
        counter_part_created = False
        for file_name in scanfiles:
            with zipfile.ZipFile(join(file_dir, file_name), 'r') as scan_zip:
                scan_zip.extractall(file_dir)
        for file_name in files:
            for rec in DBF(join(file_dir, file_name), encoding='latin').records:
                if rec.get('BOOKYEAR') and rec.get('DOCNUMBER') != '99999999':
                    recs.append(rec)
        result = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in recs)]
        grouped = collections.defaultdict(list)
        for item in result:
            # Group by number/year/period
            grouped[item['DOCNUMBER'], item['DBKCODE'], item['DBKTYPE'], item['BOOKYEAR'], item['PERIOD']] += [item]

        move_data_list = []
        pdf_file_list = []
        reconcile_number_set = set()
        for key, val in grouped.items():
            journal_id = self.env['account.journal'].browse(journal_data.get(key[1]))
            bookyear = int(key[3], 36)
            if not bookyear or (self.only_open and bookyear not in param_data['openyears']):
                continue
            period_number = len(param_data['period_date'][bookyear]) - 2
            period = min(int(key[4]), period_number + 1)  # closing is 99 in winbooks, not 13
            start_period_date = param_data['period_date'][bookyear][period]
            if 1 <= period < period_number:
                end_period_date = param_data['period_date'][bookyear][period + 1] + timedelta(days=-1)
            elif period == period_number:  # take the last day of the year = day of closing
                end_period_date = param_data['period_date'][bookyear][period + 1]
            else:  # opening (0) or closing (99) are at a fixed date
                end_period_date = start_period_date
            move_date = val[0].get('DATEDOC')
            move_data_dict = {
                'journal_id': journal_id.id,
                'move_type': 'out_invoice' if journal_id.type == 'sale' else 'in_invoice' if journal_id.type == 'purchase' else 'entry',
                'ref': '%s_%s' % (key[1], key[0]),
                'company_id': self.env.company.id,
                'date': min(max(start_period_date, move_date), end_period_date),
            }
            if not move_data_dict.get('journal_id') and key[1] == 'MATCHG':
                continue
            move_line_data_list = []
            move_amount_total = 0
            move_total_receivable_payable = 0

            # Split lines having a different sign on the balance in company currency and foreign currency
            tmp_val = []
            for rec in val:
                tmp_val += [rec]
                if rec['AMOUNTEUR'] * (rec['CURRAMOUNT'] or 0) < 0:
                    tmp_val[-1]['CURRAMOUNT'] = 0
                    tmp_val += [rec.copy()]
                    tmp_val[-1]['AMOUNTEUR'] = 0
            val = tmp_val

            # Basic line info
            for rec in val:
                currency = ResCurrency.search([('name', '=', rec.get('CURRCODE'))], limit=1)
                if currency == self.env.company.currency_id:
                    currency = self.env['res.currency']
                partner_id = self.env['res.partner'].browse(partner_data.get(rec.get('ACCOUNTRP'), False))
                account_id = self.env['account.account'].browse(account_data.get(rec.get('ACCOUNTGL')))
                matching_number = rec.get('MATCHNO') and '%s-%s' % (rec.get('ACCOUNTGL'), rec.get('MATCHNO')) or False
                line_data = {
                    'date': rec.get('DATE', False),
                    'account_id': account_id.id,
                    'partner_id': partner_id.id,
                    'date_maturity': rec.get('DUEDATE', False),
                    'name': rec.get('COMMENT'),
                    'currency_id': currency.id,
                    'debit': rec.get('AMOUNTEUR') if rec.get('AMOUNTEUR') and rec.get('AMOUNTEUR') >= 0 else 0.0,
                    'credit': abs(rec.get('AMOUNTEUR')) if rec.get('AMOUNTEUR') and rec.get('AMOUNTEUR') < 0 else 0.0,
                    'amount_currency': rec.get('CURRAMOUNT') if currency and rec.get('CURRAMOUNT') else 0.0,
                    'amount_residual_currency': rec.get('CURRAMOUNT') if currency and rec.get('CURRAMOUNT') else 0.0,
                    'winbooks_matching_number': matching_number,
                    'exclude_from_invoice_tab': rec.get('DOCORDER') == 'VAT' or (account_id.user_type_id.type in ('receivable', 'payable') and journal_id.type in ('sale', 'purchase')),
                }
                if matching_number:
                    reconcile_number_set.add(matching_number)
                if rec.get('AMOUNTEUR'):
                    move_amount_total = round(move_amount_total, 2) + round(rec.get('AMOUNTEUR'), 2)
                move_line_data_list.append((0, 0, line_data))
                if account_id.user_type_id.type in ('receivable', 'payable'):
                    move_total_receivable_payable += rec.get('AMOUNTEUR')

            # Compute refund value
            if journal_id.type in ('sale', 'purchase'):
                is_refund = move_total_receivable_payable < 0 if journal_id.type == 'sale' else move_total_receivable_payable > 0
            else:
                is_refund = False

            # Add tax information
            for line_data, rec in zip(move_line_data_list, val):
                if self.env['account.account'].browse(account_data.get(rec.get('ACCOUNTGL'))).user_type_id.type in ('receivable', 'payable'):
                    continue
                tax_line = self.env['account.tax'].browse(vatcode_data.get(rec.get('VATCODE') or rec.get('VATIMPUT', [])))
                if not tax_line and line_data[2]['account_id'] in account_central.values():
                    # this line is on a centralised account, most likely a tax account, but is not linked to a tax
                    # this is because the counterpart line (second repartition line) of a tax is not flagged in Winbooks
                    try:
                        counterpart = next(r for r in val if r['AMOUNTEUR'] == -rec['AMOUNTEUR'] and r['DOCORDER'] == 'VAT' and r['VATCODE'])
                        tax_line = self.env['account.tax'].browse(vatcode_data.get(counterpart['VATCODE']))
                    except StopIteration:
                        pass  # We didn't find a tax line that is counterpart with same amount
                repartition_line = is_refund and tax_line.refund_repartition_line_ids or tax_line.invoice_repartition_line_ids
                repartition_type = 'tax' if rec.get('DOCORDER') == 'VAT' else 'base'
                line_data[2].update({
                    'tax_ids': tax_line and rec.get('DOCORDER') != 'VAT' and [(4, tax_line.id)] or [],
                    'tax_tag_ids': [(6, 0, tax_line.get_tax_tags(is_refund, repartition_type).ids)],
                    'tax_repartition_line_id': rec.get('DOCORDER') == 'VAT' and repartition_line.filtered(lambda x: x.repartition_type == repartition_type and x.account_id.id == line_data[2]['account_id']).id or False,
                })
            move_line_data_list = [i for i in move_line_data_list if i[2]['account_id'] or i[2]['debit'] or i[2]['credit']]  # Remove empty lines

            # Adapt invoice-specific information
            if move_data_dict['move_type'] != 'entry':
                move_data_dict['partner_id'] = move_line_data_list[0][2]['partner_id']
                move_data_dict['invoice_date_due'] = move_line_data_list[0][2]['date_maturity']
                move_data_dict['invoice_date'] = move_line_data_list[0][2]['date']
                if is_refund:
                    move_data_dict['move_type'] = move_data_dict['move_type'].replace('invoice', 'refund')

            # Balance move, should not happen in an import from a complete db
            if move_amount_total:
                if not counter_part_created:
                    _logger.warning('At least one automatic counterpart has been created at import. This is probably an error. Please check entry lines with reference: ' + _('Counterpart (generated at import from Winbooks)'))
                counter_part_created = True
                account_id = journal_id.default_account_id
                account_id = account_id or (partner_id.property_account_payable_id if rec.get('DOCTYPE') in ['0', '1'] else partner_id.property_account_receivable_id)
                account_id = account_id or suspense_account  # Use suspense account as fallback
                line_data = {
                    'account_id': account_id.id,
                    'date_maturity': rec.get('DUEDATE', False),
                    'name': _('Counterpart (generated at import from Winbooks)'),
                    'credit': move_amount_total if move_amount_total >= 0 else 0.0,
                    'debit': abs(move_amount_total) if move_amount_total < 0 else 0.0,
                }
                move_line_data_list.append((0, 0, line_data))

            # Link all to the move
            move_data_dict['line_ids'] = move_line_data_list
            attachment = '%s_%s_%s' % (key[1], key[4], key[0])
            pdf_file = self.find_file(attachment, file_dir)
            pdf_file_list.append(pdf_file)
            move_data_list.append(move_data_dict)

            if len(move_data_list) % 100 == 0:
                _logger.info("Advancement: {}".format(len(move_data_list)))

        _logger.info("Creating moves")
        move_ids = self.env['account.move'].create(move_data_list)
        move_ids._post()
        _logger.info("Creating attachments")
        for move, pdf_file in zip(move_ids, pdf_file_list):
            if pdf_file:
                attachment_ids = []
                for pdf in pdf_file:
                    attachment_data = {
                        'name': pdf.split('/')[-1],
                        'type': 'binary',
                        'datas': base64.b64encode(open(pdf, "rb").read()),
                        'res_model': move._name,
                        'res_id': move.id,
                        'res_name': move.name
                    }
                    attachment_ids.append(IrAttachment.create(attachment_data))
                move.message_post(attachments=attachment_ids)
        _logger.info("Reconcile")
        for matching_number in reconcile_number_set:
            lines = self.env['account.move.line'].search([('winbooks_matching_number', '=', matching_number), ('reconciled', '=', False)])
            try:
                lines.with_context(no_exchange_difference=True).reconcile()
            except UserError as ue:
                if len(lines.account_id) > 1:
                    _logger.warning('Winbooks matching number {} uses multiple accounts: {}. Lines with that number have not been reconciled in Odoo.'.format(matching_number, ', '.join(lines.mapped('account_id.display_name'))))
                elif not lines.account_id.reconcile:
                    _logger.info("{} {} has reconciled lines, changing the config".format(lines.account_id.code, lines.account_id.name))
                    lines.account_id.reconcile = True
                    lines.with_context(no_exchange_difference=True).reconcile()
                else:
                    raise ue
        return True
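
The x2many values assembled throughout this importer ('line_ids', 'tax_ids', 'tax_tag_ids', and 'bank_ids' further down) use Odoo's ORM command tuples. A minimal sketch of the three forms the code above relies on, with placeholder ids that are illustrative only:

# Odoo x2many command tuples as used when building move and partner values above.
# All ids here are made up for illustration.
line_vals = {'account_id': 42, 'debit': 100.0, 'credit': 0.0}

move_vals = {
    'line_ids': [(0, 0, line_vals)],    # (0, 0, vals): create a new record and link it
    'tax_ids': [(4, 7)],                # (4, id): link an existing record, keep the others
    'tax_tag_ids': [(6, 0, [11, 12])],  # (6, 0, ids): replace the whole set with these ids
}
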
Example #28
import psycopg2
import datetime
from dbfread import DBF

conn = psycopg2.connect(
    "host='localhost' dbname='odoo' user='******' password='******'")
#conn = psycopg2.connect("host='192.168.1.27' dbname='odoo' user='******' password='******'")
cursor = conn.cursor()
datakosong = 0
# known sizes; a description whose last word ends in one of these is split into article + size
listOfUkuran = [
    '2', '3', '4', '5', '6', '26', '27', '28', '29', '30', '31', '32', '33',
    '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45'
]

# for record in DBF('/mnt/poserver/ICS/INV.DBF', encoding='iso-8859-1'):
#     print(record)
for item in DBF('/mnt/poserver/ics/DAT/INV.DBF', encoding='iso-8859-1'):
    # print(item['CODE'], item['DESC1'], item['LQOH'])
    words = item['DESC1'].split(" ")
    print(words[-1])
    # the size (ukuran) is taken as the last two characters of the last word
    ukuran = words[-1][-2:]
    if ukuran in listOfUkuran:
        print(ukuran)
        article = item['DESC1'].replace(ukuran, "").replace("'", "")
    else:
        ukuran = ""
        print("AllSize")
        article = item['DESC1'].replace("'", "")
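
The size detection above rests on one assumption: the size (ukuran) is the last two characters of the description's last word. A small, self-contained sketch of that rule as a reusable helper; the function name is hypothetical and the size list is copied from the script above:

# Hypothetical helper isolating the article/size split used above.
KNOWN_SIZES = {
    '2', '3', '4', '5', '6', '26', '27', '28', '29', '30', '31', '32', '33',
    '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45',
}

def split_article_size(desc):
    """Return (article, size); size is '' when no recognised size is found."""
    size = desc.split(" ")[-1][-2:]
    if size in KNOWN_SIZES:
        return desc.replace(size, "").replace("'", ""), size
    return desc.replace("'", ""), ""

print(split_article_size("SANDAL JEPIT 42"))  # ('SANDAL JEPIT ', '42')
print(split_article_size("TOPI HITAM"))       # ('TOPI HITAM', '')
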
Example #29
    def import_partner(self, file_dir, files, civility_data, category_data, account_data):
        """Import partners from *_csf*.dbf files.
        The data in those files are the partner details, type, category,
        bank information, and central accounts.
        :return: a dictionary whose keys are the Winbooks partner references and
            the values are the partner ids in Odoo.
        """
        _logger.info("Import Partners")
        partner_data = {}
        ResBank = self.env['res.bank']
        ResCountry = self.env['res.country']
        ResPartner = self.env['res.partner']
        ResPartnerBank = self.env['res.partner.bank']
        partner_data_dict = {}
        for file_name in files:
            for rec in DBF(join(file_dir, file_name), encoding='latin').records:
                if not rec.get('NUMBER'):
                    continue
                partner = ResPartner.search([('ref', '=', rec.get('NUMBER'))], limit=1)
                if partner:
                    partner_data[rec.get('NUMBER')] = partner.id
                if not partner:
                    vatcode = rec.get('VATNUMBER') and rec.get('COUNTRY') and (rec.get('COUNTRY') + rec.get('VATNUMBER').replace('.', ''))
                    if not rec.get('VATNUMBER') or not rec.get('COUNTRY') or not ResPartner.simple_vat_check(rec.get('COUNTRY').lower(), vatcode):
                        vatcode = ''
                    data = {
                        'ref': rec.get('NUMBER'),
                        'name': rec.get('NAME1'),
                        'street': rec.get('ADRESS1'),
                        'country_id': ResCountry.search([('code', '=', rec.get('COUNTRY'))], limit=1).id,
                        'city': rec.get('CITY'),
                        'street2': rec.get('ADRESS2'),
                        'vat': vatcode,
                        'phone': rec.get('TELNUMBER'),
                        'zip': rec.get('ZIPCODE') and ''.join([n for n in rec.get('ZIPCODE') if n.isdigit()]),
                        'email': rec.get('EMAIL'),
                        'active': not rec.get('ISLOCKED'),
                        'title': civility_data.get(rec.get('CIVNAME1'), False),
                        'category_id': [(6, 0, [category_data.get(rec.get('CATEGORY'))])] if category_data.get(rec.get('CATEGORY')) else False
                    }
                    if partner_data_dict.get(rec.get('IBANAUTO') or 'num' + rec.get('NUMBER')):
                        for key, value in partner_data_dict[rec.get('IBANAUTO') or 'num' + rec.get('NUMBER')].items():
                            if value:  # Winbooks has different partners for customer/supplier; here we merge the data of the two
                                data[key] = value
                    if rec.get('NAME2'):
                        data.update({
                            'child_ids': [(0, 0, {'name': rec.get('NAME2'), 'title': civility_data.get(rec.get('CIVNAME2'), False)})]
                        })
                    # manage the bank account of the partner
                    if rec.get('IBANAUTO'):
                        partner_bank = ResPartnerBank.search([('acc_number', '=', rec.get('IBANAUTO'))], limit=1)
                        if partner_bank:
                            data['bank_ids'] = [(4, partner_bank.id)]
                        else:
                            bank = ResBank.search([('name', '=', rec.get('BICAUTO'))], limit=1)
                            if not bank:
                                bank = ResBank.create({'name': rec.get('BICAUTO')})
                            data.update({
                                'bank_ids': [(0, 0, {
                                    'acc_number': rec.get('IBANAUTO'),
                                    'bank_id': bank.id
                                })],
                            })
                    # manage the default payable/receivable accounts for the partner
                    if rec.get('CENTRAL'):
                        if rec.get('TYPE') == '1':
                            data['property_account_receivable_id'] = account_data[rec.get('CENTRAL')]
                        else:
                            data['property_account_payable_id'] = account_data[rec.get('CENTRAL')]

                    partner_data_dict[rec.get('IBANAUTO') or 'num' + rec.get('NUMBER')] = data
                    if len(partner_data_dict) % 100 == 0:
                        _logger.info("Advancement: {}".format(len(partner_data_dict)))

        partner_ids = ResPartner.create(partner_data_dict.values())
        for partner in partner_ids:
            partner_data[partner.ref] = partner.id
        return partner_data
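
One detail worth noting in import_partner: Winbooks stores a customer record and a supplier record for the same partner, so values are staged in partner_data_dict, keyed by IBAN when available (otherwise by 'num' + the partner number), and non-empty fields of an already-staged twin are carried over before the single create call. A rough standalone sketch of that merge step, with illustrative field values:

# Standalone sketch of the customer/supplier merge performed in import_partner above.
def stage_partner(staged, rec_key, data):
    """Carry non-empty values of an already-staged twin record into data, then stage it."""
    previous = staged.get(rec_key)
    if previous:
        for field, value in previous.items():
            if value:  # keep whatever the customer/supplier twin already provided
                data[field] = value
    staged[rec_key] = data

staged = {}
stage_partner(staged, 'BE68539007547034', {'ref': 'C0042', 'name': 'Azure Interior', 'email': ''})
stage_partner(staged, 'BE68539007547034', {'ref': 'S0017', 'name': 'Azure Interior', 'email': 'azure@example.com'})
print(staged['BE68539007547034'])
# {'ref': 'C0042', 'name': 'Azure Interior', 'email': 'azure@example.com'}
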
Example #30
from dbfread import DBF

ybraw = open('./output/yb18.csv', "w")
table = DBF('./DBF/yb.dbf', load=True, ignore_missing_memofile=True)
# write the CSV header, then one row per record, splitting SYS_ZDM into its first 12 and next 2 characters
print('qhdm', 'sq', 'csdbrs', 'csdbhs', 'ncdbrs', 'ncdbhs', sep=',', file=ybraw)
for record in table.records:
    print(record['SYS_ZDM'][0:12], record['SYS_ZDM'][12:14],
          record['YZH21110'], record['YZH21120'],
          record['YZH21210'], record['YZH21220'],
          sep=",", file=ybraw)
ybraw.close()
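
The same export can also go through the standard csv module, which handles separators and any needed quoting; a variant of the script above under the same assumptions about the yb.dbf field names:

# csv-module variant of the yb.dbf export above (same field-name assumptions).
import csv
from dbfread import DBF

table = DBF('./DBF/yb.dbf', load=True, ignore_missing_memofile=True)
with open('./output/yb18.csv', 'w', newline='') as out:
    writer = csv.writer(out)
    writer.writerow(['qhdm', 'sq', 'csdbrs', 'csdbhs', 'ncdbrs', 'ncdbhs'])
    for record in table.records:
        writer.writerow([
            record['SYS_ZDM'][0:12], record['SYS_ZDM'][12:14],
            record['YZH21110'], record['YZH21120'],
            record['YZH21210'], record['YZH21220'],
        ])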