Example #1
def escribir(regs, archivos=None, carpeta=None):
    "Write the list of invoice dictionaries to dbf tables"
    if DEBUG:
        print("Creando DBF...")
    if not archivos:
        archivos = {}

    for reg in regs:
        formatos = [
            ('Encabezado', ENCABEZADO, [reg]),
            ('Detalle', DETALLE, reg.get('detalles', [])),
            ('Iva', IVA, reg.get('ivas', [])),
            ('Tributo', TRIBUTO, reg.get('tributos', [])),
            ('Permiso', PERMISO, reg.get('permisos', [])),
            ('Comprobante Asociado', CMP_ASOC, reg.get('cbtes_asoc', [])),
            ('Dato', DATO, reg.get('datos', [])),
        ]
        for nombre, formato, l in formatos:
            claves, campos = definir_campos(formato)
            filename = archivos.get(nombre.lower(), "%s.dbf" % nombre[:8])
            # build an absolute path if a folder is given
            if carpeta is not None:
                filename = os.path.join(carpeta, filename)
            if DEBUG:
                print("leyendo tabla", nombre, filename)
            tabla = dbf.Table(filename, campos)
            # recent versions of the dbf module require the new table to be opened before appending
            tabla.open(mode=dbf.READ_WRITE)

            for d in l:
                r = {}
                for fmt in formato:
                    clave, longitud, tipo = fmt[0:3]
                    if clave == 'id':
                        v = reg['id']
                    else:
                        v = d.get(clave, None)
                    if DEBUG:
                        print(clave, v, tipo)
                    if v is None and tipo == A:
                        v = ''
                    if (v is None or v == '') and tipo in (I, N):
                        v = 0
                    if tipo == A:
                        # Python 3: character fields expect text, so keep str and decode bytes
                        if isinstance(v, str):
                            pass
                        elif isinstance(v, bytes):
                            v = v.decode('latin1', 'ignore')
                        else:
                            v = str(v)
                    r[dar_nombre_campo(clave)] = v
                registro = tabla.append(r)
            tabla.close()
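
A minimal usage sketch for escribir(); the file name, folder and invoice values below are made up, and the real field names come from the module's ENCABEZADO/DETALLE formats:

factura = {"id": 1}   # hypothetical invoice; the remaining ENCABEZADO fields default to empty values
escribir([factura], archivos={"encabezado": "encabeza.dbf"}, carpeta="/tmp")
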
Example #2
    def deletefromupdate(self):
        logging.info('Deleting info about updates')
        table = dbf.Table(self.pathtodbf + '/1SUPDTS.DBF')
        with table.open(mode=dbf.READ_WRITE):
            for record in table:
                if record['dbsign'] == self.dbsign:
                    if record['typeid'] in self.objects:
                        if record['objid'].rstrip() in self.objects[
                                record['typeid']]:
                            dbf.delete(record)

            table.pack()
        logging.info('Deleting info about updates - done')
Example #3
    def getnewobjects(self):
        table = dbf.Table(self.pathtodbf + '/1SUPDTS.DBF')
        objects = {}
        logging.info('Reading updates')
        with table.open(mode=dbf.READ_ONLY):
            for record in table:
                if record['dbsign'] == self.dbsign:
                    tabletype = record['typeid']
                    objlist = objects.get(tabletype, set())
                    objlist.add(record['objid'].rstrip())
                    objects[tabletype] = objlist
        self.objects = objects
        logging.info('Reading updates - done')
Example #4
def dbf_csv(variable_name, config):
    vdef = parse_variables.variable_definition(variable_name, config)

    table = dbf.Table(os.path.join(DBF_PATH,
                                   dbf_fname(vdef['entname'], vdef['name'])))
    table.open()

    with open(os.path.join(CSV_PATH, csv_fname(vdef['entname'], vdef['name'])), 'w') as f:
        w = unicodecsv.writer(f)
        w.writerow(['radio'] + vdef['value_labels_list'] + ['TOTAL'])

        for row in table:
            w.writerow(row)
Example #5
    def init(self, db_path):
        try:
            self._sh_db = dbf.Table(db_path['SH'], codepage=self._codepage)
            # self._sz_db = dbf.Table(db_path['SZ'])
        except KeyError:
            log = VtLogData()
            log.gatewayName = 'CastMdApi'
            log.logContent = u'dbf路径配置出错,请检查'  # "dbf path configuration error, please check"
            self.onLog(log)
            return
        self.active = True
        self._req_thread.start()
        self._hq_prices_thread.start()
        self.on_inited()
Example #6
def aggreate_dbf(dbf_list,
                 headers,
                 output_name,
                 parent_dir=".",
                 begin=11,
                 end=27):
    f = open(output_name, "w")
    f.write(",".join(headers) + "\n")
    for q in dbf_list:
        g = dbf.Table(os.path.join(parent_dir, q))
        g.open()
        l = q[begin:end] + "," + ",".join(map(str, list(g[0]))) + "\n"
        f.write(l)
    f.close()
Example #7
    def collect_annual_data_byRP_from_dbf_country(self):

        contatore_si = 0
        lista_si_dbf = []

        direttorio = self.proj_dir + self.paese
        dct_valori_inondazione_annuale = {}
        for direttorio_principale, direttorio_secondario, file_vuoto in os.walk(
                direttorio):
            if direttorio_principale != direttorio:
                linea_taglio = 0
                contatore = 0
                for lettera in direttorio_principale:
                    contatore += 1
                    if lettera == '_':
                        linea_taglio = contatore
                admin_name = direttorio_principale[0:linea_taglio -
                                                   1].split("\\")[1]
                admin_code = direttorio_principale[linea_taglio:]
                files_dbf = glob.glob(direttorio_principale + "/*.dbf")
                for file in files_dbf:
                    fileName, fileExtension = os.path.splitext(file)
                    if 'stat' in fileName:
                        contatore_si += 1
                        lista_si_dbf.append(direttorio_principale)
                        try:
                            tabella = dbf.Table(file)
                            tabella.open()
                            dct_valori_inondazione_annuale[admin_code] = {}
                            dct_valori_inondazione_annuale[admin_code][
                                'adm_name'] = admin_name
                            for recordio in tabella:
                                dct_valori_inondazione_annuale[admin_code][
                                    'adm_name'] = admin_name
                                if recordio.value > 0:
                                    dct_valori_inondazione_annuale[admin_code][
                                        recordio.value] = recordio.sum
                        except:
                            pass

                lista_stat_dbf = [25, 50, 100, 200, 500, 1000]
                for valore in dct_valori_inondazione_annuale.items():
                    quanti_rp = len(valore[1].keys())
                    if quanti_rp < 6:
                        for rp in lista_stat_dbf:
                            if rp not in valore[1].keys():
                                dct_valori_inondazione_annuale[
                                    valore[0]][rp] = 0

        return dct_valori_inondazione_annuale
Example #8
def dbf2csv(f, **kwargs):
    """
    Convert a dBASE .dbf file to csv.
    """
    with dbf.Table(f.name) as db:
        column_names = db.field_names
        table = agate.Table(db, column_names)

    output = six.StringIO()
    table.to_csv(output)
    result = output.getvalue()
    output.close()

    return result
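
A short usage sketch, assuming a local file datos.dbf (the name is made up); dbf2csv() only needs an object whose .name attribute points at the table:

with open("datos.dbf", "rb") as f:  # hypothetical file name
    print(dbf2csv(f))
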
Example #9
def process_sogaz_control_file_to_csv(path_name, file_name):

    dbf_file = dbf.Table(r'S:\_out\reestr.dbf')
    dbf_file.open()

    index_numer = dbf_file.create_index(
        lambda rec: (rec.fam.strip().upper() + ' ' + rec.im.strip().upper() +
                     ' ' + rec.ot.strip().upper(),
                     rec.dr, rec.ds.strip(), rec.cod, rec.d_u))

    file_full_name = os.path.join(path_name, file_name)
    wb = openpyxl.load_workbook(file_full_name)
    start_row = 3 - 1
    ws = wb.worksheets[0]
    for row_num, row in enumerate(ws.iter_rows()):
        if row_num < start_row:
            continue
        row_num += 1
        fam_value = row[0].value
        if fam_value is None:
            continue
        if 'Итого' in fam_value:
            continue
        dr_value = row[1].value
        ds_value = row[4].value
        cod_value = row[6].value
        date_value1 = row[8].value
        date_value2 = ws.cell(row_num + 1, 9).value
        sum_value = row[10].value
        history_value = row[11].value
        recs = index_numer.search(
            match=(fam_value.strip(),
                   datetime.datetime.strptime(dr_value, '%d.%m.%Y').date(),
                   ds_value.strip(), int(cod_value),
                   datetime.datetime.strptime(date_value2, '%d.%m.%Y').date()))
        # for rec_numer in recs:
        #     logging.warning(rec_numer)
        if len(recs) == 1:
            doctor_name = recs[0]['NAME']
            logging.warning(doctor_name)
            ws.cell(row_num, 15).value = doctor_name
        else:
            logging.warning([
                len(recs), fam_value, dr_value, ds_value,
                int(cod_value), date_value1, date_value2, sum_value,
                history_value
            ])

    dbf_file.close()
    wb.save(file_full_name)
Example #10
def create_columns_from_dbf(data, context):
    tmp = tempfile.NamedTemporaryFile(suffix='.dbf')
    tmp.file.write(data)
    tmp.file.flush()
    dbt = dbf.Table(tmp.name)
    for fieldname in dbt.field_names:
        fieldinfo = dbt.field_info(fieldname)
        column_type = None
        column_lenght = 0
        if fieldinfo[0] in ['N', 'F', 'B', 'Y']:
            column_type = 'Float'
            if fieldinfo[2] == 0:
                column_type = 'Integer'
        elif fieldinfo[0] in [
                'I',
        ]:
            column_type = 'Integer'
        elif fieldinfo[0] in ['C']:
            column_type = 'String'
            column_lenght = fieldinfo[1]
        elif fieldinfo[0] in ['D']:
            column_type = 'Date'
        elif fieldinfo[0] in ['T']:
            column_type = 'DateTime'
        elif fieldinfo[0] in ['L']:
            column_type = 'Boolean'
        elif fieldinfo[0] in ['P']:
            logger.warn('Picture type not supported')
        if column_type:
            name = title_to_name(fieldname, blacklist=context.keys())
            if fieldname.endswith('_'):
                destname = fieldname[:-1]
            else:
                destname = fieldname
            if len(destname) < 2:
                destname = destname + '0'
            column = RDBTableColumn(parent=context,
                                    name=name,
                                    title=fieldname,
                                    src_column_name=fieldname,
                                    dest_column_name=destname,
                                    column_type=column_type,
                                    column_lenght=column_lenght,
                                    is_pk=False)
            DBSession.add(column)
        else:
            raise TypeError(u'Unsupported type %s' % fieldinfo[0])
    dbt.close()
    tmp.close()
Example #11
def leer(archivos=None, carpeta=None):
    "Read the dbf tables and return the invoices as a dict of dictionaries keyed by id"
    if DEBUG:
        print("Leyendo DBF...")
    if archivos is None:
        archivos = {}
    regs = {}
    formatos = [
        ("Encabezado", ENCABEZADO, None),
        ("Detalle", DETALLE, "detalles"),
        ("Iva", IVA, "ivas"),
        ("Tributo", TRIBUTO, "tributos"),
        ("Permiso", PERMISO, "permisos"),
        ("Comprobante Asociado", CMP_ASOC, "cbtes_asoc"),
        ("Dato", DATO, "datos"),
    ]
    for nombre, formato, subclave in formatos:
        filename = archivos.get(nombre.lower(), "%s.dbf" % nombre[:8]).strip()
        if not filename:
            continue
        # build an absolute path if a folder is given
        if carpeta is not None:
            filename = os.path.join(carpeta, filename)
        if DEBUG:
            print("leyendo tabla", nombre, filename)
        tabla = dbf.Table(filename, codepage=CODEPAGE)
        # recent versions of the dbf module require the table to be opened before iterating
        tabla.open()
        for reg in tabla:
            r = {}
            d = reg.scatter_fields()
            for fmt in formato:
                clave, longitud, tipo = fmt[0:3]
                nombre = dar_nombre_campo(clave)
                v = d.get(nombre)
                r[clave] = v
            # add to the result
            if formato == ENCABEZADO:
                r.update({
                    "detalles": [],
                    "ivas": [],
                    "tributos": [],
                    "permisos": [],
                    "cbtes_asoc": [],
                    "datos": [],
                })
                regs[r["id"]] = r
            else:
                regs[r["id"]][subclave].append(r)

    return regs
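
A usage sketch with made-up file and folder names; leer() returns the invoices keyed by their "id" field:

facturas = leer(archivos={"encabezado": "encabeza.dbf"}, carpeta="/tmp")
for id_factura, factura in facturas.items():
    print(id_factura, len(factura["detalles"]))
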
Example #12
def reader(fn):
    t = dbf.Table(fn)
    t.open()

    for ri, r in enumerate(t):
        print("record %d" % ri)

        for f in t.field_names:
            try:
                print("%s:\t%s" % (f, r[f]))
            except ValueError as e:
                print("%s:\tValueError %s" % (f, e))
        print()  # blank line between records

    t.close()
Example #13
def write_to_table(dbf_path, references_dataFrame, fields=['WT_ASS', 'TYPE']):
    count = 0

    table = dbf.Table(dbf_path)
    table.open()
    for record in table:
        with record as r:

            if 'WT_ASS' in fields:
                r.wt_ass = references_dataFrame.iloc[count]['WT_ASS']
            if 'TYPE' in fields:
                r.type = references_dataFrame.iloc[count]['TYPE']

            count += 1
    table.close()
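
A usage sketch, assuming references_dataFrame is a pandas DataFrame with one row per DBF record and columns matching fields (the path and values below are hypothetical):

import pandas as pd

refs = pd.DataFrame({"WT_ASS": [1.5, 2.0], "TYPE": ["A", "B"]})  # hypothetical values
write_to_table("parcels.dbf", refs)                              # hypothetical path
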
Example #14
def generate_examinations_file(examinations, path_name, generate_file=True):
    if generate_file:
        table = dbf.Table(os.path.join(path_name, 'obsled.dbf'),
                          '''fam C(50)
                          ;im C(50)
                          ;ot C(50)
                          ;code C(6)
                          ;d_u D
                          ;dr D
                          ;doctor C(100)
                          ;speciality C(2)''',
                          codepage='cp866')
    else:
        table = dbf.Table(os.path.join(path_name, 'obsled.dbf'))

    table.open(mode=dbf.READ_WRITE)
    for exam in examinations:
        # logging.warning(exam)
        record = {}
        for key in table.field_names:
            record[key] = exam[key]
        table.append(record)

    table.close()
Example #15
def main():
    dirs = filter(os.path.isdir, sorted(os.listdir(".")))
    for d in dirs:
        files = os.listdir(d)
        dbf_list = [p for p in files if p.endswith(".dbf")]  # a list so it can be indexed on Python 3
        f = dbf.Table(os.path.join(d, dbf_list[0]))
        f.open()
        headers = ['time'] + f.field_names
        f.close()
        aggreate_dbf(dbf_list,
                     headers,
                     "%s.csv" % d,
                     parent_dir=d,
                     begin=11,
                     end=27)
        pass
Example #16
def process_stopcheck(branch_code):
    global branchcode
    global skipstoppedcheck
    branchcode = branch_code
    try:
        stopdbf = dbf.Table('STOPFILE.DBF')
        stopdbf.open()
    except DbfError:
        print '--' * 5, 'STOP CHECK NOT FOUND ------------ SKIPPING ---------------------------------'
        skipstoppedcheck = True
    if not skipstoppedcheck:
        filelist = os.listdir(os.getcwd())
        for fil in filelist:
            if fil.startswith(branchcode) and fil.endswith('.dat'):
                stopcheckfile = fil
        oldtonew = open(stopcheckfile, 'rb').readlines()
        stopfile = open(branchcode + '-StoppedCheques.csv', 'wb')
        stopwriter = csv.writer(stopfile, delimiter='|')
        oldtonewrecord = []
        stopchecks = []
        for old2new in oldtonew:
            for rec in stopdbf:
                oldtonewrecord = old2new.split('|')
                if (int(rec.fld01) == int(
                        oldtonewrecord[0])) and (oldtonewrecord[3]
                                                 == 'CurrentAccount'):
                    stopchecks.append(oldtonewrecord[1])
                    stopchecks.append(rec.fld06)  # Amount
                    stopchecks.append(re.sub(
                        '[\D]', '', rec.fld02))  # From Stop Cheque Referrence
                    stopchecks.append('ETB')  # Iso Currency Code
                    stopchecks.append(re.sub(
                        '[\D]', '', rec.fld02))  # Stop Cheque Referrence
                    stopchecks.append(rec.fld04)  # Stop Date
                    stopchecks.append(re.sub('[\D]', '',
                                             rec.fld02))  # STOPPEDCHEQUEID
                    stopchecks.append('REQUEST BY CUSTOMER' if rec.fld06 > 0
                                      else 'LOST CHECK')  # Stop Reason
                    stopchecks.append(re.sub(
                        '[\D]', '', rec.fld03))  # To Stop Cheque Referrence
                    stopchecks.append('superit')  # User Id
                    print 'RANGE OF CHECK STOPPED FOR ACCOUNT.\t' + rec.fld01 + '\t', (
                        int(re.sub('[\D]', '', rec.fld03)) -
                        int(re.sub('[\D]', '', rec.fld02)))
                    stopwriter.writerow(stopchecks)
                    stopchecks[:] = []
        stopfile.close()
Example #17
    def create_csv(self):
        """Get all DBF file records and create CSV files from them"""

        for source_path in self.get_dir_paths():
            for f_year in self.get_file_year():
                out_csv = source_path + "/BBAT_TRMM_" + f_year + ".csv"  # Output CSV file
                with open(out_csv, "wb") as output_csv:  # Open CSV file for writing
                    csv_data = []
                    for file in os.listdir(source_path):
                        # Check that the file is a DBF file for the correct year
                        if file.startswith("DBF_") and file.endswith(".dbf"):
                            if file[4:8] == f_year:
                                csv_data.append(" ")
                                # Month and year as text
                                csv_data.append(file[4:8] + "_" + calendar.month_name[int(file[8:10].strip("0"))])
                                # csv_data.append(" ")
                                # Append ID and precipitation header
                                csv_data.append("ID,Monthly_mm")
                                file_path = os.path.join(source_path, file).replace("\\", "/")
                                print("READING RECORDS FROM ...... " + file)
                                with dbf.Table(file_path) as table:  # Open DBF file
                                    count = 0
                                    for record in table:  # Get DBF records
                                        count += 1
                                        # Concatenate the record count and value
                                        csv_values = str(count) + "," + str(record.bbat_trmm1)
                                        # Append DBF values of the year
                                        csv_data.append(csv_values)

                    # Create a CSV writer object with comma delimiter
                    writer = csv.writer(output_csv, delimiter=',')
                    print("CREATING CSV FILE FOR ...... " + f_year)
                    for values in csv_data:  # Iterate the data array
                        # Write each array row to the CSV file
                        writer.writerow(values.split(","))
        print("CSV FILES SUCCESSFULLY CREATED!!!!!")
Example #18
    def deletefromupdate(self):
        logging.info('Deleting info about updates')
        filename = os.path.join(self.pathtodbf, '1SUPDTS.DBF')
        indexfile = os.path.join(self.pathtodbf, '1SUPDTS.CDX')
        table = dbf.Table(filename)
        with table.open(mode=dbf.READ_WRITE):
            for record in table:
                if record['dbsign'] == self.dbsign:
                    if record['typeid'] in self.objects:
                        if record['objid'].rstrip() in self.objects[
                                record['typeid']]:
                            dbf.delete(record)

            table.pack()
        if os.path.exists(indexfile):
            os.remove(indexfile)
        logging.info('Deleting info about updates - done')
Example #19
def load_defects():

    defect_types_dbf = dbf.Table(r'site_app\files\defect_type.dbf')
    defect_types_dbf.open()

    for rec in defect_types_dbf:
        defect_type_recs = db.session.query(RefDefectTypes).filter_by(
            defect_type_code=rec["SCODE"].strip()).all()
        if not defect_type_recs:
            defect_type = RefDefectTypes(defect_type_code=rec["SCODE"].strip(),
                                         defect_name=rec["NAME"].strip())
            db.session.add(defect_type)
            db.session.commit()
        logging.warning(defect_type_recs)

    logging.warning("ALL")
    defect_types_dbf.close()
Example #20
def read_words(filename):
    words = []
    table = dbf.Table(filename, codepage='cp936')
    table.open()
    for record in table:
        for idx in range(1, 25):
            f_w = '单词' + str(idx)    # '单词' = "word" column
            w = record[f_w].strip()
            if len(w) == 0:
                continue
            f_p = '音标' + str(idx)    # '音标' = "phonetic transcription" column
            f_h = '词意' + str(idx)    # '词意' = "meaning" column
            word = {'w': w, 'p': record[f_p].strip(), 'h': record[f_h].strip()}
            words.append(word)

    table.close()
    return words
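
A usage sketch with a made-up file name; each returned dict holds the word ('w'), its phonetic transcription ('p') and its meaning ('h'):

words = read_words("unit01.dbf")  # hypothetical file name
for word in words[:3]:
    print(word['w'], word['p'], word['h'])
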
Example #21
def load_otdels():
    logging.warning("otdels load start")
    strings_dbf = dbf.Table(os.path.join(STAT_PATH, 'strings.dbf'))
    strings_dbf.open()
    for rec in strings_dbf:
        if rec["CODE"] == 5:
            logging.warning(rec["SCODE"])

            otdel_recs = db.session.query(RefOtdels).filter_by(
                otdel_stat_code=rec["SCODE"].strip()).all()
            if not otdel_recs:
                otdel_rec = RefOtdels(otdel_stat_code=rec["SCODE"].strip(),
                                      otdel_name=rec["NAME"].strip())
                db.session.add(otdel_rec)
                db.session.commit()

    logging.warning("otdels load complete")
    strings_dbf.close()
Example #22
    def criando_tabela_sistema(self, tab):

        array_campos = []
        resultado = ''
        strcampos = ''

        # BUILDING THE TABLE'S DATA STRUCTURE
        for campos in tab.planilha_campo_ids.sorted('sequence'):
            resultado = campos.mapeamento_id.name + ' ' + \
                        campos.mapeamento_id.tipo_campo
            if campos.mapeamento_id.tipo_campo != 'D':
                resultado += '(' + campos.mapeamento_id.tamanho_campo + ')'
            array_campos.append(resultado)

        strcampos = '; '.join([str(i) for i in array_campos])
        table = dbf.Table(tab.name, strcampos)
        table.open(mode=dbf.READ_WRITE)
        return table
Example #23
    def parse(self, file_path):
        result = list()
        if os.path.isfile(file_path):
            table = dbf.Table(file_path)
            if table:
                table.open()
                for i, record in enumerate(table):
                    value = Tariff_Type(number=i + 1)
                    if self.__check_record(record['c_tar']):
                        for key in self.dbf_keys:
                            if isinstance(record[key], date):
                                setattr(value, key, self.__convert_date(record[key]))
                            else:
                                setattr(value, key, record[key])
                        result.append(value)
                table.close()
        return result
Example #24
    def get_total_records(self):
        tmp_dbf = os.path.abspath('tmp/%s.dbf' % self.id)
        if not os.path.exists('tmp'):
            os.mkdir('tmp')
        if os.path.exists(tmp_dbf):
            os.remove(tmp_dbf)
        try:
            shutil.copy(self.filefrom, tmp_dbf)
        except:
            pass
        if not os.path.exists(tmp_dbf):
            self.log.error('task%s:%s copy %s to tmp_dbf fail:%s.dbf' %
                           (self.id, self.fileid, self.filefrom, self.id))
            return False
        records = dbf.Table(tmp_dbf)
        records.open()
        if records not in self.dbfs:
            self.dbfs.append(records)
        return records
Example #25
def dbf2csv(filename):
    '''
        requires the dbf package (sudo pip install dbf);
        pass the path+name of the table; the output will be
        written to the same folder with a .csv extension
    '''
    import dbf, os
    a = dbf.Table(filename).open()
    output = filename[:-4] + '.csv'
    dbf.export(a, filename=output, encoding='utf-8')
    f1 = open(output, 'r')
    f2 = open(output[:-4] + '_.csv', 'w')
    for line in f1:
        f2.write(line.replace(' ', ''))
    f1.close()
    f2.close()
    os.remove(output)
    os.rename(output[:-4] + '_.csv', output)
    del a
Example #26
def dbf_to_dict(filename):
    """Read a DBF file containing station information from Network Rail's
    Railway Network Inspire data. Produces a dict which maps from station
    three-alpha-code to its name and location.
    """
    t = dbf.Table(filename)

    stations = {}

    t.open()
    try:
        for row in t:
            stations[row.stn_code.strip()] = StationRecord(
                row.name.strip(), row.stn_code.strip(), row.gis_eastin,
                row.gis_northi)
    finally:
        t.close()

    return stations
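
StationRecord is not shown in the snippet; presumably it is a namedtuple along these lines (the field names are an assumption), after which the function can be called directly:

from collections import namedtuple

# Assumed shape of StationRecord; the real definition may differ.
StationRecord = namedtuple('StationRecord', ['name', 'stn_code', 'easting', 'northing'])

stations = dbf_to_dict('RailwayStations.dbf')  # hypothetical file name
print(stations.get('KGX'))
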
Example #27
def leer(archivos=None):
    "Read the dbf tables and return the invoices as a dict of dictionaries keyed by id"
    if DEBUG: print "Leyendo DBF..."
    if archivos is None: archivos = {}
    regs = {}
    formatos = [
        ('Encabezado', ENCABEZADO, None),
        ('Detalle', DETALLE, 'detalles'),
        ('Iva', IVA, 'ivas'),
        ('Tributo', TRIBUTO, 'tributos'),
        ('Permiso', PERMISO, 'permisos'),
        ('Comprobante Asociado', CMP_ASOC, 'cbtes_asoc'),
        ('Dato', DATO, 'datos'),
    ]
    for nombre, formato, subclave in formatos:
        filename = archivos.get(nombre.lower(), "%s.dbf" % nombre[:8]).strip()
        if not filename:
            continue
        if DEBUG: print "leyendo tabla", nombre, filename
        tabla = dbf.Table(filename, codepage=CODEPAGE)
        for reg in tabla:
            r = {}
            d = reg.scatter_fields()
            for fmt in formato:
                clave, longitud, tipo = fmt[0:3]
                nombre = dar_nombre_campo(clave)
                v = d.get(nombre)
                r[clave] = v
            # add to the result
            if formato == ENCABEZADO:
                r.update({
                    'detalles': [],
                    'ivas': [],
                    'tributos': [],
                    'permisos': [],
                    'cbtes_asoc': [],
                    'datos': [],
                })
                regs[r['id']] = r
            else:
                regs[r['id']][subclave].append(r)

    return regs
Example #28
def dbf_to_csv(dbf_table_pth):  # Input a dbf, output a csv, same name, same path, except extension
    """The actual dbf-to-csv conversion"""
    csv_fn = dbf_table_pth[:-4] + ".csv"  # Set the csv file name
    table = dbf.Table(dbf_table_pth, codepage='cp866')  # table variable is a DBF object
    with open(csv_fn, 'w', newline='', encoding='cp866') as f:  # create a csv file, fill it with dbf content
        writer = csv.writer(f, delimiter=',')
        table.open(mode=dbf.READ_WRITE)
        writer.writerow(table.field_names)  # write the column name
        if len(table.field_names) == 6:
            for record in table:  # write the rows
                a = []
                try:
                    a.append(record.regn)
                    a.append(record.code)
                    a.append(record.sim_r)
                    a.append(record.sim_v)
                    a.append(record.sim_itogo)
                    a.append(record.dt)
                    writer.writerow(a)
                except dbf.FieldMissingError:
                    break
        if len(table.field_names) == 2:
            for record in table:  # write the rows
                a = []
                try:
                    a.append(record.regn)
                    a.append(record.name_b)
                    writer.writerow(a)
                except dbf.FieldMissingError:
                    break
        elif len(table.field_names) == 3:
            for record in table:  # write the rows
                a = []
                try:
                    a.append(record.cont_sum_r)
                    a.append(record.cont_sum_v)
                    a.append(record.cont_sum)
                    writer.writerow(a)
                except dbf.FieldMissingError:
                    break

    return csv_fn  # return the csv name
Example #29
def cli(dbf_paths, sqlite_db, table, verbose):
    """
    Convert DBF files (dBase, FoxPro etc) to SQLite

    https://github.com/simonw/dbf-to-sqlite
    """
    if table and len(dbf_paths) > 1:
        raise click.ClickException("--table only works with a single DBF file")
    db = Database(sqlite_db)
    for path in dbf_paths:
        table_name = table if table else Path(path).stem
        if verbose:
            click.echo('Loading {} into table "{}"'.format(path, table_name))
        dbf_table = dbf.Table(str(path))  # separate name so the --table option is not clobbered
        dbf_table.open()
        columns = dbf_table.field_names
        db[table_name].insert_all(
            dict(zip(columns, list(row))) for row in dbf_table)
        dbf_table.close()
    db.vacuum()
Example #30
    def create_dbf_table(self, columns_names, group_by):
        if group_by:
            table_columns = [
                '{} {}'.format(col[0], col[1]) for name, col in
                ExportReportForm.GROUPED_EXPORT_COLUMNS.items()
                if name in columns_names
            ]
        else:
            table_columns = [
                '{} {}'.format(col[0], col[2])
                for col in ExportReportForm.FLAT_EXPORT_COLUMNS
                if col[0] in columns_names
            ]
        table_schema = '; '.join(table_columns)

        table_name = 'sessions_table-{}'.format(uuid.uuid4().urn.rsplit(
            ':', 1)[1])

        return dbf.Table(table_name, table_schema,
                         codepage='cp1251'), table_name
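
Several of the examples above (#14, #22 and #30) build tables from a semicolon-separated field-spec string. A self-contained sketch of that pattern, with made-up names and sizes:

import datetime
import dbf

table = dbf.Table('clients.dbf', 'name C(50); birth D; balance N(12,2)')
table.open(mode=dbf.READ_WRITE)
table.append({'name': 'Ada', 'birth': datetime.date(1990, 1, 31), 'balance': 10.5})
table.close()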