def _postprocessTazdata(self, src_dbf_file, append_dbf_file, dat_file):
        """ This hurts, it really does.  But group quarters needed to come from the zmast file, and
            HHPOP came from sfzones, and the TazDataProcessor couldn't handle combining those to
            make the total POP so we have to do so here.  Some day I hope this is cleaner -- maybe
            if group quarters population came out of UrbanSim, or the TazDataProcessor were a bit
            smarter...
        """
        shutil.move(src_dbf_file, src_dbf_file + ".bak")
        dbfin = dbf.Dbf(src_dbf_file + ".bak", readOnly=1, ignoreErrors=True)
        dbfin2 = dbf.Dbf(append_dbf_file, readOnly=1)
        if len(dbfin) != len(dbfin2):
            raise StandardError, "%s and %s have different number of records" % (
                src_dbf_file, append_dbf_file)

        # read the append dbf file
        dist22 = {}
        dist40 = {}
        for rec in dbfin2:
            dist22[rec["SFTAZ"]] = rec["DIST22"]
            dist40[rec["SFTAZ"]] = rec["DIST40"]
        dbfin2.close()

        # add the two new fields
        dbfout = dbf.Dbf(src_dbf_file, new=True)
        datout = open(dat_file, 'w')
        fields = []
        for field in dbfin.fieldDefs:
            if field.name == "DIST51":
                dbfout.addField(("DIST22", field.typeCode, field.length,
                                 field.decimalCount))
                dbfout.addField(("DIST40", field.typeCode, field.length,
                                 field.decimalCount))
                fields.append("DIST22")
                fields.append("DIST40")
            dbfout.addField(
                (field.name, field.typeCode, field.length, field.decimalCount))
            fields.append(field.name)

        # update the dbf
        for rec in dbfin:

            newrec = dbfout.newRecord()
            for field in fields:
                if field == "POP":
                    newrec[field] = rec["GQPOP"] + rec["HHPOP"]
                elif field == "DIST22":
                    newrec[field] = dist22[rec["SFTAZ"]]
                elif field == "DIST40":
                    newrec[field] = dist40[rec["SFTAZ"]]
                else:
                    newrec[field] = rec[field]
                if field != fields[0]:
                    datout.write(" ")
                datout.write(str(newrec[field]))
            newrec.store()
            datout.write("\n")
        dbfout.close()
        datout.close()
        dbfin.close()
Example #2
    def test(self):
        test_filename = "test.dbf"

        db = dbf.Dbf(test_filename, new=True)
        rec_fields = ['NAME', 'SURNAME', 'INITIALS', 'BIRTHDATE']
        db.addField(
            ("NAME", "C", 15),
            ("SURNAME", "C", 25),
            ("INITIALS", "C", 10),
            ("BIRTHDATE", "D"),
        )

        ## fill DBF with some records

        expected_data = [
            ("John", "Miller", "JM", (1980, 1, 2), datetime.date(1980, 1, 2)),
            ("Andy", "Larkin", "AL", datetime.date(1981, 2, 3),
             datetime.date(1981, 2, 3)),
            ("Bill", "Clinth", "", datetime.date(1982, 3,
                                                 4), datetime.date(1982, 3,
                                                                   4)),
            ("Bobb", "McNail", "", "19830405", datetime.date(1983, 4, 5)),
        ]

        for name, surname, initials, birthdate, _ in expected_data:
            rec = db.newRecord()
            rec["NAME"] = name
            rec["SURNAME"] = surname
            rec["INITIALS"] = initials
            rec["BIRTHDATE"] = birthdate
            rec.store()
        db.close()

        ## read DBF and print records

        db = dbf.Dbf(test_filename)
        data = list(db)

        self.assertEqual(len(data), len(expected_data))

        for exp_rec, _rec in zip(expected_data, data):
            exp_rec = exp_rec[:-2] + exp_rec[-1:]
            rec = tuple(_rec[f] for f in rec_fields)
            self.assertEqual(exp_rec, rec)

        ## change record

        rec = db[2]
        rec["INITIALS"] = "BC"
        rec.store()
Example #3
    def DBFConverter(self, ifile, ofile, conv_type):
        # create the DBF reader object
        db = dbf.Dbf(ifile)
        # create the file to output CSV data to
        fpath = ofile
        f = open(fpath,'w')
        # initialize the header container and CSV writer
        hdr = []
        c = csv.writer(f)

        # loop through all the field names and create the header row
        for fieldName in db.fieldNames:
            hdr += [fieldName]

        # write the head to the CSV file
        c.writerow(hdr)

        # loop through all the records and write each line to the CSV file
        for rec in db:
            # using asList makes a list as opposed to asDict
            c.writerow(rec.asList())

        # done reading records, so the DBF reader can be closed
        db.close()

        if conv_type in ['pandas']:
            # flush and close the CSV before pandas reads it back
            f.close()
            # take the csv file and open it as a pandas dataframe
            from_csv = pd.read_csv(fpath)
            # return a dataframe object
            return from_csv

        if conv_type in ['csvfileobject']:
            # the caller is responsible for closing the returned file object
            return f

        # close the CSV file
        f.close()
def process_referencefile(file_name):
    reference_db = dbf.Dbf(file_name)
    reference_new_file_name = file_name[:-4] + "_new.csv"
    with open(reference_new_file_name, "w") as output_handle:
        writer = csv.writer(output_handle)
        origin_fields = reference_db.fieldNames
        if "ID_X" in origin_fields:
            origin_fields.remove("ID_X")
        if "ID_Y" in origin_fields:
            origin_fields.remove("ID_Y")
        new_fields = [
            "ele_new", "loc_usr", "loc_ur", "region", "state_name",
            'new_location'
        ]
        fields = origin_fields + new_fields
        writer.writerow(fields)
        for rec in reference_db:
            new_row = []
            for field in origin_fields:
                new_row.append(rec[field])
            new_row.append(elevation_new_process(rec["ELEVATION"]))
            new_row.append(location_usr_process(rec['NLCD_2011_']))
            new_row.append(location_ur_process(rec['NLCD_2011_']))
            new_row.append(region_process(rec['STATECODE']))
            new_row.append(
                state_name_process(rec['STATECODE'], state_name_dict))
            new_row.append(
                new_location_process(rec['NLCD_2011_'], rec['LOCATION']))
            writer.writerow(new_row)
    reference_db.close()
Example #5
def estrazione_dati_da_dbf_dello_shape_e_conversione_pandas():

    only_dbf_da_shp = [
        f for f in listdir(countries_dir) if isfile(join(countries_dir, f))
    ]
    df_pop = pd.DataFrame(columns=[
        'iso3', 'adm0_code', 'adm0_name', 'adm1_code', 'adm1_name',
        'adm2_code', 'adm2_name', 'hectares', 'area_sqkm', 'area_sqft', 'pop'
    ])

    contatore = 0
    for fileggio in sorted(only_dbf_da_shp):
        fileName, fileExtension = path.splitext(fileggio)
        if fileExtension == '.dbf':
            db = dbf.Dbf(countries_dir + "\\" + fileggio)
            for rec in db:
                df_pop.at[contatore, 'iso3'] = rec["ISO3"]
                df_pop.at[contatore, 'adm0_code'] = rec["ADM0_CODE"]
                df_pop.at[contatore, 'adm0_name'] = rec["ADM0_NAME"]
                df_pop.at[contatore, 'adm1_code'] = rec["ADM1_CODE"]
                df_pop.at[contatore, 'adm1_name'] = rec["ADM1_NAME"]
                df_pop.at[contatore, 'adm2_code'] = rec["ADM2_CODE"]
                df_pop.at[contatore, 'adm2_name'] = rec["ADM2_NAME"]
                df_pop.at[contatore, 'hectares'] = rec["HECTARES"]
                df_pop.at[contatore, 'area_sqkm'] = rec["AREA_SQKM"]
                df_pop.at[contatore, 'area_sqft'] = rec["AREA_SQFT"]
                df_pop.at[contatore, 'pop'] = rec["SUM"]
                contatore += 1
            db.close()

    return df_pop
Example #6
    def listpath(e, dirpath, dirpath1):
        tStart = time.time()
        if ".dbf" not in dirpath1 and ".DBF" not in dirpath1:
            wx.MessageBox(u"不存在DBF檔 請重新選取", u"提示訊息")
        else:
            db = dbf.Dbf(dirpath1)
            for record in db:
                # print record['Plt_no'], record['Vil_dt'], record['Vil_time'],record['Bookno'],record['Vil_addr'],record['Rule_1'],record['Truth_1'],record['Rule_2'],record['Truth_2'],record['color'],record['A_owner1']
                filename = record['Plt_no'] + "." + record[
                    'Vil_dt'] + "." + record['Vil_time'] + u'-1'  # file name
                piece1.append(filename)  # file name
                piece1.append(record['Plt_no'])  # license plate
                piece1.append(record['Vil_dt'])  # date
                piece1.append(record['Vil_time'])  # time
                piece1.append(record['Bookno'])  # booklet/page number
                piece1.append(record['Vil_addr'])  # violation location
                piece1.append(record['Rule_1'])  # statute 1
                piece1.append(record['Truth_1'])  # statute 1 facts
                piece1.append(record['Rule_2'])  # statute 2
                piece1.append(record['Truth_2'])  # statute 2 facts
                piece1.append(record['color'])  # vehicle color
                piece1.append(record['A_owner1'])  # vehicle make
                record.store()

            print('Mission accomplished')
        tEnd = time.time()
        print("Spend  " + str((tEnd - tStart) // 1) + "  second")
Example #7
def set_records(values, errores):
    from dbfpy import dbf
    import os
    path_dbf = os.path.join(request.folder, 'static', 'economia', 'ban1.dbf')
    db = dbf.Dbf(path_dbf)

    try:
        for rec in db:
            found = False
            for v in values:
                if rec['NUM_IDEPER'] == v['NUM_IDEPER']:
                    rec["DIR_PERSO1"] = v["DIR_PERSO1"].upper()
                    rec["DIR_PERSO2"] = v["DIR_PERSO2"].upper()
                    rec.store()
                    if v["APELLIDO_1"]:
                        rec["APELLIDO_1"] = v["APELLIDO_1"]
                        rec.store()
                    if v["APELLIDO_2"]:
                        rec["APELLIDO_2"] = v["APELLIDO_2"]
                        rec.store()
                    rec["NOMBRE"] = v["NOMBRE"]
                    rec["NOMB_APELL"] = rec["NOMBRE"] + ' ' + rec[
                        "APELLIDO_1"] + ' ' + rec["APELLIDO_2"]
                    rec.store()
                    found = True
                    break
            if not found:
                errores.append({
                    'type': 'not_found',
                    'value': (rec["NUM_IDEPER"], rec["NOMB_APELL"])
                })

    except Exception, e:
        errores.append((str(e), v))
Example #8
def make_example_file(filename, fields, records, delete_last_record=False):
    field_names = [field[0] for field in fields]

    print('Creating', filename)
    print('  Fields:', ', '.join(field_names))
    print(' ', len(records), 'records')

    db = dbf.Dbf(filename, new=True)
    db.addField(*fields)

    for data in records:
        record = db.newRecord()
        for name, value in zip(field_names, data):
            record[name] = value
        record.store()

    if delete_last_record:
        # Delete the last one of those records.
        record.delete()
        record.store()

    try:
        db.close()
    except AttributeError:
        # This ignores the following error:
        #     self.memo.flush()
        # AttributeError: 'NoneType' object has no attribute 'flush'
        pass
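# --- Hedged usage sketch (not from the original source) ---
# A minimal illustration of calling make_example_file, assuming dbfpy is
# importable as `dbf`. The field tuples follow the (name, type code, length
# [, decimal count]) form that dbfpy's addField expects; every name and
# value below is hypothetical.
example_fields = [
    ("NAME", "C", 20),        # character field, width 20
    ("AGE", "N", 3, 0),       # numeric field, width 3, no decimals
    ("BIRTHDATE", "D"),       # date field
]
example_records = [
    ("Alice", 30, (1990, 5, 17)),
    ("Bob", 42, (1978, 1, 2)),
]
make_example_file("people.dbf", example_fields, example_records,
                  delete_last_record=True)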
def _read_DBF(src_file, dictionary):
    dataset = dbf.Dbf(src_file)
    for record in dataset:
        if (record['UP'] is None or record['UP'] < dictionary.arcMinDate):
            continue
        dictionary.addAccrualSize(record['TN'])
    dataset.close()
Example #10
def summarizeDbf(dbfList, idField, sumVals=-1):
    results = {}
    expectedLength = 0
    if sumVals == -1:
        sumVals = [["SUM"]] * len(dbfList)
    for dFileIdx in range(0, len(dbfList)):
        dFile = dbfList[dFileIdx]
        dbfData = dbf.Dbf(dFile)
        for idx in range(0, len(dbfData)):
            curResults = []
            for cVal in sumVals[dFileIdx]:
                curResults.append(dbfData[idx][cVal])
            if dFile == dbfList[0]:
                results[dbfData[idx][idField]] = curResults
            else:
                try:  #For the 2nd dbf and onward, append the results to the existing data
                    #Check to make sure the current data are of the proper length, if not, pad with zeroes
                    curRes = results[dbfData[idx][idField]]
                    if len(curRes) != expectedLength:
                        results[dbfData[idx][idField]].extend(
                            [0] * (expectedLength - len(curRes)))
                    results[dbfData[idx][idField]].extend(curResults)
                except:
                    #If the key is not in the output, add a bunch of zeroes at the start, then append
                    results[dbfData[idx][idField]] = [0] * expectedLength
                    results[dbfData[idx][idField]].extend(curResults)
            dbfData.close()
        expectedLength = expectedLength + len(sumVals[dFileIdx])
    return (results)
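# --- Hedged usage sketch (not from the original source) ---
# Example of combining two zonal-statistics tables keyed by a shared id
# field; the file names and field names here are assumptions for illustration.
summary = summarizeDbf(["zone_stats_2010.dbf", "zone_stats_2020.dbf"],
                       "FIPS",
                       sumVals=[["SUM"], ["SUM", "MEAN"]])
# Each id maps to one value from the first table followed by two from the
# second; ids missing from earlier tables are padded with zeroes.
for zone_id, values in summary.items():
    print(zone_id, values)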
Example #11
    def convert_dbfs_to_csvs(self, logger=defaultLogger):
        for [band_name, band_folder] in self.band_parameters:
            self.string_args['band'] = band_name
            for dbfFilename in (glob(BANDS_DBF_FOLDER.format(**self.string_args) + '*.dbf')
                + glob(TEMP_DBF_FOLDER.format(**self.string_args) + '*.dbf')
                + glob(SAMPLE_DBF_FOLDER.format(**self.string_args) + '*.dbf')
                + glob(SEL_POINTS_FOLDER.format(**self.string_args) + '*.dbf')
                + glob(SEL_TEMP_DBF_FOLDER.format(**self.string_args) + '*.dbf')):

                ### Code for dbfpy module
                csvFilename = dbfFilename.replace('.dbf', '.csv')
                with open(csvFilename, 'wb') as outCSV:
                    inDBF = dbf.Dbf(dbfFilename)
                    #outCSV = open(csvFilename, 'wb')
                    csvWriter = csv.writer(outCSV)

                    names = [field.name for field in inDBF.header.fields]
                    csvWriter.writerow(names)

                    for rec in inDBF:
                        csvWriter.writerow(rec.fieldData)

                    inDBF.close()

                if os.path.exists(csvFilename):
                    try:
                        os.remove(dbfFilename)
                    except OSError:
                        pass
                    try:
                        os.remove(dbfFilename + '.xml')
                    except OSError:
                        pass
        return None
def writeClimData(climData, stationId, outDir):
	for ghcndVar in climData.keys():
		if ghcndVar == 'TMP':
			outFile = outDir + '/' + stationId + 'tmp.dbf'
		else:
			outFile = outDir + '/' + stationId + varMap[ghcndVar].lower() + '.dbf'
		db = dbf.Dbf(outFile, new=True)
		if ghcndVar == 'TMP':
			db.addField(
				('DATE', 'D')\
				, ('MAX','N', 5, 1)\
				, ('MIN','N', 5, 1)\
			)
		else:
			db.addField(
				("DATE", "D")\
				, (varMap[ghcndVar],'N', 5, 1)
			)
		varData = climData[ghcndVar]
		for row in range(0,varData.shape[0]):
			rec = db.newRecord()
			rec["DATE"] = (int(varData[row,0]), int(varData[row,1]), int(varData[row,2]))
			if ghcndVar == 'TMP':
				rec['MAX'] = varData[row,3]
				rec['MIN'] = varData[row,4]
			else:
				rec[varMap[ghcndVar]] = varData[row,3]
			rec.store()
		db.close()
		del db
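# --- Hedged usage sketch (not from the original source) ---
# writeClimData expects a dict of 2-D arrays whose columns start with
# year, month, day, followed by the value columns it indexes (max/min for
# 'TMP'). The array values, station id and output path are hypothetical;
# the module-level varMap is only consulted for non-'TMP' variables.
import numpy as np

climData = {
    'TMP': np.array([
        [2000, 1, 1, 12.5, 3.1],
        [2000, 1, 2, 11.0, 2.4],
    ])
}
writeClimData(climData, 'USW00023234', '/tmp')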
Example #13
def hr_department(src_path, dst_path, dictionary):
    dst_file = dst_path + 'hr_department.csv'
    try:
        dataset = dbf.Dbf(src_path + 'PDR.DBF')
        f = open(dst_file, 'w+')
        f.write(
            'ID;code;name;parentUnitID;state;fullName;description;nameGen;fullNameGen;nameDat;nameOr;'
            + 'fullNameGen;fullNameDat;dateFrom;dateTo\n')
        ID = 0
        for record in dataset:
            ID += 1
            code = record['ID']
            name = record['NM']
            parentUnitID = record['ID_PARENT'] and dictionary.get_DepartmentID(
                record['ID_PARENT']) or ''
            state = 'ACTIVE'
            fullName = record['NMF']
            description = name + ' (' + code + ')'
            nameGen = ''
            fullNameGen = ''
            nameDat = ''
            nameOr = ''
            fullNameGen = ''
            fullNameDat = ''
            dateFrom = record['BEG']
            dateTo = record['END'] and record['END'] or ''
            f.write('%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\n' %
                    (ID, code, name, parentUnitID, state, fullName,
                     description, nameGen, fullNameGen, nameDat, nameOr,
                     fullNameGen, fullNameDat, dateFrom, dateTo))
            dictionary.set_DepartmentID(code, ID)
        dataset.close()
    except:
        print 'Error making ', dst_file, sys.exc_info()[1]
Example #14
def prepare_drought_tables(paese):

    direttorio_radice = r"C:\data\tools\sparc\projects\drought"
    direttorio = direttorio_radice + "\\" + paese

    import pycountry
    iso_paese = pycountry.countries.get(name=paese).alpha3

    lista = []
    for direttorio_principale, direttorio_secondario, file_vuoto in os.walk(
            direttorio):
        if direttorio_principale != direttorio:
            name_adm = direttorio_principale.split("\\")[7].split("_")[0]
            code_adm = direttorio_principale.split("\\")[7].split("_")[1]
            files_dbf = glob.glob(direttorio_principale + "/*.dbf")
            for file in files_dbf:
                fileName, fileExtension = os.path.splitext(file)
                if 'stat' in fileName:
                    try:
                        if str(fileExtension) == '.dbf':
                            temporal_string = fileName.split("\\")[-1].split(
                                "_")[1]
                            temporal_value = ''.join(x for x in temporal_string
                                                     if x.isdigit())
                            in_dbf = dbf.Dbf(fileName + fileExtension)
                            for rec in in_dbf:
                                stringola = (paese, iso_paese, name_adm,
                                             code_adm, temporal_value,
                                             rec['VALUE'], rec['SUM'])
                                lista.append(stringola)
                            in_dbf.close()
                    except:
                        pass
    return lista
def DBFtoCSV():
    '''Convert every DBF table into CSV table. 
    '''
    # Set new workplace where tables are located
    env.workspace = pathlist[1]
    # list tables in file
    tablelist = arcpy.ListTables()
    # iterate through every table
    for table in tablelist:
        #make sure you are just working with .dbf tables
        if table.endswith('.dbf'):
            #name csv the same as the .dbf table just with .csv at the end
            csv_fn = table[:-4] + ".csv"
            with open(pathlist[2] + csv_fn, 'wb') as csvfile:
                in_db = dbf.Dbf(pathlist[1] + table)
                out_csv = csv.writer(csvfile)
                #copy row names and items in rows from dbf to csv
                names = []
                for field in in_db.header.fields:
                    names.append(field.name)
                out_csv.writerow(names)
                for rec in in_db:
                    out_csv.writerow(rec.fieldData)
                in_db.close()
        #keep track of processing
        print "\n Processing ", table[:-4] + ".csv table complete."
def _read_DBF(src_file, entity, f, dictionary):
    dataset = dbf.Dbf(src_file)
    balance = Balance()
    for record in dataset:
        if (dictionary.isSkipEmployee(record['TN'])):
            continue
        if (record['UP'] is not None and record['UP'] < dictionary.arcMinDate):
            continue
        if (record['CD'] == 'НачальноеСальдо'):
            try:
                employeeNumberID = record['TN']
                periodCalc = record['UP']
                sumFrom, sumTo = balance.get((employeeNumberID, periodCalc))
                sumFrom = record['SM']
                balance.set((employeeNumberID, periodCalc), (sumFrom, sumTo))
                prevPeriodCalc = (periodCalc +
                                  timedelta(days=-1)).replace(day=1)
                sumFrom, sumTo = balance.get(
                    (employeeNumberID, prevPeriodCalc))
                sumTo = record['SM']
                balance.set((employeeNumberID, prevPeriodCalc),
                            (sumFrom, sumTo))
            except:
                print 'Error accrual balance tabNum:', record[
                    'TN'], sys.exc_info()[1]
    dataset.close()
    balance.write(f, dictionary)
Example #17
def dbf_to_list(dbf_file, field_name):
    if os.path.splitext(dbf_file)[1] == '.shp':
        dbf_file = os.path.splitext(dbf_file)[0] + '.dbf'
    #The next error case is handled within an if block
    #It occurs if a non .dbf file was entered
    #First check whether the extension is .dbf by splitting the extension out
    if os.path.splitext(dbf_file)[1] != '.dbf':
        #Then the user is told what occurred and prompted to exit as above
        print 'Must input a .dbf file'
        print 'Cannot compile ' + dbf_file
        raw_input('Press enter to continue')
        sys.exit()

    #Finally- the actual function code body
    #First the dbf file is read in using the dbfpy Dbf function
    db = dbf.Dbf(dbf_file)
    #Db is now a dbf object within the dbfpy class

    #Next find out how long the dbf is
    rows = len(db)

    #Set up a list variable to write the column numbers to
    out_list = []

    #Iterate through each row within the dbf
    for row in range(rows):
        #Add each number in the specified column number to the list
        out_list.append(db[row][field_name])
    db.close()
    #Return the list
    #This makes the entire function equal to the out_list
    return out_list
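# --- Hedged usage sketch (not from the original source) ---
# The path and attribute name are illustrative. A .shp path is accepted
# because dbf_to_list swaps the extension to .dbf before reading.
areas = dbf_to_list('parcels.shp', 'AREA')
print 'Read', len(areas), 'values'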
Example #18
def _read_DBF(src_file, accrual, f, dictionary, suffix):
    dataset = dbf.Dbf(src_file)
    for record in dataset:
        if (record['CD'] == 'НачальноеСальдо'):
            continue
        if (dictionary.isSkipEmployee(record['TN'])):
            continue
        if (record['UP'] is not None and record['UP'] < dictionary.arcMinDate):
            continue
        code = record['CD'] + suffix
        accrual.ID += 1
        accrual.periodCalc = record['UP']
        accrual.periodSalary = record['RP']
        accrual.tabNum = str(record['TN'])
        accrual.taxCode = dictionary.get_TaxCode(accrual.tabNum)
        accrual.employeeNumberID = accrual.tabNum = str(record['TN'])
        accrual.payElID = dictionary.get_PayElID(code)
        accrual.paySum = record['SM'] != 0 and str(record['SM']) or '0'
        accrual.days = record['DAYS'] != 0 and str(record['DAYS']) or ''
        accrual.hours = record['HRS'] != 0 and str(record['HRS']) or ''
        accrual.calculateDate = ''
        accrual.flagsRec = str(
            8 | (record['STOR'] > 0 and 512 or 0))  # 8 - import, 512 - storno
        accrual.dateFrom = record['PR_BEG']
        accrual.dateTo = record['PR_END']
        accrual.write_record(f)
    dataset.close()
Example #19
def convert_dbf2csv(dbfpath, csvpath, footer):
    print "Converting %s to csv" % dbfpath
    writingddata = []

    in_db = dbf.Dbf(dbfpath)
    decimalCounts = []
    index_array = [0] * len(Constants.USE_HEADER)

    src_index = 0
    for field in in_db.header.fields:
        if field.name in Constants.USE_HEADER:
            index = Constants.USE_HEADER.index(field.name)
            index_array[index] = src_index
        decimalCounts.append(field.decimalCount)
        src_index = src_index + 1

    for rec in in_db:
        row = [normalize_boolean(x) for x in rec.fieldData]
        row = [
            format_float(row[x], decimalCounts[x]) for x in xrange(len(row))
        ]
        row = format_sms(row, index_array, footer)
        if remove_nulldata(row):
            writingddata.append(row)
    in_db.close()

    # sort the rows before writing
    writingddata.sort(key=lambda x: x[3] + x[0] + x[2])

    with open(csvpath, 'wb') as csvfile:
        out_csv = csv.writer(csvfile)
        for row in writingddata:
            out_csv.writerow(row)

    print " Done..."
Example #20
def post_func(directory, dbname, userr, pas, hostt, portt):
    ank = dbf.Dbf(directory + "zoleav.dbf")
    f_ank = open(directory + 'oleav.csv', 'w')
    f_ank.write("ID; ORGBASE_RN; FCAC_RN; DAYTYPE_RN; STARTDATE; ENDDATE\n")
    #print len(ank)
    for i in ank:
        if i.deleted or i["OLEAV_RN"] == None or i["FCAC_RN"] == None or i[
                "DAYTYPE_RN"] == None or i["ORGBASE_RN"] == None or i[
                    "STARTDATE"] == None or i["ENDDATE"] == None:
            continue
        if not i["ORGBASE_RN"] or not i["DAYTYPE_RN"] or not i[
                "OLEAV_RN"] or not i[
                    "FCAC_RN"] or parus_id_to_odoo.parusIndexToOdoo(
                        i["FCAC_RN"].decode('cp1251').encode('utf-8').decode(
                            'utf-8')) in [
                                33, 1939, 1317, 3132, 3372, 3127, 257, 313
                            ]:
            continue
        if i["STARTDATE"] < datetime.date(2014, 12, 01):
            continue
        f_ank.write("\"" + str(
            parus_id_to_odoo.parusIndexToOdoo(i["OLEAV_RN"].decode(
                'cp1251').encode('utf-8').decode('utf-8'))) + "\"" + "; ")
        f_ank.write("\"" + str(
            parus_id_to_odoo.parusIndexToOdoo(i["ORGBASE_RN"].decode(
                'cp1251').encode('utf-8').decode('utf-8'))) + "\"" + "; ")
        f_ank.write("\"" + str(
            parus_id_to_odoo.parusIndexToOdoo(i["FCAC_RN"].decode(
                'cp1251').encode('utf-8').decode('utf-8'))) + "\"" + "; ")
        f_ank.write("\"" + str(
            parus_id_to_odoo.parusIndexToOdoo(i["DAYTYPE_RN"].decode(
                'cp1251').encode('utf-8').decode('utf-8'))) + "\"" + "; ")
        f_ank.write("\"" + str(i["STARTDATE"]) + "\"" + "; " + "\"" +
                    str(i["ENDDATE"]) + "\"" + "\n")
Example #21
def hr_employeeAccrual(src_path, dst_path, dictionary):
    dst_file = dst_path + 'hr_employeeAccrual.csv'
    try:
        dataset = dbf.Dbf(src_path + 'NCH.DBF')
        f = open(dst_file, 'w+')
        f.write('ID;employeeID;tabNum;taxCode;employeeNumberID;payElID;dateFrom;dateTo;accrualSum;accrualRate;' + 
            'orderNumber;orderDatefrom;taxCode\n')
        ID = 0
        for record in dataset:
            if (dictionary.isSkipEmployee(record['TN'])):
                continue
            if (record['DATK'] is not None and record['DATK'] < dictionary.arcMinDate):
                continue
            ID += 1
            employeeID = str(record['TN']) # str(record['ID'])
            tabNum = str(record['TN'])
            taxCode = dictionary.get_TaxCode(tabNum)
            employeeNumberID = str(record['TN'])
            payElID = dictionary.get_PayElID(record['CD'])
            dateFrom = record['DATN'] and record['DATN'] or ''
            dateTo = record['DATK'] and record['DATK'] or '9999-12-31'
            accrualSum = str(record['SM'])
            accrualRate = str(record['PRC'])
            orderNumber = '' # record['CDPR']
            orderDatefrom = ''
            taxCode = dictionary.get_TaxCode(tabNum)
            f.write('%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\n' % 
                (ID, employeeID, tabNum, taxCode, employeeNumberID, payElID, 
                    dateFrom, dateTo, accrualSum, accrualRate, orderNumber, orderDatefrom, taxCode))
        dataset.close()
    except:
        print 'Error making ', dst_file, sys.exc_info()[1]
Example #22
def person_func(directory, dbname, userr, pas, hostt, portt):
    ank = dbf.Dbf(directory + "zpost.dbf")
    f_ank = open(directory + 'post.csv', 'w')
    f_ank.write("ID; STARTDATE;  ENDDATE; POST_NUM; NAME\n")
    #print len(ank)
    for i in ank:
        if i.deleted or i["POST_CODE"] == None or i["ENDDATE"] == None or i[
                "STARTDATE"] == None:
            continue
        f_ank.write("\"" + str(
            parus_id_to_odoo.parusIndexToOdoo(i["POST_RN"].decode(
                'cp1251').encode('utf-8').decode('utf-8'))) + "\"" + "; ")
        f_ank.write("\"" + str(i["STARTDATE"]) + "\"" + "; ")
        f_ank.write("\"" + str(i["ENDDATE"]) + "\"" + "; ")
        f_ank.write("\"" + i["POST_NUM"] + "\"" + "; ")
        f_ank.write("\"" + i["POST_CODE"].decode('cp1251').encode('utf-8') +
                    "\"" + "\n")
    f_ank.close()
    print "zpost.dbf to post.csv [ ok ]"
    #CONNECT TO DATABASE
    con = psycopg2.connect(database=dbname,
                           user=userr,
                           password=pas,
                           host=hostt,
                           port=portt)
    cur = con.cursor()
    #cur.execute ("DELETE from tabel_post;")

    #OPEN CSV FILE GENERATED BY syncronize.py script
    my_file = open(directory + 'post.csv')

    #CREATE TEMP TABLE
    cur.execute(
        "CREATE TEMP TABLE tmp_z (ID int unique, STARTDATE date, ENDDATE date, NUM int, NAME  text);"
    )
    cur.copy_expert("COPY tmp_z FROM STDIN WITH DELIMITER ';' CSV HEADER;",
                    my_file)
    #cur.execute ("DELETE from tabel_fcac;")
    #UPDATE DATA
    cur.execute(
        "UPDATE tabel_post SET STARTDATE=tmp_z.STARTDATE, ENDDATE=tmp_z.ENDDATE, NUM=tmp_z.NUM, NAME=tmp_z.NAME FROM tmp_z WHERE  tabel_post.id = tmp_z.id;"
    )

    #cur.execute("SELECT G.id, G.ANK_RN, G.POST_RN, G.SUBDIV_RN, G.VIDISP_RN, G.STARTDATE, G.ENDDATE FROM (SELECT T.id, T.ANK_RN, T.POST_RN, T.SUBDIV_RN, T.VIDISP_RN, T.STARTDATE, T.ENDDATE FROM tmp_z AS T LEFT JOIN tabel_fcac AS P  ON T.id = P.id WHERE P.id IS NULL) AS G, tabel_ank AS H where G.ank_rn = H.id  ;")
    #INSERT DATA add something which lacks
    cur.execute(
        "INSERT INTO tabel_post (id, startdate, enddate, num, name) SELECT T.id, T.STARTDATE, T.ENDDATE, T.NUM, T.NAME FROM tmp_z AS T LEFT JOIN tabel_post AS P  ON T.id = P.id WHERE P.id IS NULL ;"
    )

    #rows = cur.fetchall()
    #for i in rows:
    #	print i
    #DROP TEMP TABLE or auto drop after session
    cur.execute("DROP TABLE tmp_z;")

    #CLOSE CONNECTION
    con.commit()
    cur.close()
    con.close()
    print "sql requests for table post [ ok ]"
def convertDbf2Csv(dbf_file, csv_file):

    if os.path.exists(dbf_file):

        in_db = dbf.Dbf(dbf_file)
        fieldnames_list = []
        for field in in_db.header.fields:
            fieldnames_list.append(field.name)

        if six.PY2:
            out_csv = csv.writer(open(csv_file, 'wb'))
            out_csv.writerow(fieldnames_list)
            for rec in in_db:
                out_csv.writerow(rec.fieldData)
        else:
            with open(csv_file, 'w', newline='') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames_list)
                writer.writeheader()
                for rec in in_db:
                    fieldData_dico = {}
                    for index in range(len(fieldnames_list)):
                        name_col = fieldnames_list[index]
                        value = rec.fieldData[index]
                        fieldData_dico[name_col] = value
                    writer.writerow(fieldData_dico)

        in_db.close()

    else:
        print(cyan + "convertDbf2Csv() : " + endC + bold + yellow +
              "Impossible de lire le fichier dbf : " + dbf_file +
              " . Fichier inexistant!" + endC)
    return
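# --- Hedged usage sketch (not from the original source) ---
# File names are illustrative; the function picks the csv.writer or
# csv.DictWriter branch depending on the Python version via six.PY2.
convertDbf2Csv('stations.dbf', 'stations.csv')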
Example #24
def hr_position(src_path, dst_path, dictionary):
    dst_file = dst_path + 'hr_position.csv'
    try:
        f = open(dst_file, 'w+')
        position = Position()
        position.write_header(f)
        dataset = dbf.Dbf(src_path + 'PRK.DBF')
        position_list = set()
        for record in dataset:
            department_code = record['PDR']
            if (department_code):
                department_id = dictionary.get_DepartmentID(department_code)
                dictPosition_id = record['DOL']
                if (department_id and dictPosition_id):
                    position.ID = int(department_id) * 10000 + dictPosition_id
                    if (position.ID in position_list):
                        continue
                    position_list.add(position.ID)
                    position.code = str(dictPosition_id)
                    position.name = dictionary.get_DictPositionName(
                        position.code)
                    position.psCategory = ''
                    position.positionType = ''
                    position.description = ''
                    position.write_record(f)
        dataset.close()
    except:
        print 'Error making ', dst_file, sys.exc_info()[1]
Example #25
def hr_employeeNumber(src_path, dst_path, dictionary):
    dst_file = dst_path + 'hr_employeeNumber.csv'
    try:
        dataset = dbf.Dbf(src_path + 'LS.DBF')
        f = open(dst_file, 'w+')
        f.write(
            'ID;employeeID;taxCode;tabNum;dateFrom;dateTo;description;payOutID;personalAccount\n'
        )
        for record in dataset:
            if (dictionary.isSkipEmployee(record['TN'])):
                continue
            ID = str(record['TN'])  # str(record['ID'])
            employeeID = str(record['TN'])
            taxCode = record['NLP']
            tabNum = str(record['TN'])
            dateFrom = record['BEG'] and record['BEG'] or ''
            dateTo = record['END'] and record['END'] or ''
            description = record['FIO'] + ' (' + str(record['TN']) + ')'
            payOutID = ''
            personalAccount = record['BANKRAH']
            f.write('%s;%s;%s;%s;%s;%s;%s;%s;%s\n' %
                    (ID, employeeID, taxCode, tabNum, dateFrom, dateTo,
                     description, payOutID, personalAccount))
        dataset.close()
    except:
        print 'Error making ', dst_file, sys.exc_info()[1]
Example #26
def convert_dbf_to_csv(folder):
    for dbfFilename in (glob(folder + '*.dbf')):
        inDBF = dbf.Dbf(dbfFilename)
        csvFilename = dbfFilename.replace('.dbf', '.csv')
        outCSV = open(csvFilename, 'wb')
        csvWriter = csv.writer(outCSV)

        names = []
        for field in inDBF.header.fields:
            names.append(field.name)
        csvWriter.writerow(names)

        for rec in inDBF:
            csvWriter.writerow(rec.fieldData)

        inDBF.close()
        outCSV.close()
        if os.path.exists(csvFilename):
            try:
                os.remove(dbfFilename)
            except OSError:
                pass
            try:
                os.remove(dbfFilename + '.xml')
            except OSError:
                pass
    return None
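# --- Hedged usage sketch (not from the original source) ---
# The directory is illustrative. The trailing separator matters because the
# glob pattern is built by plain concatenation: glob(folder + '*.dbf').
# Note that each source .dbf (and any .dbf.xml sidecar) is deleted once its
# CSV exists.
convert_dbf_to_csv('/data/zonal_stats/')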
Example #27
    def __init__(self,
                 filename,
                 delimiter=',',
                 quotechar='"',
                 has_headers=True):
        from dbfpy import dbf
        self.delimiter = delimiter
        self.quotechar = quotechar
        self.filename = filename
        self.data = []
        self.width = 0
        self.headers = []

        db = dbf.Dbf(self.filename, True)
        self.headers = db.fieldNames

        # using the default `for row in db` will cause a crash on invalid
        # fields produced by QGIS
        for i in range(0, len(db)):
            row_dict = {}
            for key in self.headers:
                header = db.header[key]
                try:
                    record = db[i].rawFromStream(db, i)
                    val = header.decodeFromRecord(record)
                except ValueError:
                    val = None
                row_dict[key] = val
            self.data.append(Row(row_dict))

        # Cleaner method, whenever this issue is fixed in  DBFpy
        """
Example #28
    def file_to_dict(self,
                     clear_null=0,
                     clear_num_key='trade_date',
                     file_type='Excel',
                     del_key='',
                     multifile_mode=False):
        def find_the_column(table, col_name, found_col_num="NA"):
            for i in range(table.ncols):
                if table.cell(0, i).value.encode('utf-8') == col_name:
                    found_col_num = i
                    break
            if found_col_num == "NA":
                raise Exception, "Invalid Column Name!"
            return found_col_num

        def dict_null_clear(dict, key):
            for i in range(len(dict[key])):
                if dict[key][i] == "":
                    for k in dict.keys():
                        del dict[k][i]

        data_dict = {}
        file_li = []
        if multifile_mode == True:
            filename = os.listdir(os.getcwd())
            for fn in filename:
                if fn.startswith(self.filename):
                    file_li.append(fn)

        if file_type == 'Excel':
            data = xlrd.open_workbook(self.filename)
            table = data.sheet_by_index(0)
            for key in self.file_db_relation.keys():
                data_dict[key] = table.col_values(
                    find_the_column(table, self.file_db_relation[key]))[1:]
                #[1:] cause first row is always title.
            if clear_null == 1:
                dict_null_clear(data_dict, clear_num_key)
        elif file_type == "DBF":
            dbffile = DBF(self.filename)
            frame = DataFrame(iter(dbffile))
            for k in self.file_db_relation.keys():
                data_dict[k] = list(frame[self.file_db_relation[k]])
        elif file_type == "DBF2":  #It's available when multifile_mode=True
            for file in file_li:
                print "Reading the file %s,waiting..." % file
                dbffile = dbf.Dbf(file, readOnly=True)
                for fn in self.file_db_relation.keys():
                    li = []
                    for i in range(0, len(dbffile)):
                        li.append(
                            str(dbffile[i][self.file_db_relation[fn]]).strip())
                    data_dict[fn] = data_dict.setdefault(fn, []) + li
        else:
            raise Exception, 'Invalid file type!'
        if not del_key == '':
            print data_dict
            del data_dict[del_key]
        return data_dict
Example #29
def pfcac_func(directory, dbname, userr, pas, hostt, portt):
    ank = dbf.Dbf(directory + "zdaytype.dbf")
    f_ank = open(directory + 'daytype.csv', 'w')
    f_ank.write("ID; NICK; NAME;\n")
    #print len(ank)
    for i in ank:
        if i.deleted or i["NICK"] == None or i["NAME"] == None or i[
                "DAYTYPE_RN"] == None:
            continue
        f_ank.write("\"" + str(
            parus_id_to_odoo.parusIndexToOdoo(i["DAYTYPE_RN"].decode(
                'cp1251').encode('utf-8').decode('utf-8'))) + "\"" + "; ")
        f_ank.write("\"" + str(i["NICK"].decode('cp1251').encode('utf-8')) +
                    "\"" + ";")
        f_ank.write("\"" + str(i["NAME"].decode('cp1251').encode('utf-8')) +
                    "\"" + "\n")
    print "daytype.dbf to daytype.csv [ ok ]"
    f_ank.close()
    #CONNECT TO DATABASE
    con = psycopg2.connect(database=dbname,
                           user=userr,
                           password=pas,
                           host=hostt,
                           port=portt)
    cur = con.cursor()
    #cur.execute ("DELETE from tabel_fcacwth;")
    #OPEN CSV FILE GENERATED BY syncronize.py script
    my_file = open(directory + 'daytype.csv')

    #CREATE TEMP TABLE
    cur.execute(
        "CREATE TEMP TABLE tmp_z (ID int unique, NICK  text, NAME text);")
    cur.copy_expert("COPY tmp_z FROM STDIN WITH DELIMITER ';' CSV HEADER;",
                    my_file)

    #UPDATE DATA
    cur.execute(
        "UPDATE tabel_daytype SET  NICK=tmp_z.NICK, NAME=tmp_z.NAME FROM tmp_z WHERE  tabel_daytype.id = tmp_z.id;"
    )

    #cur.execute("SELECT G.id, G.ANK_RN, G.POST_RN, G.SUBDIV_RN, G.VIDISP_RN, G.STARTDATE, G.ENDDATE FROM (SELECT T.id, T.ANK_RN, T.POST_RN, T.SUBDIV_RN, T.VIDISP_RN, T.STARTDATE, T.ENDDATE FROM tmp_z AS T LEFT JOIN tabel_fcac AS P  ON T.id = P.id WHERE P.id IS NULL) AS G, tabel_ank AS H where G.ank_rn = H.id  ;")
    #INSERT DATA add something which lacks.
    #add those records that are in the table tmp.z and not in the table fcacwth. And check that all the fields in the table fcac_rn existed fcac
    cur.execute(
        "INSERT INTO tabel_daytype (id, nick, name) SELECT G.id, G.NICK, G.NAME FROM (SELECT T.id, T.NICK, T.NAME FROM tmp_z AS T LEFT JOIN tabel_daytype AS P  ON T.id = P.id WHERE P.id IS NULL) AS G ;"
    )

    #rows = cur.fetchall()
    #for i in rows:
    #	print i
    #DROP TEMP TABLE or auto drop after session
    cur.execute("DROP TABLE tmp_z;")

    #CLOSE CONNECTION
    con.commit()
    cur.close()
    con.close()
    print "sql requests for table daytype [ ok ]"
    def CustomizeSummary(self, name, year, day):
        db = dbf.Dbf(name)
        for rec in db:
            zone = rec["FIPS"]
            count = rec["COUNT"]
            summ = rec["SUM"]
            mean = rec["MEAN"]
            stddev = rec["STD"]
            self.InsertDB(year, day, zone, count, mean, stddev, summ)
        db.close()