def create_dbf():
    """Create the 'configuratie_debietberekening.dbf' file in settings.DBF_DIR.

    Returns:
        bool: True when the file was created and closed successfully,
        False when DBF_DIR is not configured or any error occurred.
    """
    success = False
    try:
        if settings.DBF_DIR is None:
            logger.error('DBF_DIR setting is NOT available')
            # BUG FIX: was a bare 'return' (None); now consistently
            # returns the boolean success flag like every other path.
            return success
        dbf_filepath = os.path.join(
            settings.DBF_DIR, 'configuratie_debietberekening.dbf')
        logger.info("Create en open dbf file='{}'.".format(dbf_filepath))
        out = Dbf(dbf_filepath, new=True)
        logger.info("Add fields.")
        fields_to_dbf(out)
        logger.info("Store data.")
        store_data(out)
        logger.info("Close file.")
        out.close()
        success = True
    except Exception as ex:
        # NOTE(review): if the exception fires after Dbf() succeeded, the
        # handle is left open — TODO confirm whether Dbf needs explicit
        # cleanup on the error path.
        logger.error(','.join(map(str, ex.args)))
    return success
def load_dataset(self, name, ref):
    """Load one UIR-ZSJ DBF dataset into self.data[name].

    name -- dataset identifier (e.g. "ZSJ"); selects the DBF file and the
            per-dataset column mapping in self.columns.
    ref  -- place-reference prefix; only rows whose LAU1+ICOB key starts
            with this prefix are kept.
    """
    # Resolve dataset-specific column names from the configured mapping.
    col_name = self.columns["name"][name]
    col_pop = self.columns["population"][name]
    # The xy entry is a template like "...{}" expanded for each axis.
    col_x = self.columns["xy"][name].format("X")
    col_y = self.columns["xy"][name].format("Y")
    # ref_key, when present, adds a second grouping level per place.
    ref_key = self.datasets.get(name)
    if ref_key is None:
        # Flat mapping: place_ref -> place record.
        dataset = {}
    else:
        # Two-level mapping: place_ref -> {row[ref_key] -> place record}.
        dataset = defaultdict(dict)
    if name == "ZSJ":
        # The ZSJ dataset lives in a differently named file (ZSJD.DBF).
        filename = os.path.join(self.directory, "ZSJD.DBF")
    else:
        filename = os.path.join(self.directory, "{}.DBF".format(name))
    for row in Dbf(filename, True):
        row = row.asDict()
        # Composite place key: LAU1 (municipality) + ICOB codes.
        place_ref = row["LAU1"] + row["ICOB"]
        # Skip rows outside the requested prefix; for ZSJ also skip rows
        # where DD != DIL (presumably duplicate/partial entries — TODO
        # confirm the DD/DIL semantics against the UIR-ZSJ spec).
        if not place_ref.startswith(ref) or (name == "ZSJ" and row["DD"] != row["DIL"]):
            continue
        place = {
            # DBF text fields are bytes in code page 852 (Central European).
            "name": row[col_name].decode("cp852"),
            "population": row[col_pop],
            "xy": (row[col_x], row[col_y])
        }
        if name == "ZSJ":
            place["COBE"] = row["KOD_CAST"]
        # fix_name splits a decorated name into base name + note, when
        # the pattern matches (e.g. a parenthesised suffix).
        match = self.fix_name.search(place["name"])
        if match is not None:
            place["name"] = match.group(1)
            place["note"] = match.group(2)
        if ref_key is None:
            dataset[place_ref] = place
        else:
            dataset[place_ref][row[ref_key]] = place
    if ref_key is None:
        log.info(
            _("Loaded {} nodes from UIR-ZSJ dataset {}.").format(
                len(dataset), name))
    else:
        # Count leaves across the two-level mapping.
        log.info(
            _("Loaded {} nodes from UIR-ZSJ dataset {}.").format(
                sum(len(row) for row in dataset.values()), name))
    self.data[name] = dataset
from dbfpy.dbf import Dbf
import string, glob, os

# Directory holding the extracted CRU 3.1 DBF tables (note: the raw string
# contains literal backslashes; none of \W \P \_ are escape sequences).
dirbase = "D:\Workspace\PiuraTumbes\_extract_CRU3_1_tmp"

dbfList = sorted(glob.glob(dirbase + "\\*.dbf"))
outtxtFile = dirbase + "\\__extract_CRU3_1_tmp.txt"

# Append to an existing output file; otherwise create it and write a header.
if os.path.isfile(outtxtFile):
    outFile = open(outtxtFile, "a")
else:
    outFile = open(outtxtFile, "w")
    outFile.write("Variable\tMonth\tTumbes\tPiura\n")

for dbf in dbfList:
    dbfFile = Dbf(dbf, True)  # True = open read-only
    # File names encode variable and month as 'xxxxVAR_MONTH.dbf';
    # strip the 4-char prefix and the '.dbf' extension before splitting.
    stem = os.path.basename(dbf)[4:-4].split("_")
    outFile.write(stem[0] + "\t" + stem[1] + "\n")
    for rec in dbfFile:
        # Writes the 4th field of every record, prefixed with 'a'
        # (presumably a marker for the downstream parser — TODO confirm).
        outFile.write("a" + str(rec[3]) + "\n")
    # BUG FIX: close each DBF inside the loop; previously only the handle
    # of the last file was closed, leaking one handle per input file.
    dbfFile.close()

outFile.close()
do_terrobj = 1 do_tobjhnr = 1 do_huisnr = 1 postal_code = 0 if (len(sys.argv) > 2): postal_code = int(sys.argv[2]) print 'Filtering on postalcode: ' + str(postal_code) # parse & index pkancode huisnr_dic = dict() pkancode_set = set() print 'Extracting pkancode' db = Dbf() db.openFile(pkancode_dbf, readOnly=1) record_count = db.recordCount() for i in range(0, record_count): rec = db[i] if (i % (record_count / 50) is 0 and not i is 0): sys.stdout.write('.') sys.stdout.flush() huisnr_id = rec['HUISNRID'] pkancode = rec['PKANCODE'] if (pkancode == postal_code or postal_code is 0): huisnr_dic[huisnr_id] = dict()
def parse_swdb(file, options):
    """Parse a California Statewide Database (swdb) precinct-results DBF.

    "file" can be a file, url, or string suitable for openAnything().
    Also needs a source of the "codes" to annotate the choice names.

    For each precinct record, derives a batch id, vote type (absentee vs
    ballot), and per-contest codes, then pushes AuditUnit objects via the
    util module.
    """
    # Contest code prefixes: single statewide contests vs districted ones.
    one_contest_prefixes = ('PRS', 'SEN', 'PR_')
    dist_contest_prefixes = ('CNG', 'ASS')
    contest_prefixes = one_contest_prefixes + dist_contest_prefixes
    # NOTE(review): the following triple-quoted string is disabled code
    # that annotated choice names from a separate "codes" file.
    """
    choices = {}
    totals = {}
    codes_name = "003.codes"
    codes = openanything.openAnything(codes_name)
    for l in codes:
        (code, choice, total) = l.rstrip().split('\t')
        if code.startswith(contest_prefixes):
            choices[code] = choice
            totals[code] = total
        elif code.endswith(('VOTE', 'REG', 'DIST')):
            # FIXME - deal with this later
            continue
        else:
            print "unrecognized code: %s in line %s" % (code, l)
    """
    reader = Dbf(file)
    au = util.AuditUnit(options.election)
    #for r in reader:
    # Manual index-based iteration so a corrupt record can be skipped
    # instead of aborting the whole run.
    reader_iter = iter(reader)
    rec = 0
    while True:
        try:
            r = reader[rec]
        except (IndexError, StopIteration):
            # Past the last record: normal end of file.
            break
        except:
            # Corrupt record: log it and move on to the next index.
            import traceback
            traceback.print_exc(1)
            logging.error("Dbf error: %s\nrecord %d" % (r, rec))
            rec = rec + 1
            continue
        rec = rec + 1
        #batch = r["SRPREC"]
        batch = r["SRPREC_KEY"]
        #batch = r["SVPREC"]
        #batch = r["SVPREC_KEY"]
        # Skip summary rows (statement-of-vote and totals).
        if batch.startswith('SOV') or batch.endswith('TOT'):
            continue
        # state-wide data marks absentee with trailing "A",
        # county data marks them with "_A"
        if batch.endswith('A'):
            type = "AB"
            if batch.endswith('_A'):
                batch = batch[0:-2]
            else:
                batch = batch[0:-1]
        else:
            type = "BA"
        # Assembly and congressional district numbers for this precinct.
        addist = r['ADDIST']
        cddist = r['CDDIST']
        #sddist = r['SDDIST']
        for code in reader.fieldNames:
            # Skip non-contest fields (precinct ids, registration, etc.).
            if code.endswith(('PREC', 'VOTE', 'REG', 'DIST', 'SVPREC_KEY')):
                continue
            code_full = code
            contest = code[:3]
            if code.startswith('ASS'):
                # Splice the 2-digit assembly district into the code.
                code_full = code[:3] + ("%02d" % addist) + code[-3:]
                contest = code_full[:5]
            elif code.startswith('CNG'):
                # Splice the 2-digit congressional district into the code.
                code_full = code[:3] + ("%02d" % cddist) + code[-3:]
                contest = code_full[:5]
            elif code.startswith('PR_'):
                # Proposition codes: contest is everything but the last char.
                contest = code[:-1]
            else:
                contest = code[:3]
            if options.contest != None and options.contest != contest:
                continue
            # until we fully figure out how to get the district numbers...
            # contest = contests[code]
            try:
                au = util.AuditUnit(options.election, contest, type, [batch])
                au.update(code_full[len(contest):], str(r[code]))
                util.pushAuditUnit(au, min_ballots = options.min_ballots)
            except:
                print "Error looking up code %s (%s) for %s-%s" % (code, code_full, batch, type)
                continue
from dbfpy.dbf import Dbf import os folder = "E:\\dropbox\\pk\\phd\\dane" for plik in os.listdir(folder): if plik.endswith(".DBF"): baza = Dbf(os.path.join(folder, plik)) i = 0 for row in baza: i += 1 print row if i == 10: wswsw
from dbfpy.dbf import Dbf

# Open the PBS survey DBF table; 'td' is presumably consumed later in the
# script — TODO confirm against the rest of the file. (Removed a block of
# dead commented-out code that experimented with shapefile/Dbf loading of
# the PRZYSTANKI_AUTOBUSOWE / LINIE_PRZEWOZNIK tables.)
td = Dbf(
    "D:\\Dropbox\\i2\\Prace\\___Nie Visumowe\\2012, Malopolska\\Dane Wejsciowe\\PBS_styczen\\a.dbf"
)
def __openFile(self):
    """Yield an open Dbf handle for self.__path, closing it afterwards.

    Generator intended for use as a context manager body: the handle is
    always closed when iteration ends, even if the consumer raises.
    """
    # BUG FIX: construct the handle BEFORE entering try/finally. In the
    # original, a failing Dbf(...) left 'db' unbound, so the finally
    # clause raised UnboundLocalError and masked the real open error.
    db = Dbf(self.__path)
    try:
        yield db
    finally:
        db.close()
psyco.full() except ImportError: pass from dbfpy.dbf import Dbf from constants.extensions import CSV import sys import argv import parser input = argv.input(sys.argv) output = argv.output(sys.argv) page_size = argv.page_size(sys.argv) db = Dbf() db.openFile(input, readOnly=1) # TODO: Real error handling #try: record_count = db.recordCount() # If no record number is specified write everything if (page_size == 0): page_size = record_count pages = record_count / page_size for page in xrange(pages):