import dbf

def codeconv(filename, encoding='utf8'):
    """Switch a DBF table's codepage to `encoding` if it differs."""
    t = dbf.Table(filename)
    print(t.codepage)
    if not str(t.codepage).startswith(encoding):
        t.open(mode=dbf.READ_WRITE)
        t.codepage = dbf.CodePage(encoding)
        t.close()
        print('Encoding updated to', t.codepage)
    else:
        print('Encoding not updated')
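# A minimal usage sketch for codeconv; the file path below is an
# illustrative assumption, not part of the original snippet.
if __name__ == '__main__':
    codeconv('data/example.dbf', encoding='cp1250')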
import os
from datetime import datetime
import dbf

def add_field_to_shp(directory):
    print('Updating metadata structure...')
    # Collect the absolute paths of all .dbf files (the shapefiles'
    # attribute tables) found under the given directory.
    file_paths = []
    for folder, subs, files in os.walk(directory):
        for filename in files:
            if filename.endswith('.dbf'):
                file_paths.append(
                    os.path.abspath(os.path.join(folder, filename)))
    now = datetime.now()
    date_value = now.strftime("%Y-%m-%d %H:%M:%S")
    guid_str = '5861396d-8146-4469-9066-'
    # Open each layer's table in the given directory and add the
    # guid, add_date, mod_date and user_name columns (unless they
    # already exist), filling them with default values.
    for path in file_paths:
        guid_id = 1
        table = dbf.Table(path)
        table.open(mode=dbf.READ_WRITE)
        table.codepage = dbf.CodePage('cp1250')
        fields = table.field_names
        try:
            if 'guid' not in fields:
                table.add_fields('guid C(36)')
                for record in table:
                    with record as r:
                        r.guid = guid_str + str(guid_id).zfill(11)
                        guid_id += 1
            if 'add_date' not in fields:
                table.add_fields('add_date C(19)')
                for record in table:
                    with record as r:
                        r.add_date = date_value
            if 'mod_date' not in fields:
                table.add_fields('mod_date C(19)')
                for record in table:
                    with record as r:
                        r.mod_date = date_value
            if 'user_name' not in fields:
                table.add_fields('user_name C(255)')
                for record in table:
                    with record as r:
                        r.user_name = 'public'
        finally:
            table.close()
    print('Metadata updated successfully!')
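# A minimal driver sketch for add_field_to_shp, assuming a hypothetical
# 'shapefiles/' directory; both paths are illustrative only. After the
# update, one table is reopened read-only to confirm the new columns.
if __name__ == '__main__':
    add_field_to_shp('shapefiles')
    check = dbf.Table('shapefiles/example.dbf')  # hypothetical layer
    check.open(mode=dbf.READ_ONLY)
    print(check.field_names)  # should list guid, add_date, mod_date, user_name
    check.close()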
import json
import dbf

def edit_dbf_database(dbf_filename):

    def get_twitter_countries():

        def patch(locations):
            # Align a few country names with the Natural Earth spelling.
            for loc in locations:
                if loc['name'] == 'United States':
                    loc['name'] = 'United States of America'
                elif loc['name'] == 'Korea':
                    loc['name'] = 'South Korea'
            return locations

        countries = {}
        with open(f_json_locations_from_twitter_api, 'r') as f:
            locations = json.load(f)
        # To make the country names match between both inputs, we correct
        # some of the names in the data coming from twitter.
        locations = patch(locations)
        for loc in locations:
            if loc['placeType']['name'] == 'Country':
                countries[loc['name']] = loc['woeid']
        # Sort the country names alphabetically.
        countries = dict(sorted(countries.items()))
        return countries

    def gen_world_countries(table):
        # As with the twitter input, yield the records in alphabetical
        # order of their English country name.
        table_sorted = table.create_index(lambda rec: rec.name_en)
        for rec in table_sorted:
            yield rec

    twitter_countries = get_twitter_countries()
    table = dbf.Table(dbf_filename, codepage='utf8')
    table.open(mode=dbf.READ_WRITE)
    table.codepage = dbf.CodePage('utf8')
    table.add_fields('tw_woeid N(8,0)')
    # Walk the countries in the Natural Earth (NE) dataset: when a
    # country name matches the twitter dataset, store its associated
    # WOEID, otherwise -1.
    next_world_country = gen_world_countries(table)
    world_c = next(next_world_country)
    for key in twitter_countries:
        while world_c.name_en.strip() < key:
            dbf.write(world_c, tw_woeid=-1)
            world_c = next(next_world_country)
        if world_c.name_en.strip() == key:
            dbf.write(world_c, tw_woeid=twitter_countries[key])
            world_c = next(next_world_country)
    for world_c in next_world_country:
        dbf.write(world_c, tw_woeid=-1)
    table.close()
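# A minimal driver sketch: edit_dbf_database reads the module-level
# f_json_locations_from_twitter_api path, so both file names below are
# illustrative assumptions, not part of the original snippet.
f_json_locations_from_twitter_api = 'twitter_locations.json'  # hypothetical path

if __name__ == '__main__':
    edit_dbf_database('ne_110m_admin_0_countries.dbf')  # hypothetical NE table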
import string
import dbf

multiplier = 1.15
if multiplier == 1:
    NDIGITS = 2
else:
    NDIGITS = 0

# Columns: modlsthd  ismenu?  sequence  modi_num  modhd_num
mlist = dbf.Table('./origin/modilist.dbf')
mlist.open(mode=dbf.READ_WRITE)
# Pack the table to physically remove deleted records.
mlist.pack()
# Simplified Chinese's codepage is cp936.
mlist.codepage = dbf.CodePage('cp936')

mli = []
# Convert the dbf rows into a list of tuples,
# filling zero into the menu_num field.
for x in mlist:
    if x['fnew_list'] == False:
        mli.append((
            '',
            '%6s' % x['fmod_list'],
            '',
            '%6s' % x['fseq'],
            '%6s' % x['fmod_num'],
            '%6s' % 0,
        ))
    elif x['fnew_list'] == True:
        mli.append((
            '',
            '%6s' % x['fmod_list'],