Example #1
def _update_from(self, val):
    record = self._current_record
    if not record:
        return
    fields = dbf.source_table(record).field_names
    # first try to copy matching attributes from an object
    update = {
        field: getattr(val, field)
        for field in fields if hasattr(val, field)
    }
    if not update:
        # otherwise treat val as a sequence of positional field values
        try:
            update = {field: v for field, v in zip(fields, val)}
        except TypeError:  # val is not iterable
            return
    dbf.write(record, **update)
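A minimal sketch of driving the sequence fallback above with a throwaway table (the table name, fields, and sample values are assumptions, not part of the original):

import dbf

# throwaway table used only to exercise the update logic shown above
table = dbf.Table('people_demo', 'name C(20); age N(3,0)')
table.open(mode=dbf.READ_WRITE)
table.append({'name': 'Ada', 'age': 36})

record = table[0]
fields = dbf.source_table(record).field_names
# the sequence branch: pair field names with positional values
update = {field: v for field, v in zip(fields, ('Grace', 45))}
dbf.write(record, **update)
print(record.name, record.age)
table.close()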
Example #2
def generate_required_fields():
    db = dbf.Table(dbf_file_path, codepage='utf8')
    print('codepage:', db.codepage)

    # (field name, field spec, default value) for each required field
    required_fields = [
        ('SCALE_LVL', 'scale_lvl N(15,7)', 1),
        ('RATING', 'rating N(15,7)', 0),
        ('REVIEW_CNT', 'review_cnt N(15,7)', 0),
        ('UPDATED', 'updated L', False),
    ]
    for name, spec, default in required_fields:
        if check_field_already_exists(name):
            print(name, 'is already available')
            continue
        print(name, 'is not available, hence adding it')
        with db:
            try:
                db.add_fields(spec)
            except Exception as exc:
                print('error adding field', name, '-', exc)
                continue
            # initialise the new field on every existing record
            field_name = spec.split()[0]
            for record in db:
                dbf.write(record, **{field_name: default})
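The helper check_field_already_exists is not shown in this example; a plausible sketch, assuming it simply checks the table's field_names (this implementation is a guess):

def check_field_already_exists(field_name):
    # dbf treats field names case-insensitively, so compare lowercased
    with dbf.Table(dbf_file_path, codepage='utf8') as db:
        return field_name.lower() in [f.lower() for f in db.field_names]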
Example #3
    def update_shapefile_attributes(self, shapefile_display_name, dbf_location, product_record):
        display_title = self.meta_cache.extract_title(
            product_record.source_product.metadata_persistent_id)
        if display_title == "":
            display_title = shapefile_display_name
        with dbf.Table(dbf_location) as db:
            db.add_fields(
                'BATHY_TYPE C(20); NAME C(255); START_DATE C(30); '
                'END_DATE C(30); VESSEL C(255); INSTRUMENT C(255); '
                'FILE_URL C(255); META_URL C(255)')
            for record in db:
                file_url = self.get_zip_file_url(product_record)

                dbf.write(record,
                          NAME=display_title,
                          BATHY_TYPE='Multibeam',
                          START_DATE=self.meta_cache.extract_start(
                              product_record.source_product.metadata_persistent_id),
                          END_DATE=self.meta_cache.extract_end(
                              product_record.source_product.metadata_persistent_id),
                          VESSEL=self.meta_cache.extract_vessel(
                              product_record.source_product.metadata_persistent_id),
                          INSTRUMENT=self.meta_cache.extract_instrument(
                              product_record.source_product.metadata_persistent_id),
                          FILE_URL=file_url,
                          META_URL=product_record.source_product.metadata_persistent_id)
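Note that add_fields is called unconditionally here and may fail if the fields already exist; a minimal guard, reusing the existence-check idea from Example #2 (the filtering below is a sketch, not part of the original):

# hypothetical guard: only add the specs whose field name is not present yet
specs = ['BATHY_TYPE C(20)', 'NAME C(255)', 'START_DATE C(30)',
         'END_DATE C(30)', 'VESSEL C(255)', 'INSTRUMENT C(255)',
         'FILE_URL C(255)', 'META_URL C(255)']
new_specs = [s for s in specs
             if s.split()[0].lower() not in [f.lower() for f in db.field_names]]
if new_specs:
    db.add_fields('; '.join(new_specs))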
Example #4
def edit_dbf_database(dbf_filename):
    def get_twitter_countries():
        def patch(locations):
            for loc in locations:
                if loc['name'] == 'United States':
                    loc['name'] = 'United States of America'
                elif loc['name'] == 'Korea':
                    loc['name'] = 'South Korea'
            return locations

        countries = {}
        with open(f_json_locations_from_twitter_api, 'r') as f:
            locations = json.load(f)

        # In order to make the country names match between both inputs,
        # we correct some names in the data coming from twitter.
        locations = patch(locations)

        for loc in locations:
            if loc['placeType']['name'] == 'Country':
                countries[loc['name']] = loc['woeid']

        # Sort the country names alphabetically.
        countries = dict(sorted(countries.items()))
        return countries

    def gen_world_countries(table):
        # As with the twitter input, iterate the countries
        # in alphabetical order of their names.
        table_sorted = table.create_index(lambda rec: rec.name_en)
        for rec in table_sorted:
            yield rec

    twitter_countries = get_twitter_countries()
    table = dbf.Table(dbf_filename, codepage='utf8')
    table.open(mode=dbf.READ_WRITE)
    table.codepage = dbf.CodePage('utf8')
    table.add_fields('tw_woeid N(8,0)')

    # Walk the countries in the NE dataset: when a country's name matches
    # the twitter dataset, store its associated WOEID, otherwise write -1.
    next_world_country = gen_world_countries(table)
    world_c = next(next_world_country)
    for key in twitter_countries:
        while world_c.name_en.strip() < key:
            dbf.write(world_c, tw_woeid=-1)
            world_c = next(next_world_country)
        if world_c.name_en.strip() == key:
            dbf.write(world_c, tw_woeid=twitter_countries[key])
            world_c = next(next_world_country)
    # the record still held in world_c has not been written yet
    dbf.write(world_c, tw_woeid=-1)
    for world_c in next_world_country:
        dbf.write(world_c, tw_woeid=-1)

    table.close()
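The matching loop above is a merge join over two alphabetically sorted inputs; a minimal sketch with plain lists makes the advancing logic visible (the sample names and woeid are made up, and, like the original, it assumes the NE list is not exhausted before the twitter names):

ne_names = ['Brazil', 'France', 'Zimbabwe']   # NE country names, sorted
tw_woeids = {'France': 23424819}              # twitter name -> woeid
i = 0
for key in sorted(tw_woeids):
    while ne_names[i] < key:
        print(ne_names[i], -1)                # NE country with no twitter match
        i += 1
    if ne_names[i] == key:
        print(ne_names[i], tw_woeids[key])    # matched: attach the woeid
        i += 1
for name in ne_names[i:]:
    print(name, -1)                           # trailing unmatched NE countries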
Example #5
# iterate over the table, and print the records
for record in table:
    print(record)
    print('--------')
    print(record[0:3])
    print([record.name, record.age, record.birth])
    print('--------')

# make a copy of the test table (structure, not data)
custom = table.new(
    filename='test_on_disk',
    default_data_types=dict(C=dbf.Char, D=dbf.Date, L=dbf.Logical),
)

# automatically opened and closed
with custom:
    # copy records from test to custom
    for record in table:
        custom.append(record)
    # modify each record in custom (could have done this in prior step)
    for record in custom:
        dbf.write(record, name=record.name.upper())
        # and print the modified record
        print(record)
        print('--------')
        print(record[0:3])
        print([record.name, record.age, record.birth])
        print('--------')

table.close()
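The loop above assumes a test table with name, age, and birth fields was created earlier; a minimal sketch of such a setup (field widths and sample rows are assumptions):

import dbf

table = dbf.Table('test', 'name C(30); age N(3,0); birth D')
table.open(mode=dbf.READ_WRITE)
for name, age, birth in (
        ('John Doe', 31, dbf.Date(1979, 9, 13)),
        ('Jane Doe', 28, dbf.Date(1982, 4, 1)),
        ):
    table.append({'name': name, 'age': age, 'birth': birth})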
Example #6
                # convert to degrees to get the angle of the normal (mapped into 0..360)
                dachorient_calc = dachorient_calc * 180 / pi + 180

                dachorient_diff_temp = abs(dachorient - dachorient_calc)

                # check whether the current orientation difference is lower than the stored one
                if dachorient_diff_temp < dachorient_diff:
                    print('found better edge idx.',
                        'new_edge=', idx,
                        'old_edge=', edge,
                        'new_dachorient_diff=', dachorient_diff_temp,
                        'old_dachorient_diff=', dachorient_diff)
                    # update the stored orientation difference
                    dachorient_diff = dachorient_diff_temp
                    # update edge idx
                    edge = idx

                print('edge (',
                    'id=', idx,
                    'x1=', x1,
                    'x2=', x2,
                    'y1=', y1,
                    'y2=', y2,
                    ') orientation=', dachorient_calc)

                i = i - 1
        # write the found edge index to the shapes dbf file.
        dbf.write(properties, edgeidx=edge)

        print('edge index for feature ', fid, ' is: ', edge)
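The orientation value appears to be an edge direction converted from radians to degrees and shifted into the 0..360 range; a standalone sketch of that computation (the function name and the offset choice are assumptions):

from math import atan2, pi

def edge_orientation(x1, y1, x2, y2):
    # azimuth of the edge (x1, y1) -> (x2, y2), mapped into 0..360 degrees
    return atan2(y2 - y1, x2 - x1) * 180 / pi + 180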
Example #7
			fnum C(6);\
			fmodlstde1 C(30);\
			fmodlstde2 C(30);\
			fmodlstde3 C(30);\
			fmod_list C(3);\
			fmod_min N(2,0);\
			fmod_max N(2,0);\
			fdeleted L'              ,\
   codepage='cp936'\
   )
mhd_tmp.open()
for x in mh:
    mhd_tmp.append(x)
for x in mhd_tmp:
    if x['fmod_min'] is None:
        dbf.write(x, fmod_min=0)
    if x['fmod_max'] is None:
        dbf.write(x, fmod_max=0)
mhd_tmp.close()
mhd.close()
############################################################
# this section replaces modi_num & modhd_num with the newly generated sequence
# tbl stores the replacement list
tbl = []
modi_tmp.open()
for x in modi_tmp:
    tbl.append((x['fmod_num'], x['fnum']))
modi_tmp.close()
mlist_tmp.open()
# if the result is found in tbl, replace the record and BREAK the search loop
# the replaced record will be a 6-digit number
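The search-and-replace loop described by the comments is cut off here; a sketch of what it likely does, assuming each tbl entry pairs an old fmod_num with its replacement fnum:

for rec in mlist_tmp:
    for old_num, new_num in tbl:
        if rec['fmod_num'] == old_num:
            # the replacement is the 6-digit number generated earlier
            dbf.write(rec, fmod_num=new_num)
            break
mlist_tmp.close()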
Example #8
        flag = id.search(match=sub_list[1])
        for x in flag:
            temp.append(('',
                sub_list[0],
                x.fnew_list,
#               '%6s' % upper,
                '',
                x.fmod_num,
                x.fsub_list,
                ))
#           upper += 1
    lookup_db.pack()

    for x in temp:
        lookup_db.append(x)

    for sub_list in lookup:
        counter = 1
        id = lookup_db.create_index(lambda rec: rec.fmod_list)
        flag = id.search(match=sub_list[0])
        for x in flag:
            dbf.write(x, fseq='%6s' % counter)
            counter += 1

count = 1
for rec in lookup_db:
    dbf.write(rec, fnum='%6s' % count)
    count += 1

lookup_db.close()
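Note that '%6s' pads with spaces on the left, so the written keys keep a fixed width and sort consistently as strings; a quick standalone illustration (not from the original):

print(repr('%6s' % 42))    # '    42' - space-padded to width 6
print(repr('%06d' % 42))   # '000042' - a zero-padded alternative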