def save_map(self, args):
    self.area_shapefile = args["area_shapefile"]
    self.outline_shapefile = args["outline_shapefile"]
    svg = map2svg(args["width"], args["height"])
    svg.load_shapefile(self.area_shapefile)
    svg.autoscale()
    svgtxt = svg.build_svg(None, args['shape_fieldID'], 'regio')
    mapdata = self.mapdata
    outfile = args['outfile']
    if self.outline_shapefile is not None:
        # print 'outline'
        outline_shp = shpUtils.loadShapefile(self.outline_shapefile)
        svgtxt += svg.build_svg(outline_shp, args['outline_fieldID'], 'outline',
                                include_data_regio=False)
        # labelID = args['outline_labelID']
    svgtxt = svg.embed_svg(svgtxt)
    f = open(outfile + '.svg', 'w')
    f.write(svgtxt)
    f.close()
    regio_ids = svg.get_shapeids(None, args['shape_fieldID'])
    s = json.dumps(regio_ids)
    f = open("js/shape_ids.js", 'w')
    f.write("var shape_ids=")
    f.write(s)
    f.write(';\n')
    f.close()
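A sketch of the args dict this method expects, inferred from the keys it reads above; every path and field name below is a hypothetical placeholder:

args = {
    "area_shapefile": "shp/areas.shp",        # hypothetical path
    "outline_shapefile": "shp/outlines.shp",  # or None to skip the outline layer
    "width": 800,
    "height": 600,
    "shape_fieldID": "REGIO_ID",              # hypothetical DBF field names
    "outline_fieldID": "PROV_ID",
    "outfile": "out/map",                     # '.svg' is appended
}
mapper.save_map(args)  # 'mapper' being an instance of the surrounding class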
def loadshapefile(filename):
    print 'Loading shapefile %s' % filename
    t1 = time.time()
    shapefile = shpUtils.loadShapefile(filename)
    t2 = time.time()
    print '%0.3f seconds load time' % (t2 - t1)
    return shapefile
def __init__(self, filename):
    self.filename = filename  # the filename and location of the shapefile
    self.records = []         # list of each poly in the shapefile
    self.minX = 9999
    self.maxX = -9999
    self.minY = 9999
    self.maxY = -9999
    shpRecords = shpUtils.loadShapefile(self.filename)
    for i in range(0, len(shpRecords)):
        x = []
        y = []
        for j in range(0, len(shpRecords[i]['shp_data']['parts'][0]['points'])):
            tempx = float(shpRecords[i]['shp_data']['parts'][0]['points'][j]['x'])
            tempy = float(shpRecords[i]['shp_data']['parts'][0]['points'][j]['y'])
            x.append(tempx)
            y.append(tempy)
        name = shpRecords[i]['dbf_data']['NAME']
        #logging.info("reading name:" + name)
        #name = 'test'
        self.records.append(Poly(x, y, name))
    # Calculate the spatial extents. Ideally this would be done in the loop
    # above, but this separate pass is fast enough.
    for p in self.records:  # for each poly
        tX = min(p.coords[..., 0])  # min X of this poly
        tY = min(p.coords[..., 1])  # min Y of this poly
        if tX < self.minX: self.minX = tX  # grow the recorded extent if needed
        if tY < self.minY: self.minY = tY
        tX = max(p.coords[..., 0])  # max X of this poly
        tY = max(p.coords[..., 1])  # max Y of this poly
        if tX > self.maxX: self.maxX = tX
        if tY > self.maxY: self.maxY = tY
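Poly is not defined in this snippet; a minimal sketch, assuming coords is an (N, 2) NumPy array (which is what the p.coords[..., 0] indexing above requires):

import numpy as np

class Poly:
    def __init__(self, x, y, name):
        self.name = name
        # (N, 2) array: column 0 holds x values, column 1 holds y values
        self.coords = np.column_stack((x, y))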
def load_world():
    shpRecords = shpUtils.loadShapefile('world_borders/world_borders.shp')['features']
    colors = load_color_map()
    plt.figure(figsize=(16, 9))
    last = ''
    for i in range(0, len(shpRecords)):
        if shpRecords[i]['info']['CNTRY_NAME'] != last:
            print shpRecords[i]['info']['CNTRY_NAME']
            last = shpRecords[i]['info']['CNTRY_NAME']
        # x and y are empty lists to be populated with the coords of each geometry.
        x = []
        y = []
        for j in range(0, len(shpRecords[i]['shape']['parts'][0]['points'])):
            tempx = float(shpRecords[i]['shape']['parts'][0]['points'][j][0])
            tempy = float(shpRecords[i]['shape']['parts'][0]['points'][j][1])
            x.append(tempx)
            y.append(tempy)
        # Create a polygon in matplotlib for each geometry in the shapefile;
        # Congo/Zaire share the DRC colour, everything else uses its own.
        if shpRecords[i]['info']['CNTRY_NAME'] in colors:
            if shpRecords[i]['info']['CNTRY_NAME'] in ('Congo', 'Zaire'):
                plt.fill(x, y, facecolor=colors['Democratic Republic of the Congo'])
            else:
                plt.fill(x, y, facecolor=colors[shpRecords[i]['info']['CNTRY_NAME']])
    plt.axis('equal')
    plt.savefig('world_calls.png', dpi=100, format='png')
    plt.show()
def save_map(self, args):
    self.width = 800
    self.height = 800
    shpRecords = shpUtils.loadShapefile(args["area_shapefile"])
    outlineRecords = shpUtils.loadShapefile(args["outline_shapefile"])
    centroidRecords = shpUtils.loadShapefile(args["centroid_shapefile"])
    area_js = self.build_area_js(shpRecords, args['shape_fieldID'])
    centroid_js = self.build_centroid_js(shpRecords, args['shape_fieldID'])
    labelID = args['shape_labelID']
    outfile = args['outfile']
    f = open("js/area.js", "w")
    f.write(area_js)
    f.write("\n")
    #f.write("var total_length=%d;\n" % self.total_length)
    f.write("var minx=%d;\n" % self.minx)
    f.write("var miny=%d;\n" % self.miny)
    f.write("var maxx=%d;\n" % self.maxx)
    f.write("var maxy=%d;\n" % self.maxy)
    f.write("var dx=%d;\n" % self.dx)
    f.write("var dy=%d;\n" % self.dy)
    f.write("var width=%d;\n" % self.width)
    f.write("var height=%d;\n" % self.height)
    f.close()
    f = open("js/centroids.js", 'w')
    f.write(centroid_js)
    f.close()
    regio_ids = self.get_shapeids(shpRecords, args['shape_fieldID'])
    for regio, shapes in regio_ids.items():
        regio_ids[regio] = [shape[1:] for shape in shapes]
    # f = open("js/regioshapes2.js", 'w')
    # s = json.dumps(regiocoords)
    # f.write("var regioshapes2=" + s + ';\n')
    # f.close()
    s = '{}'
    if labelID is not None:
        # NOTE: 'labels' is not defined in this snippet; it is presumably
        # built from labelID elsewhere in the class.
        self.write_keyfile('js/regiolabels.js', labels, 'regio')
def read_points(shape_file):
    shpRecords = shpUtils.loadShapefile(shape_file)
    points = []
    for record in shpRecords:
        points.append((
            record['dbf_data']['POINTID'],
            record['shp_data'],
            record['dbf_data']['GRID_CODE']
        ))
    return points
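A sketch of consuming the returned tuples (the file name is a hypothetical placeholder):

points = read_points('grid_points.shp')  # hypothetical file name
for pointid, shp_data, grid_code in points:
    print pointid, grid_code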
def demoBetterDirectLoadSHAPE():
    import shpUtils
    ss = shpUtils.loadShapefile('/home/cpbl/rdc/inputData/healthRegions/shp/HR000b07_PZ.shp')
    shpRecords = ss['features']
    # now do whatever you want with the resulting data;
    # here, just print out the first feature in this shapefile
    print shpRecords[0]['dbf_data']
    for part in shpRecords[0]['shp_data']:
        print part, shpRecords[0]['shp_data'][part]
    return shpRecords
def __init__(self):
    self.db = database.DBConnection()
    colors = self.get_color_map()
    # Dict mapping country names to polygon coords
    self.nations = {}
    # Matplotlib canvas to draw on
    self.figure = plt.figure(figsize=(16, 9))
    self.root = Tk.Tk()
    self.root.title('Int\'l Calls to Rwanda')
    self.canvas = FigureCanvasTkAgg(self.figure, master=self.root)
    self.base = self.figure.add_subplot(111)
    # Load countries from the shape file into self.nations
    xmax, xmin, ymax, ymin = 0, 0, 0, 0
    shpRecords = shpUtils.loadShapefile('world_borders/world_borders.shp')['features']
    for i in range(0, len(shpRecords)):
        # 'verts' is populated with tuples of each border point
        verts = []
        for j in range(0, len(shpRecords[i]['shape']['parts'][0]['points'])):
            tempx = float(shpRecords[i]['shape']['parts'][0]['points'][j][0])
            tempy = float(shpRecords[i]['shape']['parts'][0]['points'][j][1])
            verts.append((tempx, tempy))
            if tempx > xmax: xmax = tempx
            if tempx < xmin: xmin = tempx
            if tempy > ymax: ymax = tempy
            if tempy < ymin: ymin = tempy
        cntry_name = shpRecords[i]['info']['CNTRY_NAME']
        if cntry_name in colors:
            # Congo/Zaire share the DRC colour; everything else uses its own.
            if cntry_name.find('Congo') >= 0 or cntry_name.find('Zaire') >= 0:
                facecolor = colors['Democratic Republic of the Congo']
            else:
                facecolor = colors[cntry_name]
            self.nations.setdefault(cntry_name, []).append(
                PolyCollection([verts], facecolor=facecolor, edgecolor='black'))
    # Add countries loaded into self.nations to the canvas
    for cntry, polys in self.nations.items():
        # Each country can have multiple polygons representing it
        for p in polys:
            self.base.add_collection(p)
    plt.xlim(xmin, xmax)
    plt.ylim(ymin + 20, ymax + 20)
    self.canvas.show()
    self.figure.savefig('world', dpi=100, format='png')
def shape_to_dict(shapefile):
    # Key: STATE + COUNTY + TRACT, with TRACT left-justified and zero-padded to 6 chars.
    name = lambda d: d['STATE'] + d['COUNTY'] + ('%-6s' % d['TRACT']).replace(' ', '0')
    shp = shpUtils.loadShapefile(shapefile)
    out = {}
    for sh in shp:
        n = name(sh['dbf_data'])
        parts = []
        for shlist in sh['shp_data']['parts']:
            points = []
            for pt in shlist['points']:
                points.append((pt['x'], pt['y']))
            parts.append(np.asarray(points))
        out[n] = parts
    return out
def getInfoFromShp(shpFile):
    """Read polygons from a shp file and place them in a dictionary.

    polyInfo = dictionary of polygon info (polygons are lists of (x, y) pairs)
    shpFile  = shp file to read from

    Requires import of the shpUtils module.
    """
    import shpUtils
    polyInfo = {}
    shpRecs = shpUtils.loadShapefile(shpFile)
    numRecords = len(shpRecs)
    for irec in range(numRecords):
        polyInfo[irec] = {}
        polyInfo[irec]['POINTS'] = []
        numVertices = len(shpRecs[irec]['shp_data']['parts'][0]['points'])
        for ivert in range(numVertices):
            lon = shpRecs[irec]['shp_data']['parts'][0]['points'][ivert]['x']
            lat = shpRecs[irec]['shp_data']['parts'][0]['points'][ivert]['y']
            polyInfo[irec]['POINTS'].append((lon, lat))
    return polyInfo
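A usage sketch (the file name is a hypothetical placeholder):

polyInfo = getInfoFromShp('regions.shp')  # hypothetical file name
for irec in sorted(polyInfo):
    print irec, len(polyInfo[irec]['POINTS']), 'vertices'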
    m[zipcode] = occurencyNumb
    return m

# Declare inputs
zipcodefile = "chicago.csv"
shapefile = "ZipCodes.shp"

# Define colours; the last assignment is the palette actually used.
colours = {0: "#F7FCF0", 1: "#E0F3DB", 2: "#CCEBC5", 3: "#A8DDB5", 4: "#7BCCC4",
           5: "#4EB3D3", 6: "#2B8CBE", 7: "#0868AC", 8: "#084081"}
colours = {0: "#ffffff", 1: "#fcfcff", 2: "#ebecff", 3: "#ebecff", 4: "#dadcff",
           5: "#c9ccff", 6: "#b8bcff", 7: "#a7acff", 8: "#969cff", 9: "#858cff",
           10: "#747cff", 11: "#636cff", 12: "#525dff", 13: "#414dff", 14: "#303dff",
           15: "#1f2dff", 16: "#0e1dff", 17: "#0010fc", 18: "#000feb", 19: "#000eda",
           20: "#000dc9", 21: "#000bb8", 22: "#000aa7"}
colours = {0: "#ffffff", 1: "#ebecff", 2: "#dadcff", 3: "#a7acff", 4: "#a7acff",
           5: "#414dff", 6: "#0e1dff", 7: "#000eda", 8: "#000aa7"}
#colours = {0:"#F7FCF0", 1:"#F7FCF0", 2:"#E0F3DB", 3:"#E0F3DB", 4:"#CCEBC5", 5:"#CCEBC5", 6:"#A8DDB5", 7:"#7BCCC4", 8:"#4EB3D3", 9:"#2B8CBE", 10:"#0868AC", 11:"#084081"}
#colours = {0:"#FFF7EC", 1:"#FEE8C8", 2:"#FDD49E", 3:"#FDBB84", 4:"#FC8D59", 5:"#EF6548", 6:"#D7301F", 7:"#B30000", 8:"#7F0000"}

# load the shapefile
shpRecords = shpUtils.loadShapefile(shapefile)
# load zipcodefile
m = loadZipcode(zipcodefile)

# Find the highest occurrence count to scale the colour buckets.
max = 0
for i in range(0, len(shpRecords)):
    zipcode = shpRecords[i]["dbf_data"]["ZIP"]
    if m[zipcode] > max:
        max = m[zipcode]
unit = max / 8
print max

for i in range(0, len(shpRecords)):
    # x and y are empty lists to be populated with the coordinates of each geometry.
    x = []
    y = []
    for j in range(0, len(shpRecords[i]['shp_data']['parts'][0]['points'])):
def collate_zones(shape_file):
    # First collate the polygons by zone name
    print "Loading SHP file..."
    rows = shpUtils.loadShapefile(shape_file)
    collated = {}
    for row in rows:
        name = row["dbf_data"]["TZID"].strip()
        if name == "uninhabited":
            continue
        sys.stderr.write("Processing row for '%s'\n" % name)
        collated[name] = collated.get(name, [])
        for p in row["shp_data"]["parts"]:
            collated[name].append({
                "points": p["points"],
            })

    # Then add some information and try to simplify/reduce the polygons
    zones = {}
    collation_now = time.time()
    for name, shp_data in collated.iteritems():
        sys.stderr.write("Simplifying %s\n" % name)
        transition_info = []
        tz = pytz.timezone(name)
        if "_utc_transition_times" in dir(tz):
            last_info = [sys.maxint, 0, '']
            for i, transition_time in enumerate(tz._utc_transition_times):
                transition_time = int(time.mktime(transition_time.timetuple()))
                td = tz._transition_info[i][0]
                info = [
                    transition_time,
                    timedelta_to_minutes(td),
                    tz._transition_info[i][2]
                ]
                if transition_time < collation_now:
                    last_info = info
                    continue
                # Include the last timezone prior to now
                if last_info[0] < collation_now:
                    transition_info.append(last_info)
                transition_info.append(info)
                last_info = info

        if len(transition_info) == 0:
            # Assume no daylight savings
            now = datetime.datetime.now()
            td = tz.utcoffset(now)
            transition_info.append([0, timedelta_to_minutes(td), tz.tzname(now)])

        # Calculate a collation key based on future timezone transitions
        collation_key = ''
        for t in transition_info:
            if t[0] >= collation_now:
                collation_key += "%d>%d," % (t[0], t[1])

        # For non-daylight-savings regions, just use the UTC offset
        if len(collation_key) == 0:
            collation_key = "0>%d" % transition_info[-1][1]

        zones[collation_key] = zones.get(collation_key, {
            "bounding_box": {
                "xmin": sys.maxint,
                "ymin": sys.maxint,
                "xmax": -sys.maxint - 1,
                "ymax": -sys.maxint - 1
            },
            "polygons": [],
            "transitions": {},
            "name": name
        })
        zones[collation_key]["transitions"][name] = transition_info

        polygons = reduce_polygons(shp_data, 0.1, 0.01, 4, 5000, 0, 0.05)
        for part in polygons:
            polygonInfo = simplify(part["points"])
            polygonInfo["name"] = name
            zones[collation_key]["polygons"].append(polygonInfo)
            b = zones[collation_key]["bounding_box"]
            b["xmin"] = min(b["xmin"], polygonInfo["bounds"][0])
            b["ymin"] = min(b["ymin"], polygonInfo["bounds"][1])
            b["xmax"] = max(b["xmax"], polygonInfo["bounds"][2])
            b["ymax"] = max(b["ymax"], polygonInfo["bounds"][3])
            del polygonInfo["bounds"]

    return zones
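timedelta_to_minutes is referenced above but not defined in this snippet; a plausible helper, assuming the minute offsets are whole-minute UTC offsets as used in the collation keys, would be:

def timedelta_to_minutes(td):
    # Convert a datetime.timedelta to whole minutes
    # (negative for west-of-UTC offsets).
    return td.days * 24 * 60 + td.seconds // 60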
#!/usr/bin/env python
# readshape.py - test

import sys
import time
import shpUtils

t1 = time.time()
# load the shapefile, populating a list of dictionaries
#features = shpUtils.loadShapefile('states/st99_d00_shp/st99_d00.shp')
shapefile = shpUtils.loadShapefile('states/st99_d00_shp-90/st99_d00.shp')
features = shapefile['features']
t2 = time.time()
print '%0.3f seconds load time' % (t2 - t1)
print '%d features' % len(features)

#for feature in features:
for i in xrange(len(features)):
    feature = features[i]
    info = feature['info']
    shape = feature['shape']
    type = shape['type']
    if type == 0:
        pass
    elif type == 5:
        parts = shape['parts']
        if len(parts) > 1:
def load_shapefile(self, infile):
    self.shaperecords = shpUtils.loadShapefile(infile)
    return self.shaperecords
fnDbf = os.path.basename(dbf.filename)
open("upload/" + fnDbf, "wb").write(dbf.file.read())
message = "2"

# this needs to be generalized
connection = Connection()
db = connection.opendata
my_collection = db[coll]
my_collection.ensure_index([("location", GEO2D)])
att_collection = db.attributes

try:
    attributes = set()
    # load the shapefile
    shpRecords = shpUtils.loadShapefile("upload/" + fnShp)
    # add all the records in the shapefile to the new collection
    for record in shpRecords:
        if "x" in record["location"]:
            point = (record["location"]["x"], record["location"]["y"])
        elif "xmax" in record["location"]:
            # No point location: use the centre of the bounding box instead.
            xmax = record["location"]["xmax"]
            xmin = record["location"]["xmin"]
            ymax = record["location"]["ymax"]
            ymin = record["location"]["ymin"]
            x = xmin + ((xmax - xmin) / 2)
            y = ymin + ((ymax - ymin) / 2)
            point = (x, y)
        else:
            continue
# Extract the shapefile from its tarball, then build polygons from it.
t = tarfile.open(shpfile)
t.extractall()
shp = shpfile.replace(".tar.gz", "")
shp = "%s/%s.shp" % (shp, shp)

polys = []
print shp
for record in shpUtils.loadShapefile(shp):
    for part in record['shp_data']['parts']:
        poly = []
        for pt in part['points']:
            if pt.has_key('x') and pt.has_key('y'):
                poly.append((pt['x'], pt['y']))
        poly = tuple(poly)
        p = Polygon(poly)
#!/usr/bin/env python
# Analyze the WWF Terrestrial Ecoregions of the World shape files,
# http://www.worldwildlife.org/publications/terrestrial-ecoregions-of-the-world
# Dependencies: shpUtils.py and dbfUtils.py, both from
# http://indiemaps.com/blog/2008/03/easy-shapefile-loading-in-python/

import shpUtils

shpRecords = shpUtils.loadShapefile("wwf_terr_ecos.shp")
print "Loaded the shape file"

# The list of biomes defined in wwf_terr_ecos.htm:
biomes = [
    "BIOME 0",  # should never occur
    "Tropical & Subtropical Moist Broadleaf Forests",
    "Tropical & Subtropical Dry Broadleaf Forests",
    "Tropical & Subtropical Coniferous Forests",
    "Temperate Broadleaf & Mixed Forests",
    "Temperate Conifer Forests",
    "Boreal Forests/Taiga",
    "Tropical & Subtropical Grasslands, Savannas & Shrublands",
    "Temperate Grasslands, Savannas & Shrublands",
    "Flooded Grasslands & Savannas",
    "Montane Grasslands & Shrublands",
    "Tundra",
    "Mediterranean Forests, Woodlands & Scrub",
    "Deserts & Xeric Shrublands",
    "Mangroves",
    # WWF, at least, uses 99 for unknown biome.
]
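To get a feel for the data, one might tally features per biome; this sketch assumes the records carry a 'dbf_data' dict with a 'BIOME' field (an assumption; check the field names in your copy of the data):

counts = {}
for rec in shpRecords:
    b = int(float(rec['dbf_data']['BIOME']))  # 'BIOME' field name is an assumption
    counts[b] = counts.get(b, 0) + 1
for b in sorted(counts):
    label = biomes[b] if 0 <= b < len(biomes) else "Unknown (WWF uses 99)"
    print '%6d  %s' % (counts[b], label)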
0: "#ffffff", 1: "#ebecff", 2: "#dadcff", 3: "#a7acff", 4: "#a7acff", 5: "#414dff", 6: "#0e1dff", 7: "#000eda", 8: "#000aa7" } #colours = {0:"#F7FCF0", 1:"#F7FCF0", 2:"#E0F3DB", 3:"#E0F3DB", 4:"#CCEBC5", 5:"#CCEBC5", 6:"#A8DDB5", 7:"#7BCCC4", 8:"#4EB3D3", 9:"#2B8CBE", 10:"#0868AC", 11:"#084081"} #colours = {0:"", 1:"", 2:"", 3:"", 4:"", 5:"", 6:"", 7:"", 8:""} #colours = {0:"#FFF7EC", 1:"#FEE8C8", 2:"#FDD49E", 3:"#FDBB84", 4:"#FC8D59", 5:"#EF6548", 6:"#D7301F", 7:"#B30000", 8:"#7F0000"} # load the shapefile shpRecords = shpUtils.loadShapefile(shapefile) # load zipcodefile m = loadZipcode(zipcodefile) max = 0 for i in range(0, len(shpRecords)): zipcode = shpRecords[i]["dbf_data"]["ZIP"] if m[zipcode] > max: max = m[zipcode] unit = max / 8 print max for i in range(0, len(shpRecords)): # x and y are empty lists to be populated with the coordinates of each geometry. x = [] y = [] for j in range(0, len(shpRecords[i]['shp_data']['parts'][0]['points'])):
import shpUtils
import re

shpRecords = shpUtils.loadShapefile('santiago/cl_13comunas_geo.shp')
all_coords = []
for i in range(0, len(shpRecords)):
    coords = []
    name = re.sub(r"[^\w\s]", "", shpRecords[i]['dbf_data']["NOMBRE"].lower().strip())
    # NOTE: both branches currently do the same thing; the membership test
    # looks like a leftover hook for special-casing these comunas.
    if name in ("san jose de maipo", "lo barnechea", "curacavi", "melipilla",
                "maria pinto", "pirque", "buin", "el monte", "talagante",
                "lampa", "colina", "peaflor"):
        print(name)
    else:
        print(name)
    for j in range(0, len(shpRecords[i]['shp_data']['parts'][0]['points'])):
        tempx = float(shpRecords[i]['shp_data']['parts'][0]['points'][j]['x'])
        tempy = float(shpRecords[i]['shp_data']['parts'][0]['points'][j]['y'])
        coords.append((tempx, tempy))
    coords = ["[%s, %s]" % row for row in coords]
    coords = ',\n '.join(coords)
    all_coords.append("{'name':'" + name + "','coords':[" + coords + "]}")

all_coords = ',\n'.join(all_coords)
santiago = open("santiago.js", "w")
# original script from Vadim Ogievetsky, modified by Vincent Hiribarren
# load the shapefile, populating a list of dictionaries
import shpUtils

shpRecords = shpUtils.loadShapefile('your_file.shp')

# now do whatever you want with the resulting data;
# here, emit every feature as part of a GeoJSON FeatureCollection
#print '[[[', shpRecords[3], ']]]'
print '{"type":"FeatureCollection","features":['
for record in shpRecords:
    dbf = record['dbf_data']
    shp = record['shp_data']
    name = dbf['NAME'].strip().replace("'", "\\'")
    code = dbf['ID']  # or FIPS or ISO3 or UN
    borders = []
    for part in shp.get('parts', []):
        border = []
        for point in part['points']:
            border.append('[%.6f, %.6f]' % (point['x'], point['y']))
        border = ','.join(border)
        borders.append('[' + border + ']')
    borders = '[' + ','.join(borders) + ']'
    str = '{"type":"Feature","properties":{"name":"%s","code":"%s"},"geometry":{"type":"MultiPolygon","coordinates":[%s]}},' % (name, code, borders)
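The snippet cuts off before the collection is closed; since each feature string above ends with a comma, the raw output would not be valid JSON. One hedged way to finish (an assumption about the missing tail) is to buffer each feature string, without its trailing comma, in a list during the loop, then:

# assuming the loop above appended each feature string (minus the trailing
# comma) to a list called `features`
print ',\n'.join(features)
print ']}'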