Example #1
    def __init__(self, shape_paths):
        # keep only clipped MapPLUTO shapefile paths
        pluto_filter = lambda x: ('unclipped' not in x) and ('mappluto' in x)
        shape_paths = list(filter(pluto_filter, shape_paths))


        print("aggregating shapefiles")
        if len(shape_paths) > 1:
            d = [read_shapefile(f) for f in shape_paths]
            d = pd.concat(d)
        else:
            d = read_shapefile(shape_paths[0])

        print("filling nulls geometries with empty polygons")
        d['geometry'] = d['geometry'].fillna()
        
        

        print("converting coordinate reference system to ESPG:4326")
        d = d.to_crs({'init': "EPSG:4326"})

        print("making well known text representation out of geometries")
        wkt_geom = d['geometry'].apply(lambda x: wkt.dumps(x))
        d = pd.DataFrame(d)
        d = d.drop(columns=['geometry'])
        d['wkt_geom'] = wkt_geom
        print("cleaning column names")
        d = d.clean_names()
        self.wkt_file = d
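
A self-contained sketch of the core transformation this constructor performs (reprojection to EPSG:4326, WKT serialization, then dropping the geometry column), using a toy GeoDataFrame; the toy parcel, its EPSG:2263 source CRS, and the bbl column are illustrative assumptions rather than values taken from the code above:

import geopandas as gpd
import pandas as pd
from shapely import wkt
from shapely.geometry import Polygon

# toy parcel in EPSG:2263 (NY State Plane Long Island); the bbl value is made up
toy = gpd.GeoDataFrame(
    {'bbl': [1000010001]},
    geometry=[Polygon([(980000, 200000), (980100, 200000), (980100, 200100)])],
    crs='EPSG:2263',
)

# same pipeline as the constructor: reproject, serialize the geometry to WKT,
# then keep a plain DataFrame with a wkt_geom column instead of the geometry
toy = toy.to_crs('EPSG:4326')
wkt_geom = toy['geometry'].apply(wkt.dumps)
flat = pd.DataFrame(toy).drop(columns=['geometry'])
flat['wkt_geom'] = wkt_geom
print(flat)
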
Example #2
import os
import pickle

from utils import read_shapefile, create_spatial_index

SIMPLIFICATION = .001

# read in boundary info using read_shapefile from the utils module
BOUNDARY_DATA = '../Chicago_boundaries/'

# --------------------------------------------- read in all of the boundary data
# CENSUS_BLOCK_2000 = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'Census Blocks.shp'),
#     id_fieldname = "CENSUS_B_1",
#     simplification = SIMPLIFICATION,
# )
# CENSUS_BLOCK_2000_INDEX = create_spatial_index(CENSUS_BLOCK_2000)
print "getting 2010 census"
CENSUS_BLOCK_2010 = read_shapefile(
    os.path.join(BOUNDARY_DATA, 'CensusBlockTIGER2010.shp'),
    id_fieldname = "GEOID10",
    simplification = SIMPLIFICATION,
)
pickle.dump(CENSUS_BLOCK_2010, open("../web/apps/main/management/commands/census.p", 'wb'))
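
# Illustrative sanity check: the census blocks pickled above can be reloaded
# later with pickle.load (the variable name here is arbitrary).
with open("../web/apps/main/management/commands/census.p", 'rb') as stream:
    _reloaded_census = pickle.load(stream)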

CENSUS_BLOCK_2010_INDEX = create_spatial_index(CENSUS_BLOCK_2010)

# print CENSUS_BLOCK_2010

# SCHOOL = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'BoundaryGrades10.shp'),
#     id_fieldname = "SCHOOLID",
#     simplification = SIMPLIFICATION,
# )

# CENSUS_TRACT_2000 = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'Census_Tracts.shp'),
#     id_fieldname = "CENSUS_T_1",
#     simplification = SIMPLIFICATION,
# )
# stdlib
import csv
import os
import pickle

# 3rd party
from utils import read_shapefile, create_spatial_index, aggregate_metrics, \
    write_json

# set this to an amount (in lat/lng units) to use for boundary
# simplification (None for no simplification)
SIMPLIFICATION = None

# read in boundary info using read_shapefile from the utils module
BOUNDARY_DATA = '../data/Comm_20Areas'

# --------------------------------------------- read in all of the boundary data
COMM_AREA = read_shapefile(
    os.path.join(BOUNDARY_DATA, 'CommAreas.shp'),
    id_fieldname = "COMMUNITY",
    simplification = SIMPLIFICATION,
)

MUNI_AREA = read_shapefile(
    os.path.join(BOUNDARY_DATA, 'Municipalities.shp'),
    id_fieldname = "Name",
    simplification = SIMPLIFICATION,
)

# ILL_COUNTY = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'Illinois_all.shp'),
#     id_fieldname = "NAME",
#     simplification = SIMPLIFICATION,
# )
# CENSUS_TRACT_2000 = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'Census_Tracts.shp'),
#     id_fieldname = "CENSUS_T_1",
#     simplification = SIMPLIFICATION,
# )

# CENSUS_TRACT_2010 = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'CensusTractsTIGER2010.shp'),
#     id_fieldname = "GEOID10",
#     simplification = SIMPLIFICATION,
# )

COMMUNITY_AREA = read_shapefile(
    os.path.join(BOUNDARY_DATA, 'CommAreas.shp'),
    id_fieldname="COMMUNITY",
    simplification=SIMPLIFICATION,
)
with open('community_test.p', 'wb') as stream:
    pickle.dump(COMMUNITY_AREA, stream)
# NEIGHBORHOOD = read_shapefile(
#     os.path.join(BOUNDARY_DATA, 'Neighborhoods_2012b.shp'),
#     id_fieldname = "PRI_NEIGH",
#     simplification = SIMPLIFICATION,
# )
with open('../data/raw_energy_data.csv', 'r') as infile:
    reader = csv.reader(infile, delimiter='|')
    header_row = next(reader)
    name_set = set()
    for row in reader:
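
The read_shapefile and create_spatial_index helpers imported from utils are not shown on this page. Here is a minimal sketch of what they might look like, assuming read_shapefile wraps geopandas loading plus optional shapely simplification and create_spatial_index builds an rtree index over the boundary bounding boxes; the function bodies are assumptions for illustration, not the project's actual utils module:

import geopandas as gpd
from rtree import index


def read_shapefile(path, id_fieldname, simplification=None):
    # Assumed behavior: map each feature id to its (optionally simplified) geometry.
    gdf = gpd.read_file(path)
    boundaries = {}
    for _, row in gdf.iterrows():
        geom = row['geometry']
        if simplification is not None:
            geom = geom.simplify(simplification)
        boundaries[row[id_fieldname]] = geom
    return boundaries


def create_spatial_index(boundaries):
    # Assumed behavior: build an rtree index over the boundary bounding boxes,
    # storing each feature id as the hit object so queries can map back to ids.
    idx = index.Index()
    for i, (boundary_id, geom) in enumerate(boundaries.items()):
        idx.insert(i, geom.bounds, obj=boundary_id)
    return idx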