Example #1

links = stirrups(vertical_rebars, vAttribute, pureRebars)

if len(links) == 2:
    main_links = links[0]
    side_links = links[1]
else:
    main_links = links
print(len(links))
""" Plotting """
# Inside Links
for main_link in main_links:
    if len(gc_curve(main_link)) == 2:
        m_line = geometry.LineString(gc_curve(main_link))
        plt.plot(gc_point(main_link)[0], gc_point(main_link)[1])
    else:
        m_poly = geometry.Polygon(gc_curve(main_link))
        m_curve = m_poly.exterior.xy
        plt.plot(m_curve[0], m_curve[1], 'bo-')
if len(links) == 2:
    for side_link in side_links:
        s_poly = geometry.Polygon(gc_curve(side_link))
        s_curve = s_poly.exterior.xy
        plt.plot(s_curve[0], s_curve[1], 'bo-')

# Column Concrete Curve
column_curves = column[0]
column_plot_curve = column_curves.exterior.xy
plt.plot(column_plot_curve[0], column_plot_curve[1], 'bo-')

# Main Stirrups
basic_vertical_points = vertical_rebars['bPoints'] + vertical_rebars['_hPoints'] + vertical_rebars['_bPoints'][::-1] + vertical_rebars['hPoints'][::-1]
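
The helpers used above (stirrups, gc_curve, gc_point) are project-local, so the snippet is not runnable on its own. A minimal stand-alone sketch of the plotting idea, assuming only Shapely and Matplotlib (the rectangle coordinates are made up):

# Sketch: plot a Shapely polygon's exterior ring, as the loop above does
# for each link.
from shapely import geometry
import matplotlib.pyplot as plt

link = geometry.Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])
m_curve = link.exterior.xy           # (x-sequence, y-sequence)
plt.plot(m_curve[0], m_curve[1], 'bo-')
plt.show()
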
Example #2

                lon_0=8.0)
    pf = open('coast_proj.pickle', 'wb')
    pickle.dump((m, ), pf)
    pf.close()

mxu, myu = m(xu, yu)
mxl, myl = m(xl, yl)

# get coast polygon
polys = m.coastpolygons
land = []
sland = []
for (xp, yp), ctype in zip(polys, m.coastpolygontypes):
    if ctype < 2:
        coords = list(zip(xp, yp))  # materialize: a bare zip is single-use in Python 3
        land.append(coords)
        sland.append(sg.Polygon(coords))

# add Helgoland islands
hcoords = [(7.891, 54.169), (7.899, 54.175), (7.892, 54.184), (7.884, 54.189),
           (7.869, 54.189)]
hcoords = [m(xx, yy) for xx, yy in hcoords]
land.append(hcoords)
sland.append(sg.Polygon(hcoords))
hcoords1 = hcoords

hcoords = [(7.909, 54.180), (7.919, 54.181), (7.920, 54.190), (7.901, 54.188)]
hcoords = [m(xx, yy) for xx, yy in hcoords]
land.append(hcoords)
sland.append(sg.Polygon(hcoords))

newland = []
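
The pickle block at the top caches the expensively built map object so later runs can reload it. A generic sketch of that cache pattern (the function name and builder argument are hypothetical):

import os
import pickle

def load_or_build(path, build):
    # Reload a cached object if the pickle exists, otherwise build and cache it.
    if os.path.exists(path):
        with open(path, 'rb') as pf:
            return pickle.load(pf)
    obj = build()
    with open(path, 'wb') as pf:
        pickle.dump(obj, pf)
    return obj
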
Example #3

def plot_voltage_incident(incident_id: int,
                          report_dir: str,
                          mongo_client: pymongo.MongoClient):
    incidents_coll = mongo_client.opq.incidents

    incident = incidents_coll.find_one({"incident_id": incident_id})

    start_ms = incident["start_timestamp_ms"]
    start_dt = datetime.datetime.utcfromtimestamp(start_ms / 1000.0)
    wf_y_values = reports.calib_waveform(incident["gridfs_filename"], incident["box_id"], mongo_client)
    wf_x_values = [datetime.datetime.utcfromtimestamp((start_ms + reports.sample_to_ms(t)) / 1000.0) for t in
                   range(len(wf_y_values))]

    voltage_high = 120.0 + (120.0 * .06)
    voltage_low = 120.0 - (120.0 * .06)

    fig, ax = plt.subplots(3, 1, figsize=(16, 9))
    fig.suptitle("Incident: %d (%s) OPQ Box: %s (%s) @ %s UTC" % (
        incident["incident_id"],
        incident["classifications"][0],
        incident["box_id"],
        reports.box_to_location[incident["box_id"]],
        start_dt.strftime("%Y-%m-%d %H:%M:%S")
    ))
    ax[0].plot(wf_x_values, wf_y_values, color="blue")
    ax[0].set_title("Waveform and $V_{RMS}$")
    ax[0].set_ylabel("Voltage")
    ax[0].set_xlabel("Time M:S.µs")
    ax[0].tick_params(axis="y", colors="blue")
    ax[0].yaxis.label.set_color("blue")

    ax2 = ax[0].twinx()

    vrms_y_values = reports.vrms_waveform(wf_y_values)
    vrms_x_values = wf_x_values[0::200]

    mean_rms = vrms_y_values.mean()
    duration_c = len(vrms_x_values)


    ax2.plot(vrms_x_values, vrms_y_values, color="red")
    ax2.plot(vrms_x_values, [voltage_low for _ in vrms_x_values], linestyle="--", color="red", linewidth=1)
    ax2.plot(vrms_x_values, [voltage_high for _ in vrms_x_values], linestyle="--", color="red", linewidth=1)
    ax2.set_ylabel("$V_{RMS}$")
    ax2.tick_params(axis="y", colors="red")
    ax2.yaxis.label.set_color("red")
    xfmt = md.DateFormatter('%M:%S.%f')
    ax2.xaxis.set_major_formatter(xfmt)

    ax[1].plot(*geom.Polygon(itic.PROHIBITED_REGION_POLYGON).exterior.xy, color="red", label="Prohibited Region Bounds")
    ax[1].plot(*geom.Polygon(itic.NO_DAMAGE_REGION_POLYGON).exterior.xy, color="blue", label="No-Damage Region Bounds")
    print(reports.percent_nominal(120.0, mean_rms))
    ax[1].scatter([duration_c], [reports.percent_nominal(120.0, mean_rms)], linewidth=5, color="black", label="ITIC Value")

    ax[1].set_xscale("log")
    ax[1].set_ylim((1, 500))
    ax[1].set_xlim(right=15 * 60 * 1000)
    ax[1].set_ylabel("% Nominal Voltage")
    ax[1].set_xlabel("Duration Cycles")
    ax[1].set_title("ITIC")

    ax[1].text(1000, 300, "Prohibited Region", fontsize=12)
    ax[1].text(.1, 150, "No Interruption Region", fontsize=12)
    ax[1].text(10000, 30, "No Damage Region", fontsize=12)

    ax[1].legend()

    ax[2].set_title("Semi F47")
    ax[2].plot(*geom.Polygon(SEMI_F47_VIOLATION_POLYGON).exterior.xy, color="red")
    ax[2].set_xscale("log")
    ax[2].set_ylim((1, 100))
    ax[2].set_ylabel("% Nominal Voltage")
    ax[2].set_xlabel("Duration Cycles")
    ax[2].set_xlim(right=reports.ms_to_c(1_000_000))
    ax[2].text(1, 80, "Semi F47 Nominal Region", fontsize=12)
    ax[2].text(10**2, 40, "Semi F47 Violation Region", fontsize=12)
    ax[2].scatter([duration_c], [reports.percent_nominal(120.0, mean_rms)], linewidth=5, color="black", label="Semi F47 Value")
    ax[2].legend(loc="lower right")
    plt.subplots_adjust(hspace=.5)
    plt.savefig("%s/voltage-incident-%d.png" % (report_dir, incident_id))
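
The ITIC and Semi F47 panels both draw a region outline by unpacking exterior.xy into plot() on a log-scaled axis. A self-contained sketch of that move (REGION is a made-up stand-in for itic.PROHIBITED_REGION_POLYGON):

import matplotlib.pyplot as plt
import shapely.geometry as geom

REGION = [(1, 200), (1000, 200), (1000, 500), (1, 500)]
fig, ax = plt.subplots()
ax.plot(*geom.Polygon(REGION).exterior.xy, color="red", label="Region bounds")
ax.set_xscale("log")
ax.legend()
plt.show()
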
Example #4

import urllib
import urllib.parse
import urllib.request
from math import log, pi, tan

from shapely import geometry

from src.objects.image import Satellite_Image




boundary = [[-76.7112977,39.3719562],[-76.7112726,39.3543824],[-76.7112337,39.3271971],[-76.7111829,39.2916607],[-76.7111659,39.2778381],[-76.6888115,39.2680858],[-76.6597751,39.2554169],[-76.6178331,39.2371116],[-76.6116152,39.2343976],[-76.5836752,39.2081197],[-76.582525,39.2077508],[-76.5805555,39.2071192],[-76.5497285,39.1972328],[-76.5464898,39.1992526],[-76.5461562,39.1994606],[-76.5298609,39.2096221],[-76.5298503,39.2189088],[-76.5298259,39.2402128],
                [-76.5297584,39.2991345],[-76.5297583,39.2992049],[-76.5297304,39.3239064],[-76.5297299,39.3243919],[-76.5297172,39.3347547],[-76.529677,39.3708243],[-76.5296761,39.3716015],[-76.5296757,39.3719713],[-76.569831,39.37204],[-76.5820693,39.3719669],[-76.6526604,39.3719623],[-76.6529027,39.371961],[-76.6529981,39.371961],[-76.7112977,39.3719562]]
# boundary is given as [lon, lat] pairs; swap each pair in place to [lat, lon]
for value in boundary:
    value[0], value[1] = value[1], value[0]

poly = geometry.Polygon(boundary)
images = []

EARTH_RADIUS = 6378137
EQUATOR_CIRCUMFERENCE = 2 * pi * EARTH_RADIUS
INITIAL_RESOLUTION = EQUATOR_CIRCUMFERENCE / 256.0
ORIGIN_SHIFT = EQUATOR_CIRCUMFERENCE / 2.0


# https://stackoverflow.com/questions/7490491/capture-embedded-google-map-image-with-python-without-using-a-browser
def latlontopixels(lat, lon, zoom):
    mx = (lon * ORIGIN_SHIFT) / 180.0
    my = log(tan((90 + lat) * pi / 360.0)) / (pi / 180.0)
    my = (my * ORIGIN_SHIFT) / 180.0
    res = INITIAL_RESOLUTION / (2 ** zoom)
    px = (mx + ORIGIN_SHIFT) / res
    py = (my + ORIGIN_SHIFT) / res
    return px, py

Example #5

def getRect(reg1):
    maxx, maxy, miny, minx = reg1['right'], reg1['bottom'], reg1['top'], reg1[
        'left']
    return sg.Polygon([(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny),
                       (minx, miny)])
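
Shapely can build the same axis-aligned rectangle without spelling out the closing ring; a sketch using sg.box (the function name getRectBox is hypothetical):

import shapely.geometry as sg

def getRectBox(reg1):
    # box(minx, miny, maxx, maxy) closes the ring itself.
    return sg.box(reg1['left'], reg1['top'], reg1['right'], reg1['bottom'])
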
Example #6

def _format_shape_osm(bbox, result_NodesFromWays, result_NodesWaysFromRels,
                      item, save_path):
    """format edges, nodes and relations from overpy result objects into shapes
    Parameters:
        bbox
        result_NodesFromWays
        result_NodesWaysFromRels
        item
        save_path

    Returns:
        gdf_all: Geodataframe with Linestrings, Polygons & Multipolygons
    """
    # polygon vs. linestrings in nodes from ways result:

    schema_poly = {
        'geometry': 'Polygon',
        'properties': {
            'Name': 'str:80',
            'Natural_Type': 'str:80',
            'Item': 'str:80'
        }
    }
    schema_line = {
        'geometry': 'LineString',
        'properties': {
            'Name': 'str:80',
            'Natural_Type': 'str:80',
            'Item': 'str:80'
        }
    }
    shapeout_poly = (save_path + '/' + str(item) + '_poly_' +
                     str(int(bbox[0])) + '_' + str(int(bbox[1])) + ".shp")
    shapeout_line = (save_path + '/' + str(item) + '_line_' +
                     str(int(bbox[0])) + '_' + str(int(bbox[1])) + ".shp")

    way_poly = []
    way_line = []
    for way in result_NodesFromWays.ways:
        if (way.nodes[0].id == way.nodes[-1].id) & (len(way.nodes) > 2):
            way_poly.append(way)
        else:
            way_line.append(way)

    with fiona.open(shapeout_poly,
                    'w',
                    crs=from_epsg(4326),
                    driver='ESRI Shapefile',
                    schema=schema_poly) as output:
        for way in way_poly:
            geom = mapping(
                geometry.Polygon([node.lon, node.lat] for node in way.nodes))
            prop = {
                'Name': way.tags.get("name", "n/a"),
                'Natural_Type': way.tags.get("natural", "n/a"),
                'Item': item
            }
            output.write({'geometry': geom, 'properties': prop})

    with fiona.open(shapeout_line,
                    'w',
                    crs=from_epsg(4326),
                    driver='ESRI Shapefile',
                    schema=schema_line) as output2:
        for way in way_line:
            geom2 = {
                'type': 'LineString',
                'coordinates': [(node.lon, node.lat) for node in way.nodes]
            }
            prop2 = {
                'Name': way.tags.get("name", "n/a"),
                'Natural_Type': way.tags.get("natural", "n/a"),
                'Item': item
            }
            output2.write({'geometry': geom2, 'properties': prop2})

    gdf_poly = geopandas.read_file(shapeout_poly)
    for ending in ['.shp', ".cpg", ".dbf", ".prj", '.shx']:
        os.remove(save_path + '/' + str(item) + '_poly_' + str(int(bbox[0])) +
                  '_' + str(int(bbox[1])) + ending)
    gdf_line = geopandas.read_file(shapeout_line)
    for ending in ['.shp', ".cpg", ".dbf", ".prj", '.shx']:
        os.remove(save_path + '/' + str(item) + '_line_' + str(int(bbox[0])) +
                  '_' + str(int(bbox[1])) + ending)

    # add buffer to the lines (0.000045° is ~5 m); assign back, since
    # rebinding the loop variable would leave the GeoDataFrame unchanged
    gdf_line['geometry'] = gdf_line.geometry.buffer(0.000045)

    gdf_all = gdf_poly.append(gdf_line)

    # detect multipolygons in relations:
    print(
        'Converting results for %s to correct geometry and GeoDataFrame: MultiPolygons'
        % item)

    MultiPoly = []
    MultiPolyProps = []
    for relation in result_NodesWaysFromRels.relations:
        OuterList = []
        InnerList = []
        PolyList = []
        # get inner and outer parts from overpy results, convert into linestrings
        # to check for closedness later
        for relationway in relation.members:
            if relationway.role == 'outer':
                for way in result_NodesWaysFromRels.ways:
                    if way.id == relationway.ref:
                        OuterList.append(
                            geometry.LineString([node.lon, node.lat]
                                                for node in way.nodes))
            else:
                for way in result_NodesWaysFromRels.ways:
                    if way.id == relationway.ref:
                        InnerList.append(
                            geometry.LineString([node.lon, node.lat]
                                                for node in way.nodes))

        OuterPoly = []
        # in case outer polygons are not fragmented, add those already in correct geometry
        for outer in OuterList[:]:  # iterate a copy: the list is mutated below
            if outer.is_closed:
                OuterPoly.append(
                    Polygon(outer.coords[0:(len(outer.coords) + 1)]))
                OuterList.remove(outer)

        initialLength = len(OuterList)
        i = 0
        OuterCoords = []

        # loop to account for more than one fragmented outer ring
        while (len(OuterList) > 0) & (i <= initialLength):
            OuterCoords.append(
                OuterList[0].coords[0:(len(OuterList[0].coords) + 1)])
            OuterList.remove(OuterList[0])
            for _ in range(0, len(OuterList)):
                # get all the other outer polygon pieces in the right order
                # (only works if fragments are in correct order, anyways!!
                # so added another loop around it in case not!)
                for outer in OuterList[:]:  # iterate a copy: the list is mutated below
                    if outer.coords[0] == OuterCoords[-1][-1]:
                        OuterCoords[-1] = OuterCoords[-1] + outer.coords[0:(
                            len(outer.coords) + 1)]
                        OuterList.remove(outer)

        for entry in OuterCoords:
            if len(entry) > 2:
                OuterPoly.append(Polygon(entry))

        PolyList = OuterPoly
        # get the inner polygons (usually in correct, closed shape - not accounting
        # for the fragmented case as in outer poly)
        for inner in InnerList:
            if inner.is_closed:
                PolyList.append(Polygon(inner))

        MultiPoly.append(MultiPolygon([shape(poly) for poly in PolyList]))
        # keep the tags of the relation this multipolygon came from
        MultiPolyProps.append({
            'Name': relation.tags.get("name", "n/a"),
            'Type': relation.tags.get("type", "n/a"),
            'Item': item
        })

    schema_multi = {
        'geometry': 'MultiPolygon',
        'properties': {
            'Name': 'str:80',
            'Type': 'str:80',
            'Item': 'str:80'
        }
    }

    shapeout_multi = (save_path + '/' + str(item) + '_multi_' +
                      str(int(bbox[0])) + '_' + str(int(bbox[1])) + ".shp")

    with fiona.open(shapeout_multi,
                    'w',
                    crs=from_epsg(4326),
                    driver='ESRI Shapefile',
                    schema=schema_multi) as output:
        for i in range(0, len(MultiPoly)):
            geom = mapping(MultiPoly[i])
            output.write({'geometry': geom, 'properties': MultiPolyProps[i]})
    gdf_multi = geopandas.read_file(shapeout_multi)
    for ending in ['.shp', ".cpg", ".dbf", ".prj", '.shx']:
        os.remove(save_path + '/' + str(item) + '_multi_' + str(int(bbox[0])) +
                  '_' + str(int(bbox[1])) + ending)
    gdf_all = gdf_all.append(gdf_multi, sort=True)

    print('Combined all results for %s to one GeoDataFrame: done' % item)

    return gdf_all
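
The closed-way test above compares the first and last node ids; with bare coordinates, Shapely's is_ring expresses the same polygon-versus-line split. A self-contained sketch (sample coordinates are made up):

from shapely import geometry

coords = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0)]
line = geometry.LineString(coords)
# A closed, simple sequence becomes a Polygon; anything else stays a line.
shape_out = geometry.Polygon(coords) if line.is_ring else line
print(type(shape_out).__name__)  # -> Polygon
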
Example #7

    def __init__(self, random_entrypoints=False):
        super().__init__()

        self.mvas = [
            model.MinimumVectoringAltitude(
                shape.Polygon([(48.43, 2.09), (39.36, 4.22), (27.26, 20.01),
                               (54.03, 12.95), (48.43, 2.09)]), 4800),
            model.MinimumVectoringAltitude(
                shape.Polygon([(27.26, 20.01), (26.37, 21.35), (29.73, 26.39),
                               (28.83, 31.09), (34.32, 25.55), (46.08, 22.36),
                               (42.47, 16), (27.26, 20.01)]), 3700),
            model.MinimumVectoringAltitude(
                shape.Polygon([(26.37, 21.35), (13.15, 38.60), (22.0, 36.13),
                               (22.0, 30.65), (29.73, 26.39), (26.37, 21.35)]),
                5700),
            model.MinimumVectoringAltitude(
                shape.Polygon([(29.73, 26.39), (22.0, 30.65), (22.0, 36.13),
                               (13.15, 38.60), (8, 45.68), (18.75, 44.98),
                               (28.83, 31.09), (29.73, 26.39)]), 4600),
            model.MinimumVectoringAltitude(
                shape.Polygon([(28.83, 31.09), (18.75, 44.98), (22.0, 45.68),
                               (26.37, 43.08), (28.83, 31.09)]), 4100),
            model.MinimumVectoringAltitude(
                shape.Polygon([(28.83, 31.09), (28.83, 33.45), (31.29, 34.12),
                               (29.73, 41.29), (26.9, 40.47), (28.83, 31.09)]),
                4000),
            model.MinimumVectoringAltitude(
                shape.Polygon([(22.0, 45.68), (18.75, 44.98), (8, 45.68),
                               (4.08, 50.25), (15.73, 76.12), (29.73, 80.71),
                               (56.16, 82.05), (58.51, 69.84), (42.94, 71.97),
                               (22.56, 65.36), (16.17, 50.25), (23.23, 49.01),
                               (22.0, 45.68)]), 3500),
            model.MinimumVectoringAltitude(
                shape.Polygon([(46.08, 22.36), (34.32, 25.55), (31.5, 28.4),
                               (36.22, 35.46), (44.46, 31.76),
                               (46.08, 22.36)]), 3000),
            model.MinimumVectoringAltitude(
                shape.Polygon([(31.5, 28.4), (28.83, 31.09), (28.83, 33.45),
                               (31.29, 34.12), (29.73, 41.29), (26.9, 40.47),
                               (26.37, 43.08), (22.0, 45.68), (23.23, 49.01),
                               (31.29, 48.01), (30.17, 45.71), (32.19, 44.98),
                               (35.14, 41.62), (36.22, 42.29), (37.56, 36.69),
                               (36.22, 35.46), (31.5, 28.4)]), 3500),
            model.MinimumVectoringAltitude(
                shape.Polygon([(35.14, 41.62), (32.19, 44.98), (30.17, 45.71),
                               (31.29, 48.01), (23.23, 49.01), (16.17, 50.25),
                               (22.56, 65.36), (36.58, 69.91), (39.47, 60.55),
                               (35.73, 59.13), (36.22, 56.18), (38.46, 53.72),
                               (34.32, 45.68), (35.14, 41.62)]), 3200),
            model.MinimumVectoringAltitude(
                shape.Polygon([(46.08, 22.36), (44.95, 28.91), (53.5, 31.43),
                               (57.97, 41.89), (47.17, 55.97), (40.75, 53.72),
                               (38.46, 53.72), (36.22, 56.18), (35.73, 59.13),
                               (39.47, 60.55), (36.58, 69.91), (42.94, 71.97),
                               (58.51, 69.84), (54.78, 60.01), (68.15, 38.6),
                               (66.34, 36.85), (65.53, 30.62), (62.92, 29.97),
                               (66.58, 20.58), (52.88, 18.68), (51.64, 21.35),
                               (46.08, 22.36)]), 2700),
            model.MinimumVectoringAltitude(
                shape.Polygon([(44.95, 28.91), (44.46, 31.76), (36.22, 35.46),
                               (37.56, 36.69), (36.22, 42.29), (35.14, 41.62),
                               (34.32, 45.68), (38.46, 53.72), (40.75, 53.72),
                               (47.17, 55.97), (57.97, 41.89), (53.5, 31.43),
                               (44.95, 28.91)]), 2600)
        ]

        self.runway = model.Runway(45.16, 43.26, 586, 160)

        self.airspace = model.Airspace(self.mvas, self.runway)

        if random_entrypoints:
            self.entrypoints = [
                model.EntryPoint(10, 51, 90, [130, 150, 170, 190, 210, 230]),
                model.EntryPoint(17, 74.6, 120,
                                 [130, 150, 170, 190, 210, 230]),
                model.EntryPoint(19.0, 34.0, 45,
                                 [130, 150, 170, 190, 210, 230]),
                model.EntryPoint(29.8, 79.4, 170,
                                 [130, 150, 170, 190, 210, 230]),
                model.EntryPoint(54.0, 80.5, 230,
                                 [140, 160, 180, 200, 220, 240]),
                model.EntryPoint(53.0, 60.0, 260,
                                 [140, 160, 180, 200, 220, 240]),
                model.EntryPoint(66.0, 39.0, 290, [140, 160, 180, 200, 220]),
                model.EntryPoint(64.4, 22.0, 320, [140, 160, 180, 200, 220]),
                model.EntryPoint(46.0, 7.0, 320,
                                 [140, 160, 180, 200, 220, 240, 260])
            ]
        else:
            self.entrypoints = [model.EntryPoint(10, 51, 90, [150])]
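
A typical query against such a sector list is point-in-polygon: find the MVA whose polygon contains the aircraft position. A sketch in plain Shapely (the (polygon, altitude) tuples are made up; the real code wraps them in model.MinimumVectoringAltitude):

from shapely import geometry as shape

mvas = [(shape.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)]), 4800),
        (shape.Polygon([(10, 0), (20, 0), (20, 10), (10, 10)]), 3700)]

position = shape.Point(12, 5)
altitude = next((alt for poly, alt in mvas if poly.contains(position)), None)
print(altitude)  # -> 3700
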
Example #8

    def _getBetaFirstOctant(self, theta, frequency, alphaMtx, lowResCell):
        """
        theta must be <= pi/4.
        alphaMtx must be referred to the first octant.
        """
        def catValueToArrayIfOutside(array, val):
            minarr, maxarr = np.min(array), np.max(array)
            valGtArray = (val > array).all() and not abutls.isClose(
                val, maxarr)
            valLtArray = (val < array).all() and not abutls.isClose(
                val, minarr)

            if not (valGtArray or valLtArray):
                return array

            ornt = np.sign(array[-1] - array[0])
            compr = 1 if valGtArray else -1
            pos = ornt * compr

            if pos == 1.:
                return np.concatenate((array, np.array([val])))
            else:
                return np.concatenate((np.array([val]), array))

        if len(alphaMtx.alphas.shape) < 2:
            return 1
        normTheta = theta + np.pi / 2.
        normSlope = np.tan(normTheta) if not abutls.isClose(
            theta, 0, thetaTolerance) else np.nan

        cell = alphaMtx.polygon
        try:
            crds = list(cell.boundary.coords)
        except Exception:
            # skipping the computation for strange geometries without coordinate sequence
            return 1
        cellxs = np.array([p[0] for p in crds])
        cellys = np.array([p[1] for p in crds])

        #getting y coords and extending them to cover the whole cell
        ys = alphaMtx.ys[:]
        ys = catValueToArrayIfOutside(ys, max(cellys))
        ys = catValueToArrayIfOutside(ys, min(cellys))
        miny = min(ys)
        maxy = max(ys)

        #getting x coords and extending them
        #so that trasverse polygons can cover the whole cell
        xs = alphaMtx.xs[:]
        xs = catValueToArrayIfOutside(xs, min(cellxs))
        dx = abs(xs[-1] - xs[-2] if len(xs) else max(cellxs) - min(cellxs))
        if not np.isnan(normSlope):
            #normslope should be never 0
            polyxproj = cellxs + (miny - cellys) / normSlope
            subpolymaxx = max(polyxproj)
            mxxxs = max(xs)
            while mxxxs < subpolymaxx:
                mxxxs = min(mxxxs + dx, subpolymaxx)
                xs = catValueToArrayIfOutside(xs, mxxxs)
        elif max(xs) < max(cellxs):
            xs = np.concatenate((xs, np.array([max(cellxs)])))

        minx = min(xs)
        maxx = max(xs)
        alphas = []
        dys = []

        def getPolygonDY(pl):
            crds = list(pl.boundary.coords)
            plxs = np.array([c[0] for c in crds])
            plys = np.array([c[1] for c in crds])
            plprojy = plys + (maxx - plxs) * np.tan(theta)
            dy = max(plprojy) - min(plprojy)
            return dy

        #looping on the trasverse polygons
        lminx = min(xs)
        lmaxx = max(xs)
        lenx = min(len(xs), self.maxSubSections)
        #guaranteeing that xs are equally spaced
        prevsubcell = None
        loopxs = np.linspace(lminx, lmaxx, lenx)
        loopxs = loopxs[loopxs > min(loopxs)]
        for x in loopxs:
            if not np.isnan(normSlope):
                #normSlope is negative
                y = miny - normSlope * (x - minx)
                pxs = [minx, x, minx, minx]
                pys = [miny, miny, y, miny]
            else:
                pxs = [minx, x, x, minx, minx]
                pys = [miny, miny, maxy, maxy, miny]
            pxy = [p for p in zip(pxs, pys)]
            poly = g.Polygon(pxy)
            subcell = cell.intersection(poly)
            if abutls.isClose(subcell.area,
                              0.) or (prevsubcell and abutls.isClose(
                                  prevsubcell.area, subcell.area)):
                #the trasverse polygon does not cross the cell yet
                continue
            if subcell.__class__ == g.Polygon:
                #the intersection between the trasverse polygon and the cell
                #is a polygon. Computing the alpha
                alphaEst = abSingleCellAlphaEstimator(subcell,
                                                      alphaMtx,
                                                      kshape=self.kshape,
                                                      recalibFactor=1)
                alphaEst.obstrAlleviationEnabled = self.obstrAlleviationEnabled
                alpha = alphaEst.computeAlpha(theta, frequency)
                totDy = getPolygonDY(subcell)
            elif subcell.__class__ in [g.GeometryCollection, g.MultiPolygon]:
                #the intersection between the trasverse polygon and the cell
                #is a collection of polygons (possible for concave cells).
                #Computing the alpha for each subpolygon, and
                #weight-averaging to compute the overall alpha
                totDy = 0
                plobstrs = []  # accumulate over all sub-polygons
                pldys = []
                for pl in subcell:
                    if pl.__class__ == g.Polygon:
                        alphaEst = abSingleCellAlphaEstimator(
                            pl, alphaMtx, kshape=self.kshape)
                        alphaEst.obstrAlleviationEnabled = self.obstrAlleviationEnabled
                        palpha = alphaEst.computeAlpha(theta, frequency)
                        dy = getPolygonDY(pl)
                        plobstrs.append(1 - palpha)
                        pldys.append(dy)
                        totDy += dy
                plobstrs = np.array(plobstrs)
                pldys = np.array(pldys)
                # weighted average of the obstruction over the sub-polygons
                totobstr = np.sum(plobstrs * pldys) / np.sum(pldys)
                alpha = 1 - totobstr
            else:
                raise TypeError('Unsupported geometry object: ' +
                                str(subcell.__class__))
            alphas.append(alpha)
            dys.append(totDy)
            prevsubcell = subcell
        alphas = np.array(alphas)
        #weighting to the width or to dys is the same thing
        widths = np.array(dys)
        avgWidth = np.mean(widths)
        weights = widths[:]
        weights[weights > avgWidth] = avgWidth
        beta = np.sum(alphas * weights) / np.sum(weights)
        return beta
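
The branching on subcell's class exists because intersecting the cell with a transverse polygon can return either a single Polygon or a collection (for concave cells). A self-contained toy version:

import shapely.geometry as g

cell = g.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
sweep = g.Polygon([(0, 0), (2, 0), (2, 4), (0, 4)])
subcell = cell.intersection(sweep)
print(subcell.geom_type, subcell.area)  # -> Polygon 8.0
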
Example #9

        sys.exit("This script only works with 2D meshes!")

    resin.get_time()

    # Define zones from polylines
    print("~> Lecture des polylignes et recherche des noeuds inclus dedans")
    masks = []
    with BlueKenueRead_i2s(args.i2s_name) as in_i2s:
        in_i2s.read_header()
        for i, (value, polyline) in enumerate(in_i2s.iter_on_polylines()):
            if not polyline.is_valid:
                sys.exit("ERROR: polyline {} is not valid (probably because it intersects itself) !".format(i))
            if not polyline.is_ring:
                sys.exit("ERROR: polyline {} is not closed".format(i))

            polygon = geo.Polygon(polyline)  # only Polygon has `contains` method

            # Build the masking array (booleans)
            nodes_included = np.zeros(resin.nnode2d, dtype=bool)
            for j in range(resin.nnode):  # iterate over all nodes
                node = j + 1
                (x, y) = resin.get_coord(node)
                pt = geo.Point(x, y)
                if polygon.contains(pt):
                    nodes_included[j] = True

            nb_nodes_included = int(np.sum(nodes_included))
            print("Polyligne {} (avec {} points et une valeur à {}) contient {} noeuds".format(i, len(polyline.coords), value, nb_nodes_included))

            mask = Mask(value, polyline, nodes_included)
            masks.append(mask)
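
The node loop calls polygon.contains() once per mesh node; a prepared geometry gives identical results while making repeated containment tests much cheaper. A self-contained sketch with random toy coordinates:

import numpy as np
import shapely.geometry as geo
from shapely.prepared import prep

polygon = geo.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
prepared = prep(polygon)  # optimized for many contains() calls
coords = np.random.rand(1000, 2) * 12
nodes_included = np.array([prepared.contains(geo.Point(x, y))
                           for x, y in coords])
print(int(nodes_included.sum()), "nodes inside")
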
Example #10

    plt.show()

#Subset to bounding box of basin
demdata = rio.open('Mads_1.tif')
show((demdata, 1), cmap='terrain')
print(demdata.crs, basin.crs)
# print(basin.boundary.to_json())
basin_coords = getFeatures(basin)
minx, miny, maxx, maxy = basin.total_bounds
p1 = geometry.Point(minx, miny)
p2 = geometry.Point(minx, maxy)
p3 = geometry.Point(maxx, maxy)
p4 = geometry.Point(maxx, miny)

pointlist = [p1, p2, p3, p4, p1]
clipBnd = geometry.Polygon(pointlist)
clip = gpd.GeoSeries(clipBnd)

out_img, out_transform = mask(dataset=demdata, shapes=clip, crop=True)
out_meta = demdata.meta
out_meta.update({
    "driver": "GTiff",
    "height": out_img.shape[1],
    "width": out_img.shape[2],
    "transform": out_transform
})
out_raster = rio.open('Mads_clip_1.tif', "w", **out_meta)
out_raster.write(out_img)

out_raster.close()
demdata_clip = rio.open('Mads_clip_1.tif')
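
rasterio datasets are context managers, so the write step can be expressed with `with` and the explicit close() dropped; a sketch reusing out_meta and out_img from above:

# Equivalent write using a context manager: the file is closed even if
# write() raises.
with rio.open('Mads_clip_1.tif', "w", **out_meta) as dst:
    dst.write(out_img)
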
Example #11

def get_rgi_df(reset=False):
    """This function prepares a kind of `fake` RGI file, with the updated
    geometries for ITMIX.
    """

    # This makes an RGI dataframe with all ITMIX + WGMS + GTD glaciers
    RGI_DIR = utils.get_rgi_dir()

    df_rgi_file = os.path.join(DATA_DIR, 'itmix', 'itmix_rgi_shp.pkl')
    if os.path.exists(df_rgi_file) and not reset:
        rgidf = pd.read_pickle(df_rgi_file)
    else:
        linkf = os.path.join(DATA_DIR, 'itmix', 'itmix_rgi_links.pkl')
        df_itmix = pd.read_pickle(linkf)

        f, d = utils.get_wgms_files()
        wgms_df = pd.read_csv(f)

        f = utils.get_glathida_file()
        gtd_df = pd.read_csv(f)

        divides = []
        rgidf = []
        _rgi_ids_for_overwrite = []
        for i, row in df_itmix.iterrows():

            log.info('Prepare RGI df for ' + row.name)

            # read the rgi region
            rgi_shp = find_path(RGI_DIR, row['rgi_reg'] + '_rgi50_*.shp')
            rgi_df = salem.read_shapefile(rgi_shp, cached=True)

            rgi_parts = row.T['rgi_parts_ids']
            sel = rgi_df.loc[rgi_df.RGIId.isin(rgi_parts)].copy()

            # use the ITMIX shape where possible
            if row.name in [
                    'Hellstugubreen', 'Freya', 'Aqqutikitsoq', 'Brewster',
                    'Kesselwandferner', 'NorthGlacier', 'SouthGlacier',
                    'Tasman', 'Unteraar', 'Washmawapta', 'Columbia'
            ]:
                shf = find_path(SEARCHD, '*_' + row.name + '*.shp')
                shp = salem.read_shapefile(shf)
                if row.name == 'Unteraar':
                    shp = shp.iloc[[-1]]
                if 'LineString' == shp.iloc[0].geometry.type:
                    shp.loc[shp.index[0],
                            'geometry'] = shpg.Polygon(shp.iloc[0].geometry)
                if shp.iloc[0].geometry.type == 'MultiLineString':
                    # Columbia
                    geometry = shp.iloc[0].geometry
                    parts = list(geometry)
                    for p in parts:
                        assert p.type == 'LineString'
                    exterior = shpg.Polygon(parts[0])
                    # let's assume that all other polygons are in fact interiors
                    interiors = []
                    for p in parts[1:]:
                        assert exterior.contains(p)
                        interiors.append(p)
                    geometry = shpg.Polygon(parts[0], interiors)
                    assert 'Polygon' in geometry.type
                    shp.loc[shp.index[0], 'geometry'] = geometry

                assert len(shp) == 1
                area_km2 = shp.iloc[0].geometry.area * 1e-6
                shp = salem.gis.transform_geopandas(shp)
                shp = shp.iloc[0].geometry
                sel = sel.iloc[[0]]
                sel.loc[sel.index[0], 'geometry'] = shp
                sel.loc[sel.index[0], 'Area'] = area_km2
            elif row.name == 'Urumqi':
                # ITMIX Urumqi is in fact two glaciers
                shf = find_path(SEARCHD, '*_' + row.name + '*.shp')
                shp2 = salem.read_shapefile(shf)
                assert len(shp2) == 2
                for k in [0, 1]:
                    shp = shp2.iloc[[k]].copy()
                    area_km2 = shp.iloc[0].geometry.area * 1e-6
                    shp = salem.gis.transform_geopandas(shp)
                    shp = shp.iloc[0].geometry
                    assert sel.loc[sel.index[k],
                                   'geometry'].contains(shp.centroid)
                    sel.loc[sel.index[k], 'geometry'] = shp
                    sel.loc[sel.index[k], 'Area'] = area_km2
                assert len(sel) == 2
            elif len(rgi_parts) > 1:
                # Ice-caps. Make divides
                # First we gather all the parts:
                sel = rgi_df.loc[rgi_df.RGIId.isin(rgi_parts)].copy()
                # Make the multipolygon for the record
                multi = shpg.MultiPolygon([g for g in sel.geometry])
                # update the RGI attributes. We take a dummy rgi ID
                new_area = np.sum(sel.Area)
                found = False
                for i in range(len(sel)):
                    tsel = sel.iloc[[i]].copy()
                    if 'Multi' in tsel.loc[tsel.index[0], 'geometry'].type:
                        continue
                    else:
                        found = True
                        sel = tsel
                        break
                if not found:
                    raise RuntimeError()

                inif = 0.
                add = 1e-5
                if row.name == 'Devon':
                    inif = 0.001
                    add = 1e-4
                while True:
                    buff = multi.buffer(inif)
                    if 'Multi' in buff.type:
                        inif += add
                    else:
                        break
                x, y = multi.centroid.xy
                if 'Multi' in buff.type:
                    raise RuntimeError
                sel.loc[sel.index[0], 'geometry'] = buff
                sel.loc[sel.index[0], 'Area'] = new_area
                sel.loc[sel.index[0], 'CenLon'] = np.asarray(x)[0]
                sel.loc[sel.index[0], 'CenLat'] = np.asarray(y)[0]

                # Divides db
                div_sel = dict()
                for k, v in sel.iloc[0].items():
                    if k == 'geometry':
                        div_sel[k] = multi
                    elif k == 'RGIId':
                        div_sel['RGIID'] = v
                    else:
                        div_sel[k] = v
                divides.append(div_sel)
            else:
                pass

            # add glacier name to the entity
            name = ['I:' + row.name] * len(sel)
            add_n = sel.RGIId.isin(wgms_df.RGI_ID.values)
            for z, it in enumerate(add_n.values):
                if it:
                    name[z] = 'W-' + name[z]
            add_n = sel.RGIId.isin(gtd_df.RGI_ID.values)
            for z, it in enumerate(add_n.values):
                if it:
                    name[z] = 'G-' + name[z]
            sel.loc[:, 'Name'] = name
            rgidf.append(sel)

            # Add divides to the original one
            adf = pd.DataFrame(divides)
            adf.to_pickle(cfg.PATHS['itmix_divs'])

        log.info('N glaciers ITMIX: {}'.format(len(rgidf)))

        # WGMS glaciers which are not already there
        # Actually we should remove the data of those 7 to be honest...
        f, d = utils.get_wgms_files()
        wgms_df = pd.read_csv(f)
        wgms_df = wgms_df.loc[~wgms_df.RGI_ID.isin(_rgi_ids_for_overwrite)]

        log.info('N glaciers WGMS: {}'.format(len(wgms_df)))
        for i, row in wgms_df.iterrows():
            rid = row.RGI_ID
            reg = rid.split('-')[1].split('.')[0]
            # read the rgi region
            rgi_shp = find_path(RGI_DIR, reg + '_rgi50_*.shp')
            rgi_df = salem.read_shapefile(rgi_shp, cached=True)

            sel = rgi_df.loc[rgi_df.RGIId.isin([rid])].copy()
            assert len(sel) == 1

            # add glacier name to the entity
            _cor = row.NAME.replace('/', 'or').replace('.',
                                                       '').replace(' ', '-')
            name = ['W:' + _cor] * len(sel)
            add_n = sel.RGIId.isin(gtd_df.RGI_ID.values)
            for z, it in enumerate(add_n.values):
                if it:
                    name[z] = 'G-' + name[z]
            for n in name:
                if len(n) > 48:
                    raise ValueError('Name too long: %s' % n)
            sel.loc[:, 'Name'] = name
            rgidf.append(sel)

        _rgi_ids_for_overwrite.extend(wgms_df.RGI_ID.values)

        # GTD glaciers which are not already there
        # Actually we should remove the data of those 2 to be honest...
        gtd_df = gtd_df.loc[~gtd_df.RGI_ID.isin(_rgi_ids_for_overwrite)]
        log.info('N glaciers GTD: {}'.format(len(gtd_df)))

        for i, row in gtd_df.iterrows():
            rid = row.RGI_ID
            reg = rid.split('-')[1].split('.')[0]
            # read the rgi region
            rgi_shp = find_path(RGI_DIR, reg + '_rgi50_*.shp')
            rgi_df = salem.read_shapefile(rgi_shp, cached=True)

            sel = rgi_df.loc[rgi_df.RGIId.isin([rid])].copy()
            assert len(sel) == 1

            # add glacier name to the entity
            _corname = row.NAME.replace('/',
                                        'or').replace('.',
                                                      '').replace(' ', '-')
            name = ['G:' + _corname] * len(sel)
            for n in name:
                if len(n) > 48:
                    raise ValueError('Name too long: %s' % n)
            sel.loc[:, 'Name'] = name
            rgidf.append(sel)

        # Save for not computing each time
        rgidf = pd.concat(rgidf)
        rgidf.to_pickle(df_rgi_file)

    return rgidf
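
The MultiLineString branch above rebuilds the Columbia outline as a shell plus interior rings. A minimal stand-alone example of a polygon with a hole (coordinates made up):

import shapely.geometry as shpg

shell = [(0, 0), (10, 0), (10, 10), (0, 10)]
hole = [(4, 4), (6, 4), (6, 6), (4, 6)]
glacier = shpg.Polygon(shell, [hole])
print(glacier.area)  # -> 96.0 (the 100-unit shell minus the 4-unit hole)
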
Example #12

vertical_rebars = vRebar(vAttribute)
column = col_conc(vAttribute)
pureRebars = check_range(vertical_rebars, vAttribute)
ties = tie(vertical_rebars, vAttribute, pureRebars)
links = stirrups(vertical_rebars, vAttribute, pureRebars)

main_links = links[0]
side_links = links[1]
""" Plotting """
# Inside Links
for main_link in main_links:
    if len(gc_curve(main_link)) == 2:
        m_line = geometry.LineString(gc_curve(main_link))
        plt.plot(gc_point(main_link)[0], gc_point(main_link)[1])
    else:
        m_poly = geometry.Polygon(gc_curve(main_link))
        m_curve = m_poly.exterior.xy
        plt.plot(m_curve[0], m_curve[1], 'bo-')

for side_link in side_links:
    s_poly = geometry.Polygon(gc_curve(side_link))
    s_curve = s_poly.exterior.xy
    plt.plot(s_curve[0], s_curve[1], 'bo-')

# Column Concrete Curve
column_curves = column[0]
column_plot_curve = column_curves.exterior.xy
plt.plot(column_plot_curve[0], column_plot_curve[1], 'bo-')

# Main Stirrups
basic_vertical_points = vertical_rebars['bPoints'] + vertical_rebars[
    '_hPoints'] + vertical_rebars['_bPoints'][::-1] + vertical_rebars[
        'hPoints'][::-1]
Example #13

def query_osm_polygons(osm_tag, bound_poly: sg.Polygon) -> gpd.GeoDataFrame:
    """
    Perform a blocking query on the OpenStreetMap Overpass API for objects with the passed tag.
    Retain only polygons and store in GeoPandas GeoDataFrame
    :param osm_tag: OSM tag to query
    :param shapely.Polygon bound_poly: bounding box around requested area in EPSG:4326 coordinates
    """
    from time import time

    t0 = time()
    bounds = bound_poly.bounds
    overpass_urls = ["https://overpass.kumi.systems/api/interpreter", "https://lz4.overpass-api.de/api/interpreter",
                     "https://z.overpass-api.de/api/interpreter", "https://overpass.openstreetmap.ru/api/interpreter",
                     "https://overpass.openstreetmap.fr/api/interpreter",
                     "https://overpass.nchc.org.tw/api/interpreter"]
    for url in overpass_urls:
        resp, data = query_request(url, osm_tag, bounds)
        if resp.status_code == 200:
            break
        else:
            print(resp.status_code)

    print("OSM query took ", time() - t0)

    ways = {o['id']: o['nodes'] for o in data['elements'] if o['type'] == 'way'}
    nodes = {o['id']: (o['lon'], o['lat']) for o in data['elements'] if o['type'] == 'node'}
    relations = [o for o in data['elements'] if o['type'] == 'relation']

    used_ways = []
    df_list = []
    for rel in relations:
        # Get all the id and role (inner/outer) of all relation members that are ways
        rw = [(w['ref'], w['role']) for w in rel['members'] if w['type'] == 'way']
        rel_outer_rings = []
        rel_inner_rings = []

        unclosed_ways = []
        # Iterate each way in relation
        for way_id, role in rw:
            # Find the way ID in ways
            if way_id in ways:
                way = ways[way_id]
                # Find the vertices (AKA nodes) that make up each polygon
                locs = [nodes[i] for i in way]
                start = locs[0]
                end = locs[-1]
                # Check if way is already a closed ring
                if start == end:
                    # Simplest case where way is already valid ring
                    # Create linear ring from vertices and classify by role
                    ring = sg.LinearRing(locs)
                    if role == 'inner':
                        rel_inner_rings.append(ring)
                    else:
                        rel_outer_rings.append(ring)
                elif len(unclosed_ways) > 0:
                    consumed = False
                    # Way is not a valid ring
                    # Check if there are any matching dangling nodes in other unclosed ways
                    for _, uw in unclosed_ways:
                        uw_start = uw[0]
                        uw_end = uw[-1]
                        if uw_end == start:
                            # If the start of this way is the same as the end of the unclosed way
                            # Append this way's nodes with the duplicate coord sliced off
                            uw += locs[1:]
                            consumed = True
                        elif uw_end == end:
                            # If the end of this way is the same as the end of the unclosed way
                            # Append this way's nodes reversed with the duplicate coord sliced off
                            uw += locs[-2::-1]
                            consumed = True
                        elif uw_start == end:
                            # If the end of this way is the same as the start of the unclosed way
                            # Prepend this way's nodes with the duplicate coord sliced off
                            uw[0:0] = locs[:-1]
                            consumed = True
                        elif uw_start == start:
                            # If the start of this way is the same as the start of the unclosed way
                            # Prepend this way's nodes reversed with the duplicate coord sliced off
                            uw[0:0] = locs[:0:-1]
                            consumed = True

                        # Check if this way has been used
                        if consumed:
                            # uw_start/end may have changed by now
                            if uw[0] == uw[-1]:
                                # Made a closed ring
                                # Create linear ring from vertices and classify by role
                                ring = sg.LinearRing(uw)
                                if role == 'inner':
                                    rel_inner_rings.append(ring)
                                else:
                                    rel_outer_rings.append(ring)
                                # Remove from unclosed ways
                                unclosed_ways.remove((role, uw))
                            break

                    # unclosed_ways is never empty in this if block, therefore loop is always entered
                    # and consumed is always defined
                    if not consumed:
                        # This way is currently isolated so store as another unclosed way
                        unclosed_ways.append((role, locs))
                else:
                    unclosed_ways.append((role, locs))
                # Store used ways to prevent double processing later on
                # Do not pop them out as other relations could be using them!
                used_ways.append(way_id)
        # Link up remaining unclosed polys
        if unclosed_ways:
            for (r1, uw1), (r2, uw2) in combinations(unclosed_ways, 2):
                consumed = False
                uw1_start = uw1[0]
                uw1_end = uw1[-1]
                uw2_start = uw2[0]
                uw2_end = uw2[-1]
                if uw1_end == uw2_start:
                    # If the start of uw2 is the same as the end of uw1
                    # Append uw2's nodes with the duplicate coord sliced off
                    uw1 += uw2[1:]
                    consumed = True
                elif uw1_end == uw2_end:
                    # If the end of uw2 is the same as the end of uw1
                    # Append uw2's nodes reversed with the duplicate coord sliced off
                    uw1 += uw2[-2::-1]
                    consumed = True
                elif uw1_start == uw2_end:
                    # If the end of uw2 is the same as the start of uw1
                    # Prepend uw2's nodes with the duplicate coord sliced off
                    uw1[0:0] = uw2[:-1]
                    consumed = True
                elif uw1_start == uw2_start:
                    # If the start of uw2 is the same as the start of uw1
                    # Prepend uw2's nodes reversed with the duplicate coord sliced off
                    uw1[0:0] = uw2[:0:-1]
                    consumed = True

                # Check if this way has been used
                if consumed:
                    unclosed_ways.remove((r2, uw2))
                    # uw_start/end may have changed by now
                    if uw1[0] == uw1[-1]:
                        # Made a closed ring
                        # Create linear ring from vertices and classify by role
                        ring = sg.LinearRing(uw1)
                        if r1 == 'inner':
                            rel_inner_rings.append(ring)
                        else:
                            rel_outer_rings.append(ring)
                        # Remove from unclosed ways
                        unclosed_ways.remove((r1, uw1))
                        if not unclosed_ways:
                            break

        # Combine outer rings to a single ring
        if len(rel_outer_rings) > 1:
            coords = []
            for c in rel_outer_rings:
                coords += c.coords
            outer_ring = sg.LinearRing(coords)
        elif len(rel_outer_rings) < 1:
            if len(unclosed_ways) == 1:
                outer_ring = sg.LinearRing(unclosed_ways[0][1])
            else:
                print("No outer rings in multipolygon")
                continue
        else:
            outer_ring = rel_outer_rings[0]
        poly = sg.Polygon(shell=outer_ring, holes=rel_inner_rings)
        df_list.append(poly)

    # Iterate polygons ways
    for way_id, element in ways.items():
        if way_id in used_ways:
            continue
        # Find the vertices (AKA nodes) that make up each polygon
        locs = [nodes[i] for i in element]
        # Not a polygon if less than 3 vertices, so ignore
        if len(locs) < 3:
            continue
        # Add Shapely polygon to list
        poly = sg.Polygon(locs)
        df_list.append(poly)
    # OSM node coordinates are WGS84 lon/lat, so set the CRS to EPSG:4326 without reprojecting
    poly_df = gpd.GeoDataFrame(df_list, columns=['geometry']).set_crs('EPSG:4326')
    poly_df.drop_duplicates(subset='geometry', inplace=True, ignore_index=True)
    return poly_df
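
For fragments that share exact endpoints, shapely.ops.linemerge does the same stitching as the manual endpoint matching above; a self-contained sketch with toy fragments:

from shapely.geometry import MultiLineString, Polygon
from shapely.ops import linemerge

fragments = MultiLineString([[(0, 0), (1, 0)], [(1, 0), (1, 1)],
                             [(1, 1), (0, 0)]])
merged = linemerge(fragments)  # -> one closed LineString
if merged.is_ring:
    ring_poly = Polygon(merged.coords)
    print(ring_poly.area)  # -> 0.5
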
def create(output_path):
    """
    This script creates test fixtures for the Île-de-France / France pipeline.

    For that, we generate a couple of artificial data sets that have the same
    structure as the initial French data. We deliberately do *not* base this script
    on the actual data sets (e.g., to filter and reduce them), but generate them
    from scratch. This way, we can extend and improve these artificial files step
    by step to test specific features of the pipeline.

    In this artificial France, we have two regions: 10 and 20.

        +---------+---------+
        |         |         |
        |   10    |   20    | 50km
        |         |         |
        +---------+---------+
           50km       50km

    Both regions are divided in four departments 1A, 1B, 1C, 1D and 2A, 2B, 2C, 2D:

        +----+----+              +----+----+
        | 1A | 1B | 25km         | 2A | 2B | 25km
        +----+----+              +----+----+
        | 1C | 1D | 25km         | 2C | 2D | 25km
        +----+----+              +----+----+
         25km 25km                25km 25km

    Each department is divided in 25 municipalities, e.g. 1A001 to 1A025, which are boxes
    of 5km x 5km:

        001 002 003 004 005
        006 007 008 009 010
        011 012 013 014 015
        016 017 018 019 020
        021 022 023 024 025

    The municipalities are furthermore divided into IRIS of size 500m x 500m. This
    gives 10x10 = 100 IRIS per municipality, e.g. 1A00250001 to 1A00250100. Only
    few municipalities are covered by IRIS:
    - 1B013, 1B014, 1B018, 1B019
    - 2D007, 2D008, 2D012, 2D013
    """

    BPE_OBSERVATIONS = 500
    HTS_HOUSEHOLDS = 300
    HTS_HOUSEHOLD_MEMBERS = 3

    CENSUS_HOUSEHOLDS = 300
    CENSUS_HOUSEHOLD_MEMBERS = 3

    COMMUTE_FLOW_OBSERVATIONS = 500
    ADDRESS_OBSERVATIONS = 2000
    SIRENE_OBSERVATIONS = 2000

    import geopandas as gpd
    import pandas as pd
    import shapely.geometry as geo
    import numpy as np
    import os

    random = np.random.RandomState(0)

    REGION_LENGTH = 50 * 1e3
    DEPARTMENT_LENGTH = 25 * 1e3
    MUNICIPALITY_LENGTH = 5 * 1e3
    IRIS_LENGTH = 500

    anchor_x = 638589
    anchor_y = 6861081

    # Define internal zoning system
    print("Creating zoning system ...")
    df = []

    WITH_IRIS = set([
        "1B013", "1B014", "1B018", "1B019", "2D007", "2D008", "2D012", "2D013"
    ])

    for region_column in np.arange(2):
        region_prefix = region_column + 1
        region_number = region_prefix * 10

        region_x = anchor_x + region_column * REGION_LENGTH
        region_y = anchor_y + 0

        for department_row in np.arange(2):
            for department_column in np.arange(2):
                department_letter = {
                    (0, 0): "A",
                    (0, 1): "B",
                    (1, 0): "C",
                    (1, 1): "D"
                }[(department_row, department_column)]

                department_name = "%d%s" % (region_prefix, department_letter)

                department_x = region_x + department_column * DEPARTMENT_LENGTH
                department_y = region_y - department_row * DEPARTMENT_LENGTH

                for municipality_index in np.arange(25):
                    municipality_name = "%s%03d" % (department_name,
                                                    municipality_index + 1)

                    municipality_row = municipality_index // 5
                    municipality_column = municipality_index % 5

                    municipality_x = department_x + municipality_column * MUNICIPALITY_LENGTH
                    municipality_y = department_y - municipality_row * MUNICIPALITY_LENGTH

                    if municipality_name in WITH_IRIS:
                        for iris_index in np.arange(100):
                            iris_name = "%s%04d" % (municipality_name,
                                                    iris_index + 1)

                            iris_row = iris_index // 10
                            iris_column = iris_index % 10

                            iris_x = municipality_x + iris_column * IRIS_LENGTH
                            iris_y = municipality_y - iris_row * IRIS_LENGTH

                            iris_polygon = geo.Polygon([
                                (iris_x, iris_y),
                                (iris_x + IRIS_LENGTH, iris_y),
                                (iris_x + IRIS_LENGTH, iris_y - IRIS_LENGTH),
                                (iris_x, iris_y - IRIS_LENGTH)
                            ])

                            df.append(
                                dict(region=region_number,
                                     department=department_name,
                                     municipality=municipality_name,
                                     iris=iris_name,
                                     geometry=iris_polygon))

                    else:
                        municipality_polygon = geo.Polygon([
                            (municipality_x, municipality_y),
                            (municipality_x + MUNICIPALITY_LENGTH,
                             municipality_y),
                            (municipality_x + MUNICIPALITY_LENGTH,
                             municipality_y - MUNICIPALITY_LENGTH),
                            (municipality_x,
                             municipality_y - MUNICIPALITY_LENGTH)
                        ])

                        iris_name = "%s0000" % municipality_name

                        df.append(
                            dict(region=region_number,
                                 department=department_name,
                                 municipality=municipality_name,
                                 iris=iris_name,
                                 geometry=municipality_polygon))

    df = pd.DataFrame.from_records(df)
    df = gpd.GeoDataFrame(df, crs="EPSG:2154")

    # Dataset: IRIS zones
    # Required attributes: CODE_IRIS, INSEE_COM, geometry
    print("Creating IRIS zones ...")

    df_iris = df.copy()
    df_iris = df_iris[[
        "iris", "municipality", "geometry"
    ]].rename(columns=dict(iris="CODE_IRIS", municipality="INSEE_COM"))

    os.mkdir("%s/iris_2017" % output_path)
    df_iris.to_file("%s/iris_2017/CONTOURS-IRIS.shp" % output_path)

    # Dataset: Codes
    # Required attributes: CODE_IRIS, DEPCOM, DEP, REG
    print("Creating codes ...")

    df_codes = df.copy()
    df_codes = df_codes[["iris", "municipality", "department",
                         "region"]].rename(columns=dict(iris="CODE_IRIS",
                                                        municipality="DEPCOM",
                                                        department="DEP",
                                                        region="REG"))

    os.mkdir("%s/codes_2017" % output_path)
    df_codes.to_excel("%s/codes_2017/reference_IRIS_geo2017.xls" % output_path,
                      sheet_name="Emboitements_IRIS",
                      startrow=5,
                      index=False)

    # Dataset: Aggregate census
    # Required attributes: IRIS, COM, DEP, REG, P15_POP
    print("Creating aggregate census ...")

    df_population = df.copy()
    df_population = df_population[[
        "iris", "municipality", "department", "region"
    ]].rename(columns=dict(
        iris="IRIS", municipality="COM", department="DEP", region="REG"))

    # Set all population to fixed number
    df_population["P15_POP"] = 120.0

    os.mkdir("%s/rp_2015" % output_path)
    df_population.to_excel("%s/rp_2015/base-ic-evol-struct-pop-2015.xls" %
                           output_path,
                           sheet_name="IRIS",
                           startrow=5,
                           index=False)

    # Dataset: BPE
    # Required attributes: DCIRIS, LAMBERT_X, LAMBERT_Y, TYPEQU, DEPCOM, DEP
    print("Creating BPE ...")

    # We put enterprises at the centroid of the shapes
    observations = BPE_OBSERVATIONS
    categories = np.array(["A", "B", "C", "D", "E", "F", "G"])

    df_selection = df.iloc[random.randint(0, len(df),
                                          size=observations)].copy()
    df_selection["DCIRIS"] = df_selection["iris"]
    df_selection["DEPCOM"] = df_selection["municipality"]
    df_selection["DEP"] = df_selection["department"]
    df_selection["LAMBERT_X"] = df_selection["geometry"].centroid.x
    df_selection["LAMBERT_Y"] = df_selection["geometry"].centroid.y
    df_selection["TYPEQU"] = categories[random.randint(0,
                                                       len(categories),
                                                       size=len(df_selection))]

    # Deliberately set coordinates for some to NaN
    df_selection["LAMBERT_X"].iloc[-10:] = np.nan
    df_selection["LAMBERT_Y"].iloc[-10:] = np.nan

    import pysal

    types = [("C", 10, 0), ("C", 12, 0), ("C", 12, 0), ("C", 4, 0),
             ("C", 5, 0), ("C", 3, 0)]
    columns = ["DCIRIS", "LAMBERT_X", "LAMBERT_Y", "TYPEQU", "DEPCOM", "DEP"]

    os.mkdir("%s/bpe_2018" % output_path)
    db = pysal.open("%s/bpe_2018/bpe18_ensemble_xy.dbf" % output_path, "w")

    db.header = columns
    db.field_spec = types

    for index, row in df_selection[columns].iterrows():
        db.write(row)

    db.close()

    # Dataset: Tax data
    # Required attributes: CODGEO, D115, ..., D915
    print("Creating FILOSOFI ...")

    df_income = df.drop_duplicates("municipality")[[
        "municipality"
    ]].rename(columns=dict(municipality="CODGEO"))
    df_income["D115"] = 9122.0
    df_income["D215"] = 11874.0
    df_income["D315"] = 14430.0
    df_income["D415"] = 16907.0
    df_income["Q215"] = 22240.0
    df_income["D615"] = 22827.0
    df_income["D715"] = 25699.0
    df_income["D815"] = 30094.0
    df_income["D915"] = 32303.0

    # Deliberately remove some of them
    df_income = df_income[~df_income["CODGEO"].isin(["1A015", "1A016"])]

    # Deliberately only provide median for some
    f = df_income["CODGEO"].isin(["1D002", "1D005"])
    df_income.loc[f, "D215"] = np.nan

    os.mkdir("%s/filosofi_2015" % output_path)
    df_income.to_excel("%s/filosofi_2015/FILO_DISP_COM.xls" % output_path,
                       sheet_name="ENSEMBLE",
                       startrow=5,
                       index=False)

    # Data set: ENTD
    print("Creating ENTD ...")

    data = dict(
        Q_MENAGE=[],
        Q_TCM_MENAGE=[],
        Q_INDIVIDU=[],
        Q_TCM_INDIVIDU=[],
        K_DEPLOC=[],
    )

    for household_index in range(HTS_HOUSEHOLDS):
        household_id = household_index

        region = random.choice([10, 20])
        department = "%d%s" % (region // 10, random.choice(
            ["A", "B", "C", "D"]))

        data["Q_MENAGE"].append(
            dict(DEP=department,
                 idENT_MEN=household_id,
                 PONDV1=1.0,
                 RG=region,
                 V1_JNBVELOADT=random.randint(4),
                 V1_JNBVEH=random.randint(3),
                 V1_JNBMOTO=random.randint(2),
                 V1_JNBCYCLO=0))

        data["Q_TCM_MENAGE"].append(
            dict(NPERS=3,
                 PONDV1=1.0,
                 DEP=department,
                 idENT_MEN=household_id,
                 RG=region,
                 TrancheRevenuMensuel=random.choice([
                     "Moins de 400", "De 400", "De 600", "De 800", "De 1 000",
                     "De 1 200", "De 1 500", "De 1800", "De 2 000", "De 2 500",
                     "De 3 000", "De 4 000", "De 6 000", "10 000"
                 ])))

        for person_index in range(HTS_HOUSEHOLD_MEMBERS):
            person_id = household_id * 1000 + person_index
            studies = random.random_sample() < 0.3

            data["Q_INDIVIDU"].append(
                dict(
                    IDENT_IND=person_id,
                    idENT_MEN=household_id,
                    RG=region,
                    V1_GPERMIS=random.choice([1, 2]),
                    V1_GPERMIS2R=random.choice([1, 2]),
                    V1_ICARTABON=random.choice([1, 2]),
                ))

            data["Q_TCM_INDIVIDU"].append(
                dict(AGE=random.randint(90),
                     SEXE=random.choice([1, 2]),
                     CS24=random.randint(8) * 10,
                     DEP=department,
                     ETUDES=1 if studies else 2,
                     IDENT_IND=person_id,
                     IDENT_MEN=household_id,
                     PONDV1=1.0,
                     SITUA=random.choice([1, 2])))

            if person_index == 0:  # Only one person per household has activity chain
                home_department = department
                work_department = random.choice(df["department"].unique())

                purpose = "1.11" if studies else "9"
                mode = random.choice(["1", "2", "2.20", "2.23", "4"])

                data["K_DEPLOC"].append(
                    dict(
                        IDENT_IND=person_id,
                        V2_MMOTIFDES=purpose,
                        V2_MMOTIFORI=1,
                        V2_TYPJOUR=1,
                        V2_MORIHDEP="08:00:00",
                        V2_MDESHARR="09:00:00",
                        V2_MDISTTOT=3,  # km
                        IDENT_JOUR=1,
                        V2_MTP=mode,
                        V2_MDESDEP=work_department,
                        V2_MORIDEP=home_department,
                        NDEP=4,
                        V2_MOBILREF=1,
                        PONDKI=3.0))

                data["K_DEPLOC"].append(
                    dict(
                        IDENT_IND=person_id,
                        V2_MMOTIFDES=2,
                        V2_MMOTIFORI=purpose,
                        V2_TYPJOUR=1,
                        V2_MORIHDEP="17:00:00",
                        V2_MDESHARR="17:30:00",
                        V2_MDISTTOT=3,  # km
                        IDENT_JOUR=1,
                        V2_MTP=mode,
                        V2_MDESDEP=home_department,
                        V2_MORIDEP=work_department,
                        NDEP=4,
                        V2_MOBILREF=1,
                        PONDKI=3.0))

                data["K_DEPLOC"].append(
                    dict(
                        IDENT_IND=person_id,
                        V2_MMOTIFDES=1,
                        V2_MMOTIFORI=2,
                        V2_TYPJOUR=1,
                        V2_MORIHDEP="18:00:00",
                        V2_MDESHARR="19:00:00",
                        V2_MDISTTOT=3,  # km
                        IDENT_JOUR=1,
                        V2_MTP=mode,
                        V2_MDESDEP=home_department,
                        V2_MORIDEP=home_department,
                        NDEP=4,
                        V2_MOBILREF=1,
                        PONDKI=3.0))

                # Add a tail
                data["K_DEPLOC"].append(
                    dict(
                        IDENT_IND=person_id,
                        V2_MMOTIFDES=2,
                        V2_MMOTIFORI=1,
                        V2_TYPJOUR=1,
                        V2_MORIHDEP="21:00:00",
                        V2_MDESHARR="22:00:00",
                        V2_MDISTTOT=3,  # km
                        IDENT_JOUR=1,
                        V2_MTP=mode,
                        V2_MDESDEP=home_department,
                        V2_MORIDEP=home_department,
                        NDEP=4,
                        V2_MOBILREF=1,
                        PONDKI=3.0))

    os.mkdir("%s/entd_2008" % output_path)
    pd.DataFrame.from_records(data["Q_MENAGE"]).to_csv(
        "%s/entd_2008/Q_menage.csv" % output_path, index=False, sep=";")
    pd.DataFrame.from_records(data["Q_TCM_MENAGE"]).to_csv(
        "%s/entd_2008/Q_tcm_menage_0.csv" % output_path, index=False, sep=";")
    pd.DataFrame.from_records(data["Q_INDIVIDU"]).to_csv(
        "%s/entd_2008/Q_individu.csv" % output_path, index=False, sep=";")
    pd.DataFrame.from_records(data["Q_TCM_INDIVIDU"]).to_csv(
        "%s/entd_2008/Q_tcm_individu.csv" % output_path, index=False, sep=";")
    pd.DataFrame.from_records(data["K_DEPLOC"]).to_csv(
        "%s/entd_2008/K_deploc.csv" % output_path, index=False, sep=";")

    # Data set: EGT
    print("Creating EGT ...")

    data = dict(households=[], persons=[], trips=[])

    for household_index in range(HTS_HOUSEHOLDS):
        household_id = household_index

        municipality = random.choice(df["municipality"].unique())
        region = df[df["municipality"] == municipality]["region"].values[0]
        department = df[df["municipality"] ==
                        municipality]["department"].values[0]

        data["households"].append(
            dict(RESDEP=department,
                 NQUEST=household_id,
                 POIDSM=1.0,
                 NB_VELO=random.randint(3),
                 NB_VD=random.randint(2),
                 RESCOMM=municipality,
                 NB_2RM=0,
                 MNP=3,
                 REVENU=random.randint(12)))

        for person_index in range(HTS_HOUSEHOLD_MEMBERS):
            person_id = household_id * 1000 + person_index
            studies = random.random_sample() < 0.3

            data["persons"].append(
                dict(RESDEP=department,
                     NP=person_id,
                     POIDSP=1.0,
                     NQUEST=household_id,
                     SEXE=random.choice([1, 2]),
                     AGE=random.randint(90),
                     PERMVP=random.choice([1, 2]),
                     ABONTC=random.choice([1, 2]),
                     OCCP=3 if studies else 2,
                     PERM2RM=random.choice([1, 2]),
                     NBDEPL=2,
                     CS8=random.randint(9)))

            home_department = department
            home_municipality = municipality

            work_municipality = random.choice(df["municipality"].unique())
            work_region = df[df["municipality"] ==
                             work_municipality]["region"].values[0]
            work_department = df[df["municipality"] ==
                                 work_municipality]["department"].values[0]

            purpose = 21 if studies else 11
            mode = random.choice([1, 2, 3, 5, 7])

            data["trips"].append(
                dict(NQUEST=household_id,
                     NP=person_id,
                     ND=1,
                     ORDEP=home_department,
                     DESTDEP=work_department,
                     ORH=8,
                     ORM=0,
                     DESTH=9,
                     DESTM=0,
                     ORCOMM=home_municipality,
                     DESTCOMM=work_municipality,
                     DPORTEE=3,
                     MODP_H7=2,
                     DESTMOT_H9=purpose,
                     ORMOT_H9=1))

            data["trips"].append(
                dict(NQUEST=household_id,
                     NP=person_id,
                     ND=1,
                     ORDEP=work_department,
                     DESTDEP=home_department,
                     ORH=8,
                     ORM=0,
                     DESTH=9,
                     DESTM=0,
                     ORCOMM=work_municipality,
                     DESTCOMM=home_municipality,
                     DPORTEE=3,
                     MODP_H7=2,
                     DESTMOT_H9=31,
                     ORMOT_H9=purpose))

            data["trips"].append(
                dict(NQUEST=household_id,
                     NP=person_id,
                     ND=2,
                     ORDEP=home_department,
                     DESTDEP=home_department,
                     ORH=17,
                     ORM=0,
                     DESTH=18,
                     DESTM=0,
                     ORCOMM=home_municipality,
                     DESTCOMM=home_municipality,
                     DPORTEE=3,
                     MODP_H7=2,
                     DESTMOT_H9=1,
                     ORMOT_H9=31))

    os.mkdir("%s/egt_2010" % output_path)
    pd.DataFrame.from_records(data["households"]).to_csv(
        "%s/egt_2010/Menages_semaine.csv" % output_path, index=False, sep=",")
    pd.DataFrame.from_records(data["persons"]).to_csv(
        "%s/egt_2010/Personnes_semaine.csv" % output_path,
        index=False,
        sep=",")
    pd.DataFrame.from_records(data["trips"]).to_csv(
        "%s/egt_2010/Deplacements_semaine.csv" % output_path,
        index=False,
        sep=",")

    # Data set: Census
    print("Creating census ...")

    persons = []

    for household_index in range(CENSUS_HOUSEHOLDS):
        household_id = household_index

        iris = df["iris"].iloc[random.randint(len(df))]
        department = iris[:2]
        # derive the region from the zoning data (the original snippet used an
        # undefined `region` further below)
        region = df[df["iris"] == iris]["region"].values[0]
        if iris.endswith("0000"): iris = iris[:-4] + "XXXX"

        if random.random_sample() < 0.1:  # For some, commune is not known
            iris = "ZZZZZZZZZ"

        destination_municipality = random.choice(df["municipality"].unique())
        destination_region = df[df["municipality"] ==
                                destination_municipality]["region"].values[0]
        destination_department = df[
            df["municipality"] ==
            destination_municipality]["department"].values[0]

        for person_index in range(CENSUS_HOUSEHOLD_MEMBERS):
            persons.append(
                dict(CANTVILLE="ABCE",
                     NUMMI=household_id,
                     AGED="%03d" % random.randint(90),
                     COUPLE=random.choice([1, 2]),
                     CS1=random.randint(9),
                     DEPT=department,
                     IRIS=iris,
                     REGION=region,
                     ETUD=random.choice([1, 2]),
                     ILETUD=4 if department != destination_department else 0,
                     ILT=4 if department != destination_department else 0,
                     IPONDI=float(1.0),
                     SEXE=random.choice([1, 2]),
                     TACT=random.choice([1, 2]),
                     TRANS=4,
                     VOIT=random.randint(3),
                     DEROU=random.randint(2)))

    columns = [
        "CANTVILLE", "NUMMI", "AGED", "COUPLE", "CS1", "DEPT", "IRIS",
        "REGION", "ETUD", "ILETUD", "ILT", "IPONDI", "SEXE", "TACT", "TRANS",
        "VOIT", "DEROU"
    ]

    types = [
        ("C", 5, 0),
        ("C", 7, 0),
        ("C", 3, 0),
        ("C", 1, 0),
        ("C", 1, 0),  # CS 1
        ("C", 3, 0),
        ("C", 9, 0),
        ("C", 2, 0),
        ("C", 1, 0),
        ("C", 1, 0),
        ("C", 1, 0),
        ("N", 10, 7),  # IPONDI
        ("C", 1, 0),
        ("C", 2, 0),
        ("C", 1, 0),
        ("C", 1, 0),
        ("C", 1, 0),
    ]

    df_persons = pd.DataFrame.from_records(persons)[columns]

    db = pysal.open("%s/rp_2015/FD_INDCVIZA_2015.dbf" % output_path, "w")
    db.header = columns
    db.field_spec = types
    for index, row in df_persons.iterrows():
        db.write(row)
    db.close()

    # Data set: commute flows
    print("Creating commute flows ...")

    municipalities = df["municipality"].unique()
    observations = COMMUTE_FLOW_OBSERVATIONS

    # ... work
    df_work = pd.DataFrame(
        dict(COMMUNE=municipalities[random.randint(0, len(municipalities),
                                                   observations)],
             DCLT=municipalities[random.randint(0, len(municipalities),
                                                observations)],
             TRANS=random.randint(1, 6, size=(observations, ))))

    df_work["ARM"] = "Z"
    df_work["IPONDI"] = 1.0

    columns = ["COMMUNE", "DCLT", "TRANS", "ARM", "IPONDI"]
    types = [("C", 5, 0), ("C", 5, 0), ("C", 1, 0), ("C", 5, 0), ("N", 10, 7)]
    db = pysal.open("%s/rp_2015/FD_MOBPRO_2015.dbf" % output_path, "w")
    db.header = columns
    db.field_spec = types
    for index, row in df_work[columns].iterrows():
        db.write(row)
    db.close()

    # ... education
    df_education = pd.DataFrame(
        dict(COMMUNE=municipalities[random.randint(0, len(municipalities),
                                                   observations)],
             DCETUF=municipalities[random.randint(0, len(municipalities),
                                                  observations)]))
    df_education["ARM"] = "Z"
    df_education["IPONDI"] = 1.0

    columns = ["COMMUNE", "DCETUF", "ARM", "IPONDI"]
    types = [("C", 5, 0), ("C", 5, 0), ("C", 5, 0), ("N", 10, 7)]
    db = pysal.open("%s/rp_2015/FD_MOBSCO_2015.dbf" % output_path, "w")
    db.header = columns
    db.field_spec = types
    for index, row in df_education[columns].iterrows():
        db.write(row)
    db.close()

    # Data set: BD-TOPO
    print("Creating BD-TOPO ...")

    observations = ADDRESS_OBSERVATIONS

    streets = np.array([
        "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
        "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
    ])[random.randint(0, 26, observations)]

    numbers = random.randint(0, 20, observations)

    x = random.random_sample(size=(observations, )) * 100
    y = random.random_sample(size=(observations, )) * 50

    df_bdtopo = gpd.GeoDataFrame(
        {
            "CODE_INSEE":
            municipalities[random.randint(0, len(municipalities),
                                          observations)],
            "NUMERO":
            numbers,
            "NOM_1":
            streets,
            "geometry": [geo.Point(x, y) for x, y in zip(x, y)]
        },
        crs="EPSG:2154")

    df_bdtopo["NOM_1"] = "R " + df_bdtopo["NOM_1"]

    os.mkdir("%s/bdtopo" % output_path)
    df_bdtopo.to_file("%s/bdtopo/ADRESSE.shp" % output_path)

    # Data set: SIRENE
    print("Creating SIRENE ...")

    observations = SIRENE_OBSERVATIONS

    streets = np.array([
        "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
        "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
    ])[random.randint(0, 26, observations)]

    numbers = random.randint(0, 20, observations)

    df_sirene = pd.DataFrame({
        "siret":
        random.randint(0, 99999999, observations),
        "libelleVoieEtablissement":
        streets,
        "numeroVoieEtablissement":
        numbers,
        "codeCommuneEtablissement":
        municipalities[random.randint(0, len(municipalities), observations)]
    })

    df_sirene["activitePrincipaleEtablissement"] = "52.1"
    df_sirene["trancheEffectifsEtablissement"] = "03"
    df_sirene["typeVoieEtablissement"] = "RUE"

    os.mkdir("%s/sirene" % output_path)
    df_sirene.to_csv("%s/sirene/StockEtablissement_utf8.csv" % output_path,
                     index=False)

    # Data set: OSM
    # We add a road grid with 500 m spacing
    print("Creating OSM ...")
    import itertools

    osm = []
    osm.append('<?xml version="1.0" encoding="UTF-8"?>')
    osm.append('<osm version="0.6">')

    df_nodes = []
    links = []

    node_index = 1

    lengthx = 200
    lengthy = 100

    for i in range(lengthx):
        for j in range(lengthy):
            df_nodes.append(
                dict(id=node_index,
                     geometry=geo.Point(anchor_x + 500 * i + 250,
                                        anchor_y - 500 * j - 250)))

            if j < lengthy - 1:
                links.append([node_index, node_index + 1])

            if i < lengthx - 1:
                # the next node in x direction is one full inner loop (lengthy) ahead
                links.append([node_index, node_index + lengthy])

            node_index += 1

    df_nodes = gpd.GeoDataFrame(df_nodes, crs="EPSG:2154")
    df_nodes = df_nodes.to_crs("EPSG:4326")

    for row in df_nodes.itertuples():
        osm.append(
            '<node id="%d" lat="%f" lon="%f" version="3" timestamp="2010-12-05T17:00:00" />'
            % (row[1], row[2].y, row[2].x))

    for index, link in enumerate(links):
        osm.append(
            '<way id="%d" version="3" timestamp="2010-12-05T17:00:00">' %
            (index + 1))
        osm.append('<nd ref="%d" />' % link[0])
        osm.append('<nd ref="%d" />' % link[1])
        osm.append('<tag k="highway" v="primary" />')
        osm.append('</way>')

    osm.append('</osm>')

    import gzip
    os.mkdir("%s/osm" % output_path)
    with gzip.open("%s/osm/ile-de-france-latest.osm.gz" % output_path,
                   "wb+") as f:
        f.write(bytes("\n".join(osm), "utf-8"))

    import subprocess
    subprocess.check_call([
        "osmosis", "--read-xml",
        "%s/osm/ile-de-france-latest.osm.gz" % output_path, "--write-pbf",
        "%s/osm/ile-de-france-latest.osm.pbf" % output_path
    ])

    # Data set: GTFS
    print("Creating GTFS ...")

    feed = {}

    feed["agency"] = pd.DataFrame.from_records([
        dict(agency_id=1,
             agency_name="eqasim",
             agency_timezone="Europe/Paris",
             agency_url="https://eqasim.org")
    ])

    feed["calendar"] = pd.DataFrame.from_records([
        dict(service_id=1,
             monday=1,
             tuesday=1,
             wednesday=1,
             thursday=1,
             friday=1,
             saturday=1,
             sunday=1,
             start_date="20100101",
             end_date="20500101")
    ])

    feed["routes"] = pd.DataFrame.from_records([
        dict(route_id=1,
             agency_id=1,
             route_short_name="EQ",
             route_long_name="The eqasim train",
             route_desc="",
             route_type=2)
    ])

    stops = []

    df_stops = df[df["municipality"].isin(["1B019", "2D007"])].copy()
    df_stops = df_stops.to_crs("EPSG:4326")

    feed["stops"] = pd.DataFrame.from_records([
        dict(stop_id="A",
             stop_code="A",
             stop_name="A",
             stop_desc="",
             stop_lat=df_stops["geometry"].iloc[0].centroid.y,
             stop_lon=df_stops["geometry"].iloc[0].centroid.x,
             location_type=1,
             parent_station=None),
        dict(stop_id="B",
             stop_code="B",
             stop_name="B",
             stop_desc="",
             stop_lat=df_stops["geometry"].iloc[1].centroid.y,
             stop_lon=df_stops["geometry"].iloc[1].centroid.x,
             location_type=1,
             parent_station=None)
    ])

    trips = []
    times = []

    trip_id = 1

    for origin, destination in [("A", "B"), ("B", "A")]:
        for hour in np.arange(1, 24):
            trips.append(dict(route_id=1, service_id=1, trip_id=trip_id))

            times.append(
                dict(trip_id=trip_id,
                     arrival_time="%02d:00:00" % hour,
                     departure_time="%02d:00:00" % hour,
                     stop_id=origin,
                     stop_sequence=1))

            times.append(
                dict(trip_id=trip_id,
                     arrival_time="%02d:00:00" % (hour + 1),
                     departure_time="%02d:00:00" % (hour + 1),
                     stop_id=destination,
                     stop_sequence=2))

            trip_id += 1

    feed["trips"] = pd.DataFrame.from_records(trips)
    feed["stop_times"] = pd.DataFrame.from_records(times)

    # Transfers
    feed["transfers"] = pd.DataFrame(
        dict(from_stop_id=[], to_stop_id=[], transfer_type=[]))

    os.mkdir("%s/gtfs" % output_path)

    import data.gtfs.utils
    data.gtfs.utils.write_feed(feed, "%s/gtfs/IDFM_gtfs.zip" % output_path)
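# Hedged sketch: the call above relies on a helper data.gtfs.utils.write_feed
# that is not shown in this excerpt. A minimal stand-in, under the assumption
# that a GTFS feed is just its tables serialized as <name>.txt inside a zip:
import io
import zipfile

def write_feed_sketch(feed, path):
    # Illustrative only; the real data.gtfs.utils.write_feed may differ.
    with zipfile.ZipFile(path, "w") as archive:
        for name, df_table in feed.items():
            buffer = io.StringIO()
            df_table.to_csv(buffer, index=False)
            archive.writestr("%s.txt" % name, buffer.getvalue())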
Beispiel #15
0
from shapely import affinity
from shapely import geometry as geom  # needed below; missing from the original excerpt
import matplotlib.pyplot as plt
import numpy as np

plt.style.use('./miller.mplstyle')

# The excerpt assumes a `tableau20` color palette without defining it; the
# first two Tableau colors are enough for the two scatter calls below.
tableau20 = [(31 / 255, 119 / 255, 180 / 255), (174 / 255, 199 / 255, 232 / 255)]

n = 360
theta = np.linspace(0, np.pi * 2, n)

a = 2
b = 1
angle = 45.0

r = a * b / np.sqrt((b * np.cos(theta))**2 + (a * np.sin(theta))**2)
xy = np.stack([r * np.cos(theta), r * np.sin(theta)], 1)

ellipse = affinity.rotate(geom.Polygon(xy), angle, 'center')

x, y = ellipse.exterior.xy
plt.plot(x, y, lw=1, color='k')
ng = 50
rnd = np.array([[ii, jj] for ii in np.linspace(min(x), max(x), ng)
                for jj in np.linspace(min(y), max(y), ng)])

res = np.array([p for p in rnd if ellipse.contains(geom.Point(p))])

plt.scatter(rnd[:, 0], rnd[:, 1], s=50, color=tableau20[1])
plt.scatter(res[:, 0], res[:, 1], color=tableau20[0], s=15)
plt.savefig('./EllipseContains.png', bbox_inches='tight', dpi=300)
plt.show()
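# Cross-check (sketch): the same containment test can be done analytically by
# rotating the candidate points back by -angle and testing the canonical
# ellipse equation; this avoids building a Polygon when only membership is
# needed. Up to points exactly on the boundary, res_analytic should match res.
phi = np.deg2rad(-angle)
rx = rnd[:, 0] * np.cos(phi) - rnd[:, 1] * np.sin(phi)
ry = rnd[:, 0] * np.sin(phi) + rnd[:, 1] * np.cos(phi)
res_analytic = rnd[(rx / a)**2 + (ry / b)**2 <= 1.0]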
Beispiel #16
0
def LSSTPointing(xc, yc, angle_rot=0., area=None, maxbound=None):
    """
    Function to build a focal plane for LSST

    Parameters
    ---------------

    xc: float
       x-position of the center FP (RA)
    yc: float
       y-position of the center FP (Dec)
    angle_rot: float, opt
      angle of rotation of the FP (default: 0.)
    area: float, opt
      target area for the FP (default: None)
    maxbound: float, opt
      bound used to reduce the FP extent (default: None)
    Returns
    ----------
    LSST FP (geometry.Polygon)

    """
    """
    arr = [[3, 0], [12, 0], [12, 1], [13, 1], [13, 2], [14, 2], [14, 3], [15, 3],
           [15, 12], [14, 12], [14, 13], [13, 13], [
               13, 14], [12, 14], [12, 15],
           [3, 15], [3, 14], [2, 14], [2, 13], [1, 13], [1, 12], [0, 12],
           [0, 3], [1, 3], [1, 2], [2, 2], [2, 1], [3, 1]]
    """
    # this is a quarter of LSST FP (with corner rafts)
    arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 6.5], [5.5, 6.5], [5.5, 5.5],
           [6.5, 5.5], [6.5, 4.5], [7.5, 4.5], [7.5, 0.0]]

    # this is a quarter of LSST FP (without corner rafts)
    arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 4.5], [7.5, 4.5], [7.5, 0.0]]
    if maxbound is not None:
        arr = [[0.0, maxbound], [maxbound * 4.5 / 7.5, maxbound],
               [maxbound * 4.5 / 7.5, maxbound * 4.5 / 7.5],
               [maxbound, maxbound * 4.5 / 7.5], [maxbound, 0.0]]
    # symmetry I: y -> -y
    arrcp = list(arr)
    for val in arr[::-1]:
        if val[1] > 0.:
            arrcp.append([val[0], -val[1]])

    # symmetry II: x -> -x
    arr = list(arrcp)
    for val in arrcp[::-1]:
        if val[0] > 0.:
            arr.append([-val[0], val[1]])

    # build polygon
    poly_orig = geometry.Polygon(arr)

    # set area
    if area is not None:
        poly_orig = affinity.scale(poly_orig,
                                   xfact=np.sqrt(area / poly_orig.area),
                                   yfact=np.sqrt(area / poly_orig.area))

    # set rotation angle
    rotated_poly = affinity.rotate(poly_orig, angle_rot)

    return affinity.translate(rotated_poly,
                              xoff=xc - rotated_poly.centroid.x,
                              yoff=yc - rotated_poly.centroid.y)
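# Usage sketch (hypothetical values): a focal plane of roughly the LSST area,
# centered on (RA, Dec) = (10., -5.) and rotated by 30 degrees.
fp = LSSTPointing(10., -5., angle_rot=30., area=9.6)
print(fp.area, fp.bounds)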
def get_highValueArea(bbox,
                      save_path=os.getcwd(),
                      Low_Value_gdf=None,
                      check_plot=1):
    """
    In case low-value features were queried with get_features_OSM(),
    calculate the "counter-shape" representig high value area for a given bounding box.

    Parameters:
        bbox (array): List of coordinates in format [South, West, North, East]
        save_path (str): path for results
        Low_Value_gdf (str): absolute path of gdf of low value items which is to be inverted.
          If left empty, searches for OSM_features_gdf_combined_lat_lon.shp in save_path.
        checkplot

    Returns:
        High_Value_Area (gdf): GeoDataFrame of High Value Area as High_Value_Area_lat_lon

    Example:
        High_Value_gdf_47_8 = get_highValueArea([47.16, 8.0, 47.3, 8.0712], save_path = save_path,\
                                    Low_Value_gdf = save_path+'/Low_Value_gdf_combined_47_8.shp')
    important: Use same bbox and save_path as for get_features_OSM().
    """

    Outer_Poly = geometry.Polygon([(bbox[1], bbox[2]), (bbox[1], bbox[0]),
                                   (bbox[3], bbox[0]), (bbox[3], bbox[2])])

    if Low_Value_gdf is None:
        try:
            Low_Value_gdf = geopandas.read_file(save_path +
                                                '/OSM_features_gdf_combined_' +
                                                str(int(bbox[0])) + '_' +
                                                str(int(bbox[1])) + '.shp')
        except:
            print('No Low-Value-Union found with name %s. \n Please add.' %
                  (save_path + '/OSM_features_gdf_combined_' +
                   str(int(bbox[0])) + '_' + str(int(bbox[1])) + '.shp'))
    else:
        Low_Value_gdf = geopandas.read_file(Low_Value_gdf)

    # Making one Union of individual shapes in gdfs
    Low_Value_Union = _makeUnion(Low_Value_gdf)

    # subtract low-value areas from high-value polygon
    High_Value_Area = Outer_Poly.difference(Low_Value_Union)

    # save high value multipolygon as shapefile and re-read as gdf:
    schema = {'geometry': 'MultiPolygon', 'properties': {'Name': 'str:80'}}
    shapeout = (save_path + '/High_Value_Area_' + str(int(bbox[0])) + '_' +
                str(int(bbox[1])) + ".shp")
    with fiona.open(shapeout,
                    'w',
                    crs=from_epsg(4326),
                    driver='ESRI Shapefile',
                    schema=schema) as output:
        prop1 = {'Name': 'High Value Area'}
        geom = mapping(High_Value_Area)
        output.write({'geometry': geom, 'properties': prop1})

    High_Value_Area = geopandas.read_file(shapeout)

    # plot
    if check_plot == 1:
        f, ax = plt.subplots(1)
        ax = High_Value_Area.plot(ax=ax)
        f.suptitle('High Value Area ' + str(int(bbox[0])) + ' ' +
                   str(int(bbox[1])))
        plt.show()
        f.savefig('High Value Area ' + str(int(bbox[0])) + '_' +
                  str(int(bbox[1])) + '.pdf',
                  bbox_inches='tight')

    return High_Value_Area
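# The function above calls a helper _makeUnion() that is not part of this
# excerpt. A minimal sketch, assuming it simply dissolves all geometries of
# the GeoDataFrame into a single (Multi)Polygon:
from shapely.ops import unary_union

def _makeUnion_sketch(gdf):
    # Illustrative stand-in for the undefined _makeUnion helper.
    return unary_union(list(gdf.geometry))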
def PolyOffset(points, offset, scaling):

    # Create a Polygon from the given points
    OGpoly = shp.Polygon(points)

    # Get all the vertices of the offset polygon
    # Define the sizes of some variables that are used in the for loop
    edges = np.zeros((len(points), 2))  # Edges of the given polygon
    normals = np.zeros((len(points), 2))  # normal vectors to each edge
    unit_normals = np.zeros((len(points), 2))  # unit normal vectors
    midpoints = np.zeros((len(points), 2))  # midpoint of each edge
    midpoints_offset = np.zeros(
        (len(points), 2))  # midpoints of the offset polygon's edges
    vert_offset = np.zeros((len(points), 2))  # vertices of the offset polygon

    for i in range(len(points) - 1):
        edges[i] = np.subtract(points[i + 1], points[i])
        normals[i] = (edges[i, 1], -edges[i, 0])
        mag2 = np.sqrt(normals[i, 0]**2 + normals[i, 1]**2)
        unit_normals[i] = normals[i] / mag2
        midpoints[i] = points[i] + 0.5 * edges[
            i]  # midpoint of each edge, found at half the edge length
        midpoints_offset[i] = midpoints[i] + offset * unit_normals[
            i]  # offset the midpoints by the scalar offset (delta) along the unit normals

    # Do all the same steps for the last edge from the "last point" to the "first point" of the given polygon
    edges[len(points) - 1] = np.subtract(points[0], points[len(points) - 1])
    normals[len(points) - 1] = (edges[len(points) - 1,
                                      1], -edges[len(points) - 1, 0])
    mag2 = np.sqrt(normals[len(points) - 1, 0]**2 +
                   normals[len(points) - 1, 1]**2)
    unit_normals[len(points) - 1] = normals[len(points) - 1] / mag2
    midpoints[len(points) -
              1] = points[len(points) - 1] + 0.5 * edges[len(points) - 1]
    midpoints_offset[len(points) -
                     1] = midpoints[len(points) -
                                    1] + offset * unit_normals[len(points) - 1]

    # Get the value for "t1" which discribes the scalar for the intersection between two neighbored edges of the offset polygon
    # Use Cramer´s rule to find those
    for i in range(len(points) - 1):
        A1 = np.array([
            np.subtract(midpoints_offset[i + 1], midpoints_offset[i]),
            -edges[i + 1]
        ])
        A = np.array([edges[i], -edges[i + 1]])
        t1 = np.linalg.det(A1) / np.linalg.det(A)

        vert_offset[i] = midpoints_offset[i] + t1 * edges[
            i]  # Get the vertices of the offset polygon with t1

    # Do the same steps in the previous loop for the last edge from the "last point" to the "first point" of the offset polygon
    A1 = np.array([
        np.subtract(midpoints_offset[0], midpoints_offset[len(points) - 1]),
        -edges[0]
    ])
    A = np.array([edges[len(points) - 1], -edges[0]])
    t1 = np.linalg.det(A1) / np.linalg.det(A)

    vert_offset[len(points) -
                1] = midpoints_offset[len(points) - 1] + t1 * edges[
                    len(points) -
                    1]  # Get the last vertex of the offset polygon with t1

    # For comparison with a scaled polygon define a transformation matrix T
    T = ([scaling, 0], [0, scaling])
    vert_trans = np.zeros(
        (len(points), 2)
    )  # define a matrix for the vertices that are calculated in the following loop

    # Get the transformed vertices by multiplication between the transformation matrix and the vertices of the given polygon
    for i in range(len(points)):
        vert_trans[i] = np.matmul(T, points[i])

    # Define the new polygons
    OffsetPoly = shp.Polygon(vert_offset)
    TransformPoly = shp.Polygon(vert_trans)

    # Turn polygon points into numpy arrays for plotting
    OGpolypts = np.array(OGpoly.exterior)
    OffsetPolypts = np.array(OffsetPoly.exterior)
    TransformPolypts = np.array(TransformPoly.exterior)

    # Plot points
    plt.plot(*OGpolypts.T, '--', color='g', label='Original Polygon')
    plt.plot(*OffsetPolypts.T, color='red', label='Offset Polygon')
    #plt.plot(*TransformPolypts.T, color='blue', label='Scaled Polygon')
    plt.axis('equal')
    plt.legend()
    plt.title('Question 2: Offset Convex Polygon')
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.show()
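# Usage sketch: offset a unit square outwards by 0.25 and compare it against a
# scaled copy. The offset direction depends on the vertex order; with this
# counter-clockwise ordering the computed normals point outwards.
PolyOffset([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)], 0.25, 1.5)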
Beispiel #19
0
    def savepatches(self, resizeimg, objects, subimgname, left, up, right,
                    down):
        outdir = os.path.join(self.outlabelpath, subimgname + '.txt')
        mask_poly = []
        imgpoly = shgeo.Polygon([(left, up), (right, up), (right, down),
                                 (left, down)])
        with codecs.open(outdir, 'w', self.code) as f_out:
            for obj in objects:
                gtpoly = shgeo.Polygon([(obj['poly'][0], obj['poly'][1]),
                                        (obj['poly'][2], obj['poly'][3]),
                                        (obj['poly'][4], obj['poly'][5]),
                                        (obj['poly'][6], obj['poly'][7])])
                if (gtpoly.area <= 0):
                    continue
                inter_poly, half_iou = self.calchalf_iou(gtpoly, imgpoly)

                # print('writing...')
                if (half_iou == 1):
                    polyInsub = self.polyorig2sub(left, up, obj['poly'])
                    outline = ' '.join(list(map(str, polyInsub)))
                    outline = outline + ' ' + obj['name'] + ' ' + str(
                        obj['difficult'])
                    f_out.write(outline + '\n')
                elif (half_iou > 0):
                    #elif (half_iou > self.thresh):
                    ##  print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    inter_poly = shgeo.polygon.orient(inter_poly, sign=1)
                    out_poly = list(inter_poly.exterior.coords)[0:-1]
                    if len(out_poly) < 4:
                        continue

                    out_poly2 = []
                    for i in range(len(out_poly)):
                        out_poly2.append(out_poly[i][0])
                        out_poly2.append(out_poly[i][1])

                    if (len(out_poly) == 5):
                        #print('==========================')
                        out_poly2 = self.GetPoly4FromPoly5(out_poly2)
                    elif (len(out_poly) > 5):
                        """
                            if the cut instance is a polygon with points more than 5, we do not handle it currently
                        """
                        continue
                    if (self.choosebestpoint):
                        out_poly2 = choose_best_pointorder_fit_another(
                            out_poly2, obj['poly'])

                    polyInsub = self.polyorig2sub(left, up, out_poly2)

                    for index, item in enumerate(polyInsub):
                        if (item <= 1):
                            polyInsub[index] = 1
                        elif (item >= self.subsize_w and index % 2 == 0):
                            polyInsub[index] = self.subsize_w
                        elif (item >= self.subsize_h and index % 2 == 1):
                            polyInsub[index] = self.subsize_h
                    outline = ' '.join(list(map(str, polyInsub)))
                    if (half_iou > self.thresh):
                        outline = outline + ' ' + obj['name'] + ' ' + str(
                            obj['difficult'])
                    else:
                        ## if the left part is too small, label as '2'
                        outline = outline + ' ' + obj['name'] + ' ' + '2'
                    f_out.write(outline + '\n')
                #else:
                #   mask_poly.append(inter_poly)
        self.saveimagepatches(resizeimg, subimgname, left, up)
Beispiel #20
0
"""PuddleWorld arenas."""

import frozendict
from shapely import geometry

from aux_tasks.puddle_world import puddle_world

# Repeated shapes
_TOP_WALL = geometry.Polygon(((0.4, 1.0), (0.4, 0.6), (0.6, 0.6), (0.6, 1.0)))
_BOTTOM_WALL = geometry.Polygon(
    ((0.4, 0.0), (0.4, 0.4), (0.6, 0.4), (0.6, 0.0)))

EMPTY = ()

HYDROGEN = (
    puddle_world.SlowPuddle(
        puddle_world.circle(geometry.Point((0.5, 0.5)), 0.3)),
    puddle_world.SlowPuddle(
        puddle_world.circle(geometry.Point((0.5, 0.5)), 0.15)),
)

SUTTON = (
    puddle_world.SlowPuddle(
        geometry.LineString([(0.1, 0.75), (0.45, 0.75)]).buffer(0.1)),
Beispiel #21
0
def kdp_objects(kdpc, KDPmasked, ax, f, time_start, month, d_beg, h_beg,
                min_beg, sec_beg, d_end, h_end, min_end, sec_end, rlons, rlats,
                max_lons_c, max_lats_c, kdplev, proj):
    kdp_areas = []
    kdp_centroid_lon = []
    kdp_centroid_lat = []
    kdp_max = []
    kdp_storm_lon = []
    kdp_storm_lat = []
    if np.max(KDPmasked) > kdplev:
        for level in kdpc.collections:
            for contour_poly in level.get_paths():
                for n_contour, contour in enumerate(
                        contour_poly.to_polygons()):
                    contour_a = np.asarray(contour[:])
                    xa = contour_a[:, 0]
                    ya = contour_a[:, 1]
                    polygon_new = geometry.Polygon([(i[0], i[1])
                                                    for i in zip(xa, ya)])
                    if n_contour == 0:
                        polygon = polygon_new
                    else:
                        polygon = polygon.difference(polygon_new)
                try:
                    pr_area = (transform(proj, polygon).area *
                               units('m^2')).to('km^2')
                except:
                    continue
                boundary = np.asarray(polygon.boundary.xy)
                polypath = Path(boundary.transpose())
                coord_map = np.vstack(
                    (rlons[0, :, :].flatten(), rlats[0, :, :].flatten())
                ).T  # create an Mx2 array listing all the coordinates in field
                mask_kdp = polypath.contains_points(coord_map).reshape(
                    rlons[0, :, :].shape)
                if pr_area > 2 * units('km^2'):
                    g = Geod(ellps='sphere')
                    dist_kdp = np.zeros((np.asarray(max_lons_c).shape[0]))
                    for i in range(dist_kdp.shape[0]):
                        distance_kdp = g.inv(polygon.centroid.x,
                                             polygon.centroid.y, max_lons_c[i],
                                             max_lats_c[i])
                        dist_kdp[i] = distance_kdp[2] / 1000.

                    try:
                        if np.min(np.asarray(dist_kdp)) < 15.0 and np.max(
                                KDPmasked[mask_kdp]) > 1.5:
                            kdp_path = polypath
                            kdp_areas.append((pr_area))
                            kdp_centroid_lon.append((polygon.centroid.x))
                            kdp_centroid_lat.append((polygon.centroid.y))
                            kdp_storm_lon.append((max_lons_c[np.where(
                                dist_kdp == np.min(dist_kdp))[0][0]]))
                            kdp_storm_lat.append((max_lats_c[np.where(
                                dist_kdp == np.min(dist_kdp))[0][0]]))
                            kdp_max.append((np.max(KDPmasked[mask_kdp])))
                            patch = PathPatch(polypath,
                                              facecolor='green',
                                              alpha=.5,
                                              edgecolor='green',
                                              linewidth=3)
                            ax.add_patch(patch)
                            #Add polygon to placefile
                            f.write('TimeRange: ' + str(time_start.year) +
                                    '-' + str(month) + '-' + str(d_beg) + 'T' +
                                    str(h_beg) + ':' + str(min_beg) + ':' +
                                    str(sec_beg) + 'Z ' +
                                    str(time_start.year) + '-' + str(month) +
                                    '-' + str(d_end) + 'T' + str(h_end) + ':' +
                                    str(min_end) + ':' + str(sec_end) + 'Z')
                            f.write('\n')
                            f.write("Color: 000 139 000 \n")
                            f.write('Line: 3, 0, "KDP Foot Outline" \n')
                            for i in range(len(kdp_path.vertices)):
                                f.write("%.5f" % (kdp_path.vertices[i][1]))
                                f.write(", ")
                                f.write("%.5f" % (kdp_path.vertices[i][0]))
                                f.write('\n')
                            f.write("End: \n \n")
                            f.flush()
                    except:
                        print('kdp fail')
    return kdp_areas, kdp_centroid_lon, kdp_centroid_lat, kdp_storm_lon, kdp_storm_lat, kdp_max, ax, f
# --
# Use centroids to do Voronoi tessellation

subset = dfcnt[['lon', 'lat']]
points = [tuple(x) for x in subset.values]
vPolys = pytess.voronoi(points)

# --
# Plot Voronoi polygons to evaluate result

plt.figure()
for i in range(len(vPolys)):
    print(i)
    verts = vPolys[i][1]
    pList = [Point(i) for i in verts]
    poly = geometry.Polygon([[p.x, p.y] for p in pList])
    x, y = poly.exterior.xy
    plt.plot(x,
             y,
             color='#6699cc',
             alpha=0.7,
             linewidth=.8,
             solid_capstyle='round',
             zorder=2)

plt.show()

# --
# Plot tessellation polygons over Beijing polygons

fig = plt.figure()
Beispiel #23
0
def is_annotation_near_edge(al,
                            ann_minX,
                            ann_maxX,
                            ann_minY,
                            ann_maxY,
                            ann_minZ,
                            ann_maxZ,
                            distance=100,
                            min_edge_sections=4):
    """function to test if annotation is near the 'edge' of a dataset

    Parameters
    ----------
    al: dict
        annotation dictionary
    ann_minX: float
        minimum X of bounding box of data
    ann_maxX: float
        maximum X of bounding box of data
    ann_minY: float
        minimum Y of bounding box of data
    ann_maxY: float
        maximum Y of bounding box of data
    ann_minZ: float
        minimum Z of bounding box of data
    ann_maxZ: float
        maximum Z of bounding box of data
    distance: int
        x,y distance from edge to be considered near edge (default 100)
    min_edge_sections: int
        if the annotation appears in fewer than this number of sections
        and touches the z border of the dataset it is considered near the edge (default 4)
    
    Returns
    -------
    bool:
        True/False if this annotation is near edge
    """
    boundary = geometry.Polygon(
        np.array([[ann_minX, ann_minY], [ann_minX, ann_maxY],
                  [ann_maxX, ann_maxY], [ann_maxX, ann_minY]]))
    try:
        b2 = boundary.buffer(-distance)
    except:
        print(distance)
        print(ann_minX, ann_minY, ann_minX, ann_maxY)
        assert False
    for area in al['areas']:
        poly1 = geometry.Polygon(area['global_path'])
        try:
            if (not b2.contains(poly1)):
                return True
        except:
            print(area['global_path'])
            print()
            assert False
    zvals = np.unique(np.array([area['z'] for area in al['areas']]))
    if len(zvals) < min_edge_sections:
        if ann_minZ in zvals:
            return True
        if ann_maxZ in zvals:
            return True

    return False
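# Usage sketch with a toy annotation: a single square area well inside a
# 1000 x 1000 x 50 bounding box, so the call should return False.
toy_annotation = {
    'areas': [{
        'global_path': [(400, 400), (600, 400), (600, 600), (400, 600)],
        'z': 25,
    }]
}
print(is_annotation_near_edge(toy_annotation, 0, 1000, 0, 1000, 0, 50))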
Beispiel #24
0
    date:    Sunday, 10 February 2019

    description: Simple tests for reprojection
"""

import unittest

import ddt
from shapely import geometry

from cogj.reproject import reproject
from cogj import FeatureCollection, Feature

GEOMS = (geometry.Point(0, 0), geometry.MultiPoint([[0, 0], [1, 1]]),
         geometry.LineString([[0, 0], [1, 1]]),
         geometry.Polygon([[0, 0], [1, 1], [0, 1], [0, 0]]),
         geometry.MultiLineString([[[0, 0], [1, 1]], [[1, 0], [0, 1]]]),
         geometry.MultiPolygon([
             geometry.Polygon([[2, 2], [4, 7], [5, 5], [2, 2]]),
             geometry.Polygon([[0, 0], [1, 1], [0, 1], [0, 0]])
         ]),
         Feature(geometry=geometry.Polygon([[0, 0], [1, 1], [0, 1], [0, 0]]),
                 properties={'a property': 1}),
         FeatureCollection([
             geometry.Polygon([[0, 0], [1, 1], [0, 1], [0, 0]]),
             geometry.Point(0, 0),
             geometry.LineString([[0, 0], [1, 1]])
         ]))


@ddt.ddt
Beispiel #25
0
    csvwriter.writerow(['method', 'value', 'name', 'dir'])
    for entry in histData:
        csvwriter.writerow(
            [entry['method'], entry['val'], entry['name'], entry['dir']])

###############################################################################
## Process the results for miniature maps #####################################
###############################################################################

# # build the voronoi
interstates = pickle.load(
    open(thisRepo + "data/interim/InterstatesWithCityIDs.p", "rb"))
CONUS_lonlat = np.loadtxt(open(thisRepo + "/data/Raw/CONUS.geo.csv", "rb"),
                          delimiter=",",
                          skiprows=1)
CONUS_poly = shp.Polygon(CONUS_lonlat)
CONUS_box = np.array([[-180.0, 90.0], [180.0, 90.0], [180.0, -90.0],
                      [-180.0, -90.0]])
#CONUS_lonlat

interstateLonLats = np.empty([0, 2])
interstatePointIDs = []
for model in models:
    thisInterstateId = model['id']
    thisPath = model['path']
    actualInterstate = interstates.loc[thisInterstateId]
    actualPath = actualInterstate['Path']
    for seg in thisPath:
        pathIndex = seg['id']
        globalIndex = seg['pid']
        thisLon = actualPath.loc[pathIndex, 'lon']
def test_polygon_interiors():

    ax = plt.subplot(211, projection=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()

    pth = Path([[0, -45], [60, -45], [60, 45], [0, 45], [0, 45], [10, -20],
                [10, 20], [40, 20], [40, -20], [10, 20]],
               [1, 2, 2, 2, 79, 1, 2, 2, 2, 79])

    patches_native = []
    patches = []
    for geos in cpatch.path_to_geos(pth):
        for pth in cpatch.geos_to_path(geos):
            patches.append(mpatches.PathPatch(pth))

        # buffer by 10 degrees (leaves a small hole in the middle)
        geos_buffered = geos.buffer(10)
        for pth in cpatch.geos_to_path(geos_buffered):
            patches_native.append(mpatches.PathPatch(pth))

    # Set high zorder to ensure the polygons are drawn on top of coastlines.
    collection = PatchCollection(patches_native,
                                 facecolor='red',
                                 alpha=0.4,
                                 transform=ax.projection,
                                 zorder=10)
    ax.add_collection(collection)

    collection = PatchCollection(patches,
                                 facecolor='yellow',
                                 alpha=0.4,
                                 transform=ccrs.Geodetic(),
                                 zorder=10)

    ax.add_collection(collection)

    # test multiple interior polygons
    ax = plt.subplot(212,
                     projection=ccrs.PlateCarree(),
                     xlim=[-5, 15],
                     ylim=[-5, 15])
    ax.coastlines()

    exterior = np.array(sgeom.box(0, 0, 12, 12).exterior.coords)
    interiors = [
        np.array(sgeom.box(1, 1, 2, 2, ccw=False).exterior.coords),
        np.array(sgeom.box(1, 8, 2, 9, ccw=False).exterior.coords)
    ]
    poly = sgeom.Polygon(exterior, interiors)

    patches = []
    for pth in cpatch.geos_to_path(poly):
        patches.append(mpatches.PathPatch(pth))

    collection = PatchCollection(patches,
                                 facecolor='yellow',
                                 alpha=0.4,
                                 transform=ccrs.Geodetic(),
                                 zorder=10)
    ax.add_collection(collection)
Beispiel #27
0
def path_to_geos(path, force_ccw=False):
    """
    Create a list of Shapely geometric objects from a
    :class:`matplotlib.path.Path`.

    Parameters
    ----------
    path
        A :class:`matplotlib.path.Path` instance.

    Other Parameters
    ----------------
    force_ccw
        Boolean flag determining whether the path can be inverted to enforce
        ccw. Defaults to False.

    Returns
    -------
    A list of instances of the following type(s):
        :class:`shapely.geometry.polygon.Polygon`,
        :class:`shapely.geometry.linestring.LineString` and/or
        :class:`shapely.geometry.multilinestring.MultiLineString`.

    """
    # Convert path into numpy array of vertices (and associated codes)
    path_verts, path_codes = path_segments(path, curves=False)

    # Split into subarrays such that each subarray consists of connected
    # line segments based on the start of each one being marked by a
    # matplotlib MOVETO code.
    verts_split_inds = np.where(path_codes == Path.MOVETO)[0]
    verts_split = np.split(path_verts, verts_split_inds)
    codes_split = np.split(path_codes, verts_split_inds)

    # Iterate through the vertices generating a list of
    # (external_geom, [internal_polygons]) tuples.
    other_result_geoms = []
    collection = []
    for path_verts, path_codes in zip(verts_split, codes_split):
        if len(path_verts) == 0:
            continue

        verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],
                                     axis=1)
        if all(verts_same_as_first):
            geom = sgeom.Point(path_verts[0, :])
        elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:
            geom = sgeom.Polygon(path_verts[:-1, :])
        elif (matplotlib.__version__ < '2.2.0' and
                # XXX A path can be given which does not end with close poly,
                # in that situation, we have to guess?
                path_verts.shape[0] > 3 and verts_same_as_first[-1]):
            geom = sgeom.Polygon(path_verts)
        else:
            geom = sgeom.LineString(path_verts)

        # If geom is a Polygon and is contained within the last geom in
        # collection, add it to its list of internal polygons, otherwise
        # simply append it as a new external geom.
        if geom.is_empty:
            pass
        elif (len(collection) > 0 and
                isinstance(collection[-1][0], sgeom.Polygon) and
                isinstance(geom, sgeom.Polygon) and
                collection[-1][0].contains(geom.exterior)):
            collection[-1][1].append(geom.exterior)
        elif isinstance(geom, sgeom.Point):
            other_result_geoms.append(geom)
        else:
            collection.append((geom, []))

    # Convert each (external_geom, [internal_polygons]) pair into a
    # a shapely Polygon that encapsulates the internal polygons, if the
    # external geom is a LineString leave it alone.
    geom_collection = []
    for external_geom, internal_polys in collection:
        if internal_polys:
            # XXX worry about islands within lakes
            geom = sgeom.Polygon(external_geom.exterior, internal_polys)
        else:
            geom = external_geom

        # Correctly orientate the polygon (ccw)
        if isinstance(geom, sgeom.Polygon):
            if force_ccw and not geom.exterior.is_ccw:
                geom = sgeom.polygon.orient(geom)

        geom_collection.append(geom)

    # If the geom_collection only contains LineStrings combine them
    # into a single MultiLinestring.
    if geom_collection and all(isinstance(geom, sgeom.LineString) for
                               geom in geom_collection):
        geom_collection = [sgeom.MultiLineString(geom_collection)]

    # Remove any zero area Polygons
    def not_zero_poly(geom):
        return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and
                 geom.area != 0) or
                not isinstance(geom, sgeom.Polygon))

    result = list(filter(not_zero_poly, geom_collection))

    return result + other_result_geoms
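# Usage sketch: a closed matplotlib Path spelling out a unit square should
# come back from path_to_geos as a single shapely Polygon.
from matplotlib.path import Path as MplPath

square_path = MplPath([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]],
                      [MplPath.MOVETO, MplPath.LINETO, MplPath.LINETO,
                       MplPath.LINETO, MplPath.CLOSEPOLY])
print(path_to_geos(square_path))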
Beispiel #28
0
def shapely_insert(x, y):
    """Return a shapely object from x and y coordinates."""
    return geo.Polygon(np.transpose((x, y)))
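# Usage sketch: four corner coordinates become a unit square.
print(shapely_insert([0, 1, 1, 0], [0, 0, 1, 1]).area)  # -> 1.0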
Beispiel #29
0
def get_grts_geometry(grts_id,
                      return_proj='wgs84',
                      return_type='poly',
                      sample_frame='Conus'):
    """

    Parameters
    ----------
    grts_id: int
             The GRTS ID of the cell we want the geometry for
    return_proj: None, proj, str ['wgs84']
            The projection to use for the return geometry
            None = The geometry will be returned in the native frame projection
            A valid proj4 projection will be used for the transform
            If you pass the string 'wgs84' the geometry will be in wgs84
    return_type: str
            'geometry' a shapely geometry will be returned
            'bounds' a list in the format [minx, miny, maxx, maxy] will be returned.
    sample_frame: str
        Sample frame to look for a match in. ['Alaska', 'Canada', 'Conus', 'Hawaii', 'Mexico', 'PuertoRico']

    Returns
    -------

        List or shapely geometry
    """
    sample_frame = normalize_grid_frame(sample_frame)
    spec = FRAME_SPECS[sample_frame]

    if 'df' not in spec:
        spec['df'] = _load_lookup(sample_frame)
    df = spec['df']

    matching_row = df[df.GRTS_ID == grts_id]['frame_id']
    if matching_row.shape[0] == 0:
        raise Exception(
            f'The provided grts_id ({grts_id}) does not have a match in the {sample_frame} frame.'
        )
    frame_id = int(matching_row)

    row = int(frame_id / spec['cols'])
    col = int(frame_id % spec['cols'])

    min_x = spec['bounds'][0] + (col * spec['meters'])
    min_y = spec['bounds'][1] + (row * spec['meters'])

    max_x = min_x + spec['meters']
    max_y = min_y + spec['meters']

    if return_proj == 'wgs84':
        min_x, min_y = transform_coords(min_x, min_y, in_proj=spec['crs'])
        max_x, max_y = transform_coords(max_x, max_y, in_proj=spec['crs'])
    elif type(return_proj) == Proj:
        min_x, min_y = transform_coords(min_x,
                                        min_y,
                                        in_proj=spec['crs'],
                                        out_proj=return_proj)
        max_x, max_y = transform_coords(max_x,
                                        max_y,
                                        in_proj=spec['crs'],
                                        out_proj=return_proj)
    elif return_proj is not None:
        raise Exception(
            f'The provided return_proj ({return_proj}) must be "wgs84", None, or a valid pyproj.Proj'
        )

    if return_type == 'bounds':
        return [min_x, min_y, max_x, max_y]

    elif return_type == 'poly':
        from shapely import geometry
        pointlist = [(min_x, min_y), (min_x, max_y), (max_x, max_y),
                     (max_x, min_y)]
        poly = geometry.Polygon(pointlist)
        return poly

    else:
        raise Exception(
            f'The provided return_type({return_type}) must be one of "bounds" or "poly"'
        )
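# Usage sketch (hypothetical GRTS ID; requires the frame lookup tables to be
# available): fetch a cell as a WGS84 polygon, or just its bounding box.
cell_poly = get_grts_geometry(1234, return_proj='wgs84', return_type='poly')
cell_bounds = get_grts_geometry(1234, return_type='bounds')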
def OSM_to_MultiPolygon(osm_layer, lat, lon, filter=None):
    multilist = []
    excludelist = []
    todo = len(osm_layer.dicosmfirst['w']) + len(osm_layer.dicosmfirst['r'])
    step = int(todo / 100) + 1
    done = 0
    for wayid in osm_layer.dicosmfirst['w']:
        if done % step == 0: UI.progress_bar(1, int(100 * done / todo))
        if osm_layer.dicosmw[wayid][0] != osm_layer.dicosmw[wayid][-1]:
            UI.logprint("Non closed way starting at",
                        osm_layer.dicosmn[osm_layer.dicosmw[wayid][0]],
                        ", skipped.")
            done += 1
            continue
        way = numpy.round(
            numpy.array([osm_layer.dicosmn[nodeid]
                         for nodeid in osm_layer.dicosmw[wayid]],
                        dtype=numpy.float64) -
            numpy.array([[lon, lat]], dtype=numpy.float64), 7)
        try:
            pol = geometry.Polygon(way)
            if not pol.area: continue
            if not pol.is_valid:
                UI.logprint("Invalid OSM way starting at",
                            osm_layer.dicosmn[osm_layer.dicosmw[wayid][0]],
                            ", skipped.")
                done += 1
                continue
        except Exception as e:
            UI.vprint(2, e)
            done += 1
            continue
        if filter and filter(pol, wayid, osm_layer.dicosmtags['w']):
            excludelist.append(pol)
        else:
            multilist.append(pol)
        done += 1
    for relid in osm_layer.dicosmfirst['r']:
        if done % step == 0: UI.progress_bar(1, int(100 * done / todo))
        try:
            multiout = [geometry.Polygon(
                numpy.round(
                    numpy.array([osm_layer.dicosmn[nodeid]
                                 for nodeid in nodelist],
                                dtype=numpy.float64) -
                    numpy.array([lon, lat], dtype=numpy.float64), 7))
                for nodelist in osm_layer.dicosmr[relid]['outer']]
            multiout = ops.cascaded_union(
                [geom for geom in multiout if geom.is_valid])
            multiin = [geometry.Polygon(
                numpy.round(
                    numpy.array([osm_layer.dicosmn[nodeid]
                                 for nodeid in nodelist],
                                dtype=numpy.float64) -
                    numpy.array([lon, lat], dtype=numpy.float64), 7))
                for nodelist in osm_layer.dicosmr[relid]['inner']]
            multiin = ops.cascaded_union(
                [geom for geom in multiin if geom.is_valid])
        except Exception as e:
            UI.logprint(e)
            done += 1
            continue
        multipol = multiout.difference(multiin)
        if filter and filter(multipol, relid, osm_layer.dicosmtags['r']):
            targetlist = excludelist
        else:
            targetlist = multilist
        polys = (multipol.geoms if ('Multi' in multipol.geom_type
                                    or 'Collection' in multipol.geom_type)
                 else [multipol])
        for pol in polys:
            if not pol.area:
                done += 1
                continue
            if not pol.is_valid:
                UI.logprint("Relation", relid,
                            "contains an invalid polygon which was discarded")
                done += 1
                continue
            targetlist.append(pol)
        done += 1
    if filter:
        ret_val = (geometry.MultiPolygon(multilist),
                   geometry.MultiPolygon(excludelist))
        UI.vprint(2, "    Total number of geometries:",
                  len(ret_val[0].geoms), len(ret_val[1].geoms))
    else:
        ret_val = geometry.MultiPolygon(multilist)
        UI.vprint(2, "    Total number of geometries:", len(ret_val.geoms))
    UI.progress_bar(1, 100)
    return ret_val
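# Sketch of a `filter` callable as expected above: it receives the polygon,
# the OSM way/relation id, and the tag dictionary for that element type, and
# returns True for geometries that should land in the exclude list. The tag
# layout dicosmtags[...][osmid] -> {key: value} is assumed from the calls.
def exclude_buildings(pol, osmid, tags_dict):
    tags = tags_dict.get(osmid, {})
    return tags.get('building', 'no') != 'no'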