Example #1
    def geoframe(self, simplify=None, predicate=None, crs=None, epsg=None):
        """
        Return a geopandas GeoDataFrame

        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :param crs: Coordinate reference system information
        :param epsg: Specify the CRS as an EPSG number.
        :return: A Geopandas GeoDataFrame
        """
        import geopandas
        from shapely.wkt import loads
        from fiona.crs import from_epsg

        if crs is None and epsg is None and self.epsg is not None:
            epsg = self.epsg

        if crs is None:
            try:
                crs = from_epsg(epsg)
            except TypeError:
                raise TypeError('Must set either crs or epsg for output.')

        df = self.dataframe(predicate=predicate)
        geometry = df['geometry']

        if simplify:
            s = geometry.apply(lambda x: loads(x).simplify(simplify))
        else:
            s = geometry.apply(lambda x: loads(x))

        df['geometry'] = geopandas.GeoSeries(s)

        return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
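The core of this example is the WKT-column-to-GeoDataFrame conversion. A minimal standalone sketch of the same pattern, assuming only pandas, geopandas and shapely (the toy DataFrame is made up for illustration):

import pandas as pd
import geopandas
from shapely.wkt import loads

# toy frame with geometries stored as WKT strings
df = pd.DataFrame({'name': ['a', 'b'],
                   'geometry': ['POINT (10 20)', 'POINT (30 40)']})

# parse the WKT column into shapely objects, then wrap it in a GeoDataFrame
df['geometry'] = df['geometry'].apply(loads)
gdf = geopandas.GeoDataFrame(df, geometry='geometry', crs='EPSG:4326')
print(gdf.total_bounds)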
Example #2
 def test_linestring_multilinestring_result(self):
     geom = 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'
     limit_to = LimitPolygonGeometry(loads(geom))
     geom = limit_to.intersection(loads('LINESTRING(-10 -20, 5 10, 20 -20)'))
     assert isinstance(geom, list)
     assert geom[0].almost_equals(loads('LINESTRING(0 0, 5 10)'))
     assert geom[1].almost_equals(loads('LINESTRING(5 10, 10 0)'))
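For reference, the same clipping can be reproduced with plain shapely; the raw intersection comes back as a single (possibly multi-part) geometry, which the LimitPolygonGeometry helper in the test above unpacks into a list. A small sketch:

from shapely.wkt import loads

box = loads('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))')
line = loads('LINESTRING(-10 -20, 5 10, 20 -20)')
clipped = box.intersection(line)

# multi-part results expose .geoms; fall back to the geometry itself otherwise
for part in getattr(clipped, 'geoms', [clipped]):
    print(part.wkt)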
Example #3
    def geoframe(self, sql, simplify=None, crs=None, epsg=4326):
        """
        Return a geopandas GeoDataFrame

        :param sql: SQL query used to select the records to include in the output.
        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param crs: Coordinate reference system information
        :param epsg: Specify the CRS as an EPSG number.
        :return: A Geopandas GeoDataFrame
        """
        import geopandas
        from shapely.wkt import loads
        from fiona.crs import from_epsg

        if crs is None:
            try:
                crs = from_epsg(epsg)
            except TypeError:
                raise TypeError('Must set either crs or epsg for output.')

        df = self.dataframe(sql)
        geometry = df['geometry']

        if simplify:
            s = geometry.apply(lambda x: loads(x).simplify(simplify))
        else:
            s = geometry.apply(lambda x: loads(x))

        df['geometry'] = geopandas.GeoSeries(s)

        return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
Example #4
 def clean(self,value):
     ## check the geometry is valid
     try:
         ## try to load the form input from WKT
         geom = shapely_wkt.loads(value)
         ## convert to the url format
         ret = reverse_wkt(value)
     ## try to find the AOI code
     except ReadingError:
         try:
             ## return the meta code
             user_meta = models.UserGeometryMetadata.objects.filter(code=value)
             ## confirm it exists in the database
             if len(user_meta) == 0: raise(ValidationError)
             ## convert to shapely geometries. first, return the actual
             ## geometries
             geom = models.UserGeometryData.objects.filter(user_meta=user_meta)
             geom = [shapely_wkt.loads(geom.geom.wkt) for geom in geom]
             ## convert to format acceptable for the url
             ret = value
         except ValidationError:
             raise(ValidationError('Unable to parse WKT or locate unique geometry code.'))
     ## check that spatial operations will return data
     ogeom = shapely_wkt.loads(dataset.spatial_extent.wkt)
     igeom = reduce_to_multipolygon(geom)
     if not keep(igeom=ogeom,target=igeom):
         raise(ValidationError('Input geometry will return an empty intersection.'))
     
     return(ret)
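The final guard above ("will the intersection be empty?") can be expressed directly with shapely, independently of the project's reduce_to_multipolygon/keep helpers. A minimal sketch with made-up geometries:

from shapely import wkt

dataset_extent = wkt.loads('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))')
user_geom = wkt.loads('POLYGON((20 20, 30 20, 30 30, 20 30, 20 20))')

# an empty intersection means the request would return no data
if dataset_extent.intersection(user_geom).is_empty:
    print('Input geometry will return an empty intersection.')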
Example #5
 def test_linestring_contained(self):
     geom = 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'
     limit_to = LimitPolygonGeometry(loads(geom))
     test_geom = loads('LINESTRING(1 1, 9 9)')
     geom = limit_to.intersection(test_geom)
     # should return unmodified input geometry
     assert geom is test_geom
Example #6
def get_extent_by_resource_id(resource_id):
    session = DBSession()
    resource = session.query(Resource).filter(Resource.id == resource_id).first()

    extent = None
    for res in resource.children:
        if res.identity != VectorLayer.identity or (res.keyname and res.keyname.startswith('real_')):
            continue

        table_info = TableInfo.from_layer(res)
        table_info.setup_metadata(tablename=res._tablename)

        columns = [db.func.st_astext(db.func.st_extent(db.text('geom')).label('box'))]
        query = sql.select(columns=columns, from_obj=table_info.table)
        extent_str = session.connection().scalar(query)

        if extent_str:
            if not extent:
                extent = loads(extent_str).bounds
            else:
                new_extent = loads(extent_str).bounds
                extent = extent_union(extent, new_extent)

    session.close()

    return extent_buff(extent, 2000)
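extent_union and extent_buff are project helpers, but the per-layer step is plain shapely: parse the ST_Extent text and take .bounds. A hedged sketch of merging two such extents by hand (the helper names above are not reproduced here):

from shapely.wkt import loads

a = loads('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))').bounds    # (0.0, 0.0, 10.0, 10.0)
b = loads('POLYGON((5 5, 20 5, 20 25, 5 25, 5 5))').bounds    # (5.0, 5.0, 20.0, 25.0)

# union of two (minx, miny, maxx, maxy) tuples
merged = (min(a[0], b[0]), min(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
print(merged)   # (0.0, 0.0, 20.0, 25.0)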
Example #7
    def testStillSimple (self):
        # 147639834
        way= wkt.loads ('''
POLYGON ((662012.67         5329425.33,
          662013.5600000001 5329427.77,

          662020.5699999999 5329417.4,
          662024.14         5329413.74,
          662032.37         5329406.88,

          662034.6          5329405.05,
          662032.71         5329403.53,

          662027.59         5329407.64,
          662019.6800000001 5329414.2,
          662016.23         5329419.54,
          662012.67         5329425.33))''')
        skel= wkt.loads ('''
MULTILINESTRING ((662012.67 5329425.33, 662013.8453861615 5329425.448829351),
                 (662016.23 5329419.54, 662017.2442048641 5329420.179304471),
                 (662019.6800000001 5329414.2, 662020.7605526489 5329415.159279451),
                 (662027.59 5329407.64, 662028.3729275202 5329408.599534022),
                 (662032.71 5329403.53, 662032.7093037122 5329405.059336644),
                 (662034.6 5329405.05, 662032.7093037122 5329405.059336644),
                 (662032.37 5329406.88, 662031.6052956476 5329405.955395874),
                 (662024.14 5329413.74, 662023.2924827472 5329412.823571924),
                 (662020.5699999999 5329417.4, 662019.6022560901 5329416.611499771),
                 (662013.5600000001 5329427.77, 662013.8453861615 5329425.448829351),
                 (662013.8453861615 5329425.448829351, 662017.2442048641 5329420.179304471),
                 (662032.7093037122 5329405.059336644, 662031.6052956476 5329405.955395874),
                 (662017.2442048641 5329420.179304471, 662019.6022560901 5329416.611499771),
                 (662031.6052956476 5329405.955395874, 662028.3729275202 5329408.599534022),
                 (662028.3729275202 5329408.599534022, 662023.2924827472 5329412.823571924),
                 (662019.6022560901 5329416.611499771, 662020.7605526489 5329415.159279451),
                 (662023.2924827472 5329412.823571924, 662020.7605526489 5329415.159279451))''')
        medials= wkt.loads ('''
MULTILINESTRING ((662032.7093037122 5329405.059336644,
                  662031.6052956476 5329405.955395874,
                  662028.3729275202 5329408.599534022,
                  662023.2924827472 5329412.823571924,
                  662020.7605526489 5329415.159279451,
                  662019.6022560901 5329416.611499771,
                  662017.2442048641 5329420.179304471,
                  662013.8453861615 5329425.448829351))''')

        result= centerlines.extend_medials (way, skel, medials)

        # honestly, painstakingly calculated from the shape's coords!
        expected= wkt.loads ('''
MULTILINESTRING ((662033.655        5329404.29,
                  662032.7093037122 5329405.059336644,
                  662031.6052956476 5329405.955395874,
                  662028.3729275202 5329408.599534022,
                  662023.2924827472 5329412.823571924,
                  662020.7605526489 5329415.159279451,
                  662019.6022560901 5329416.611499771,
                  662017.2442048641 5329420.179304471,
                  662013.8453861615 5329425.448829351,
                  662013.115        5329426.55))''')
        self.assertEqual (result, expected, '\n%s\n%s' % (result, expected))
Example #8
    def vector2tiles(cls, vector, pcov=0.0, ptile=0.0, tilelist=None):
        """ Return matching tiles and coverage % for provided vector """
        import osr
        # set spatial filter on tiles vector to speedup
        ogrgeom = ogr.CreateGeometryFromWkt(vector.WKT())
        tvector = cls.vector()
        tlayer = tvector.layer
        vsrs = osr.SpatialReference(vector.Projection())
        trans = osr.CoordinateTransformation(vsrs, tlayer.GetSpatialRef())
        ogrgeom.Transform(trans)
        tlayer.SetSpatialFilter(ogrgeom)

        # geometry of desired site (transformed)
        geom = loads(ogrgeom.ExportToWkt())

        tiles = {}
        # step through tiles vector
        tlayer.ResetReading()
        feat = tlayer.GetNextFeature()
        while feat is not None:
            tgeom = loads(feat.GetGeometryRef().ExportToWkt())
            area = geom.intersection(tgeom).area
            if area != 0:
                tile = cls.feature2tile(feat)
                tiles[tile] = (area / geom.area, area / tgeom.area)
            feat = tlayer.GetNextFeature()
        remove_tiles = []
        if tilelist is None:
            tilelist = tiles.keys()
        for t in tiles:
            if (tiles[t][0] < (pcov / 100.0)) or (tiles[t][1] < (ptile / 100.0)) or t not in tilelist:
                remove_tiles.append(t)
        for t in remove_tiles:
            tiles.pop(t, None)
        return tiles
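The tile bookkeeping above reduces to intersection areas. A standalone sketch of the same coverage fractions with two hand-made polygons (the geometries are illustrative only):

from shapely.wkt import loads

site = loads('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))')
tile = loads('POLYGON((2 2, 6 2, 6 6, 2 6, 2 2))')

area = site.intersection(tile).area
if area != 0:
    # fraction of the site covered by this tile, and fraction of the tile used
    print(area / site.area, area / tile.area)   # 0.25 0.25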
Example #9
def write_extent(bbox, nsmap):
    ''' Generate BBOX extent '''

    from shapely.wkt import loads

    if bbox is not None:
        extent = etree.Element(util.nspath_eval('gmd:extent', nsmap))
        ex_extent = etree.SubElement(extent, util.nspath_eval('gmd:EX_Extent', nsmap))
        ge = etree.SubElement(ex_extent, util.nspath_eval('gmd:geographicElement', nsmap))
        gbb = etree.SubElement(ge, util.nspath_eval('gmd:EX_GeographicBoundingBox', nsmap))
        west = etree.SubElement(gbb, util.nspath_eval('gmd:westBoundLongitude', nsmap))
        east = etree.SubElement(gbb, util.nspath_eval('gmd:eastBoundLongitude', nsmap))
        south = etree.SubElement(gbb, util.nspath_eval('gmd:southBoundLatitude', nsmap))
        north = etree.SubElement(gbb, util.nspath_eval('gmd:northBoundLatitude', nsmap))

        if bbox.find('SRID') != -1:  # it's EWKT; chop off 'SRID=\d+;'
            bbox2 = loads(bbox.split(';')[-1]).envelope.bounds
        else:
            bbox2 = loads(bbox).envelope.bounds

        etree.SubElement(west, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[0])
        etree.SubElement(south, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[1])
        etree.SubElement(east, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[2])
        etree.SubElement(north, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[3])
        return extent
    return None
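The EWKT handling above is just string surgery before the WKT parse; the same split/envelope/bounds steps work on any 'SRID=...;'-prefixed string. A minimal sketch:

from shapely.wkt import loads

ewkt = 'SRID=4326;POLYGON((10 0, 30 0, 30 20, 10 20, 10 0))'
wkt_part = ewkt.split(';')[-1]           # drop the 'SRID=4326;' prefix
west, south, east, north = loads(wkt_part).envelope.bounds
print(west, south, east, north)          # 10.0 0.0 30.0 20.0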
Example #10
    def testRectangle (self):
        # 147639890
        way= wkt.loads ('''
POLYGON ((661994.3 5329434.18, 661995.1899999999 5329436.47,
          662006.21 5329433.42, 662005.66 5329431.28,
          661994.3 5329434.18))''')
        skel= wkt.loads ('''
MULTILINESTRING ((661994.3 5329434.18, 661995.9207244834 5329435.013669997),
                 (662005.66 5329431.28, 662004.8565612 5329432.636764912),
                 (662006.21 5329433.42, 662004.8565612 5329432.636764912),
                 (661995.1899999999 5329436.47, 661995.9207244834 5329435.013669997),
                 (662004.8565612 5329432.636764912, 661995.9207244834 5329435.013669997))''')
        medials= wkt.loads ('''
MULTILINESTRING ((662004.8565612 5329432.636764912,
                  661995.9207244834 5329435.013669997))''')

        result= centerlines.extend_medials (way, skel, medials)

        # honestly, painstakingly calculated from the shape's coords!
        expected= wkt.loads ('''
MULTILINESTRING ((662005.935 5329432.35,
                  662004.8565612 5329432.636764912,
                  661995.9207244834 5329435.013669997,
                  661994.745 5329435.324999999))''')
        self.assertEqual (result, expected, '\n%s\n%s' % (result, expected))
Example #11
def main(opts):
    pattern = loads(open(opts.input, "r").read())
    extent = loads(open(opts.extent, "r").read())

    if not contains.matches(extent.relate(pattern)):
        print "ERROR: pattern must be contained within the extent"
        return

    c = pattern.centroid
    (xs, ys) = extent.boundary.xy
    (minx, maxx, miny, maxy) = (min(xs) - c.x, max(xs) - c.x, min(ys) - c.y, max(ys) - c.y)

    outputFile = open(opts.output, "w")

    geoms = []

    while len(geoms) < opts.number:
        dx = random.uniform(minx, maxx)
        dy = random.uniform(miny, maxy)

        geom = translate(pattern, xoff=dx, yoff=dy)

        if contains.matches(extent.relate(geom)):
            # Check that it is within the extent
            overlap = False
            for g in geoms:
                if intersects.matches(g.relate(geom)):
                    overlap = True
            if overlap == False:
                geoms.append(geom)

    for geom in geoms:
        outputFile.write(dumps(geom) + "\n")
    outputFile.close()
Example #12
    def test_add_datasets_as_list(self):
        item = self.item
        geoms = []

        # create dataset 1 & 2 together
        vals1 = [(datetime.datetime(2014,1,1,12,0,0) + datetime.timedelta(days=i), i) for i in range(0,100)]
        dv1 = DataValues(vals1)
        geometry1 = 'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'
        geom = Geometry()
        geom.set_geom_from_wkt(geometry1)
        geom.type(ElementType.Polygon)
        geom.srs(utils.get_srs_from_epsg(self.srscode))
        geom.datavalues(dv1)
        geoms.append(geom)

        vals2 = [(datetime.datetime(2014,1,1,12,0,0) + datetime.timedelta(days=i), i) for i in range(0,100)]
        dv2 = DataValues(vals2)
        geometry2 = 'POLYGON ((40 20, 50 50, 30 50, 20 30, 40 20))'
        geom = Geometry()
        geom.set_geom_from_wkt(geometry2)
        geom.type(ElementType.Polygon)
        geom.srs(utils.get_srs_from_epsg(self.srscode))
        geom.datavalues(dv2)
        geoms.append(geom)

        # add both datasets
        item.add_geometry(geoms)

        datasets = item.get_all_datasets()
        self.assertTrue(len(datasets.keys()) == 2)
        for g,ts in datasets.iteritems():
            if g.geom().almost_equals(loads(geometry1),5):
                self.assertTrue(g.datavalues() == dv1)
            elif g.geom().almost_equals(loads(geometry2),5):
                self.assertTrue(g.datavalues() == dv2)
Example #13
def get_spatial_overlay_rank(target_geometry, query_geometry):
    """Derive spatial overlay rank for geospatial search as per Lanfear (2006)
    http://pubs.usgs.gov/of/2006/1279/2006-1279.pdf"""
    
    from shapely.geometry.base import BaseGeometry
    #TODO: Add those parameters to config file
    kt = 1.0
    kq = 1.0
    if target_geometry is not None and query_geometry is not None:
        try:
            q_geom = loads(query_geometry)
            t_geom = loads(target_geometry)
            Q = q_geom.area
            T = t_geom.area
            if any(item == 0.0 for item in [Q, T]):
                LOGGER.warn('Geometry has no area')
                return '0'
            X = t_geom.intersection(q_geom).area
            if kt == 1.0 and kq == 1.0:
                LOGGER.debug('Spatial Rank: %s', str((X/Q)*(X/T)))
                return str((X/Q)*(X/T))
            else:
                LOGGER.debug('Spatial Rank: %s', str(((X/Q)**kq)*((X/T)**kt)))
                return str(((X/Q)**kq)*((X/T)**kt))
        except Exception as err:
            LOGGER.warn('Cannot derive spatial overlay ranking %s', err)
            return '0'
    return '0'
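With kt = kq = 1 the rank reduces to (X/Q)*(X/T), where X is the intersection area, Q the query area and T the target area. A small worked sketch with two overlapping squares:

from shapely.wkt import loads

query = loads('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))')     # Q = 16
target = loads('POLYGON((2 2, 6 2, 6 6, 2 6, 2 2))')    # T = 16
X = target.intersection(query).area                      # X = 4

rank = (X / query.area) * (X / target.area)
print(rank)   # 0.0625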
Example #14
def test_spatial_geometry():
    poly = Polygon(((1, 2), (1, 3), (2, 3), (2, 2), (1, 2)))

    # with epsg undefined
    filter = spatial.Spatial(
        spatial.Spatial.GEOMETRY,
        MappedClass.geometry_column(),
        geometry=dumps(poly),
        tolerance=1
    )
    filter = filter.to_sql_expr()
    params = filter.compile().params
    assert str(filter) == '(expand(geomfromtext(:geomfromtext_1, :geomfromtext_2), :expand_1) && "table".geom) AND distance("table".geom, geomfromtext(:geomfromtext_1, :geomfromtext_2)) <= :distance_1'
    assert wkt.loads(params["geomfromtext_1"]).equals(poly)
    assert params["geomfromtext_2"] == 4326
    assert params["expand_1"] == 1
    assert params["distance_1"] == 1

    # with epsg defined
    filter = spatial.Spatial(
        spatial.Spatial.GEOMETRY,
        MappedClass.geometry_column(),
        geometry=dumps(poly),
        tolerance=1,
        epsg=900913
    )
    filter = filter.to_sql_expr()
    params = filter.compile().params
    assert str(filter) == '(expand(geomfromtext(:geomfromtext_1, :geomfromtext_2), :expand_1) && transform("table".geom, :transform_1)) AND distance(transform("table".geom, :transform_1), geomfromtext(:geomfromtext_1, :geomfromtext_2)) <= :distance_1'
    assert wkt.loads(params["geomfromtext_1"]).equals(poly)
    assert params["geomfromtext_2"] == 900913
    assert params["expand_1"] == 1
    assert params["transform_1"] == 900913
    assert params["distance_1"] == 1
Example #15
    def assign_ref_shapes(self):

        # dictionary that contains rank: Polygon pairs
        _shapes = {}

        # extract API region
        where_clause = "FIPS_API = '%s'" % (self.get_5d_api())
        table = 'GISCoreData.dbo.API_Regions_WM'
        polygon_WKT = queryWKT(table, where_clause)
        if polygon_WKT: 
            poly = loads(polygon_WKT)
            _shapes['Api_region'] = poly

        # extract section
        where_clause = "StateCode LIKE '%s' AND TWN LIKE '%s' AND TWNDIR LIKE '%s' AND RNG LIKE '%s' AND RNGDIR LIKE '%s' AND SECTION LIKE '%s'" % (self.state_code, self.twnshp, self.twnshp_dir, self.range_, self.range_dir, self.section)
        table = 'GISCoreData.dbo.PLSS_SEC_%i' % self.mcode
        polygon_WKT = queryWKT(table, where_clause)
        if polygon_WKT: 
            poly = loads(polygon_WKT)
            _shapes['Section'] = poly

        # qqsection if exists
        if self.qqsection:
            where_clause = "TWN LIKE '%s' AND TWNDIR LIKE '%s' AND RNG LIKE '%s' AND RNGDIR LIKE '%s'AND SECTION LIKE '%s' AND qqsection like '%s%s'" % (self.twnshp, self.twnshp_dir, self.range_, self.range_dir, self.section, self.qsection, self.qqsection)
            table = 'GISCoreData.dbo.PLSS_QQ_%i' % self.mcode
            polygon_WKT = queryWKT(table, where_clause)
            if polygon_WKT: 
                poly = loads(polygon_WKT)
                _shapes['qqSection'] = poly

        #TODO: add more conditions

        self.reference_shapes = _shapes
Example #16
  def test_box_filter(self):
      from mapfish.protocol import create_geom_filter
      request = FakeRequest(
          {"bbox": "-180,-90,180,90", "tolerance": "1"}
      )
      filter = create_geom_filter(request, MappedClass)
      compiled_filter = filter.compile(engine)
      params = compiled_filter.params
      filter_str = _compiled_to_string(compiled_filter)
      eq_(filter_str, '(ST_Expand(GeomFromWKB(%(GeomFromWKB_1)s, %(GeomFromWKB_2)s), %(ST_Expand_1)s) && "table".geom) AND (ST_Expand("table".geom, %(ST_Expand_2)s) && GeomFromWKB(%(GeomFromWKB_3)s, %(GeomFromWKB_4)s)) AND ST_Distance("table".geom, GeomFromWKB(%(GeomFromWKB_5)s, %(GeomFromWKB_6)s)) <= %(ST_Distance_1)s')
      assert wkb.loads(str(params["GeomFromWKB_1"])).equals(wkt.loads('POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))'))
      assert params["GeomFromWKB_2"] == 4326
      assert params["ST_Expand_1"] == 1
      assert params["ST_Distance_1"] == 1
 
      request = FakeRequest(
          {"bbox": "-180,-90,180,90", "tolerance": "1", "epsg": "900913"}
      )
      filter = create_geom_filter(request, MappedClass)
      compiled_filter = filter.compile(engine)
      params = compiled_filter.params
      filter_str = _compiled_to_string(compiled_filter)
      eq_(filter_str, '(ST_Expand(GeomFromWKB(%(GeomFromWKB_1)s, %(GeomFromWKB_2)s), %(ST_Expand_1)s) && ST_Transform("table".geom, %(param_1)s)) AND (ST_Expand(ST_Transform("table".geom, %(param_2)s), %(ST_Expand_2)s) && GeomFromWKB(%(GeomFromWKB_3)s, %(GeomFromWKB_4)s)) AND ST_Distance(ST_Transform("table".geom, %(param_3)s), GeomFromWKB(%(GeomFromWKB_5)s, %(GeomFromWKB_6)s)) <= %(ST_Distance_1)s')
      assert wkb.loads(str(params["GeomFromWKB_1"])).equals(wkt.loads('POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))'))
      assert params["GeomFromWKB_2"] == 900913
      assert params["ST_Expand_1"] == 1
      assert params["param_1"] == 900913
      assert params["ST_Distance_1"] == 1
Example #17
def plot_view(result):
    # Determine bounding box if no clipping boundary was supplied
    if not result['bbox']:
        result['bbox'] = bbox_of_view(result)

    ax = plt.subplot(111)
    # plt.box(on=None)
    m = Basemap(resolution='i',
                projection='merc',
                llcrnrlat=result['bbox']['ymin'],
                urcrnrlat=result['bbox']['ymax'],
                llcrnrlon=result['bbox']['xmin'],
                urcrnrlon=result['bbox']['xmax'],
                lat_ts=(result['bbox']['xmin'] +
                        result['bbox']['xmax']) / 2)
    m.drawcoastlines()

    try:
        for el in result['results']:
            vectors = get_vectors_from_postgis_map(m, loads(el['geom']))
            lines = LineCollection(vectors, antialiaseds=(1, ))
            lines.set_facecolors('black')
            lines.set_edgecolors('white')
            lines.set_linewidth(1)
            ax.add_collection(lines)
        m.fillcontinents(color='coral', lake_color='aqua')
    # If AttributeError assume geom_type 'Point', simply collect all
    # points and perform scatterplot
    except AttributeError:
        xy = m([loads(point['geom']).x for point in result['results']],
               [loads(point['geom']).y for point in result['results']])
        plt.scatter(xy[0], xy[1])

    plt.show()
Example #18
def test_spatial_within():
    # with epsg undefined
    filter = spatial.Spatial(
        spatial.Spatial.WITHIN,
        MappedClass.geometry_column(),
        lon=40, lat=5, tolerance=1
    )
    filter = filter.to_sql_expr()
    params = filter.compile().params
    assert str(filter) == '(expand(geomfromtext(:geomfromtext_1, :geomfromtext_2), :expand_1) && "table".geom) AND distance("table".geom, geomfromtext(:geomfromtext_1, :geomfromtext_2)) <= :distance_1'
    assert wkt.loads(params["geomfromtext_1"]).equals(wkt.loads('POINT (40 5)'))
    assert params["geomfromtext_2"] == 4326
    assert params["expand_1"] == 1
    assert params["distance_1"] == 1
 
    # with epsg defined
    filter = spatial.Spatial(
        spatial.Spatial.WITHIN,
        MappedClass.geometry_column(),
        lon=40, lat=5, tolerance=1, epsg=900913 
    )
    filter = filter.to_sql_expr()
    params = filter.compile().params
    assert str(filter) == '(expand(geomfromtext(:geomfromtext_1, :geomfromtext_2), :expand_1) && transform("table".geom, :transform_1)) AND distance(transform("table".geom, :transform_1), geomfromtext(:geomfromtext_1, :geomfromtext_2)) <= :distance_1'
    assert wkt.loads(params["geomfromtext_1"]).equals(wkt.loads('POINT (40 5)'))
    assert params["geomfromtext_2"] == 900913
    assert params["expand_1"] == 1
    assert params["transform_1"] == 900913
    assert params["distance_1"] == 1
Example #19
def test_spatial_box():
    # with epsg undefined
    filter = spatial.Spatial(
        spatial.Spatial.BOX,
        MappedClass.geometry_column(),
        box=[-180, -90, 180, 90],
        tolerance=1
    )
    filter = filter.to_sql_expr()
    params = filter.compile().params
    assert str(filter) == '(expand(geomfromtext(:geomfromtext_1, :geomfromtext_2), :expand_1) && "table".geom) AND distance("table".geom, geomfromtext(:geomfromtext_1, :geomfromtext_2)) <= :distance_1'
    assert wkt.loads(params["geomfromtext_1"]).equals(wkt.loads('POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))'))
    assert params["geomfromtext_2"] == 4326
    assert params["expand_1"] == 1
    assert params["distance_1"] == 1

    # with epsg defined
    filter = spatial.Spatial(
        spatial.Spatial.BOX,
        MappedClass.geometry_column(),
        box=[-180, -90, 180, 90],
        tolerance=1,
        epsg=900913
    )
    filter = filter.to_sql_expr()
    params = filter.compile().params
    assert str(filter) == '(expand(geomfromtext(:geomfromtext_1, :geomfromtext_2), :expand_1) && transform("table".geom, :transform_1)) AND distance(transform("table".geom, :transform_1), geomfromtext(:geomfromtext_1, :geomfromtext_2)) <= :distance_1'
    assert wkt.loads(params["geomfromtext_1"]).equals(wkt.loads('POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))'))
    assert params["geomfromtext_2"] == 900913
    assert params["expand_1"] == 1
    assert params["transform_1"] == 900913
    assert params["distance_1"] == 1
Example #20
def value_to_shape(value):
    """Transforms input into a Shapely object"""
    if not value:
        return wkt.loads('GEOMETRYCOLLECTION EMPTY')
    if isinstance(value, basestring):
        # We check for '{' before trying to parse json because
        # exceptions are resource costly
        if '{' in value:
            geo_dict = geojson.loads(value)
            shape_to_return = asShape(geo_dict)
        elif value:
            # if value is an empty string we return False to stay ORM coherent,
            # maybe we should return an empty shapely geometry
            shape_to_return = wkt.loads(value)
        else:
            return False
    elif hasattr(value, 'wkt'):
        # Nasty, but did not find an equivalent of basestring for shapely geometries
        if 'shapely.geometry' in str(type(value)):
            shape_to_return = value
        else:
            shape_to_return = wkt.loads(value.wkt)
    else:
        raise TypeError('Write/create/search geo type must be wkt/geojson '
                        'string or must respond to wkt')
    return shape_to_return
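The '{' check is a cheap way to route GeoJSON strings to the JSON path and everything else to the WKT parser. A small sketch of the same routing using shapely.geometry.shape (the modern spelling of asShape) and the standard json module:

import json
from shapely import wkt
from shapely.geometry import shape

def to_shape(value):
    # GeoJSON strings contain '{'; plain WKT strings do not
    if '{' in value:
        return shape(json.loads(value))
    return wkt.loads(value)

print(to_shape('POINT (1 2)'))
print(to_shape('{"type": "Point", "coordinates": [1, 2]}'))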
Example #21
    def _filter_overlap(self, scenes, wkt_geometry, min_overlap=0):
        """Filter scenes based on the minimum overlap to the area of interest

        Args:
            scenes: List of scenes to filter
            wkt_geometry: WKT geometry representation of the area of interest
            min_overlap: Minimum overlap (0-1) in decimal format between scene geometry and area of interest

        Returns:
            Filtered list of scenes

        """
        site = loads(wkt_geometry)

        filtered = []

        for scene in scenes:
            footprint = loads(scene['footprint'])
            intersect = site.intersection(footprint)
            overlap = intersect.area / site.area
            # print str(overlap)
            if overlap > min_overlap or (
                    site.area / footprint.area > 1 and intersect.area / footprint.area > min_overlap):
                scene['_script_overlap'] = overlap * 100
                filtered.append(scene)

        return filtered
Example #22
    def test_serialize(self):
        parser = nrml_parsers.SourceModelParser(MIXED_SRC_MODEL)
        source_model = parser.parse()

        inp = models.Input(
            owner=helpers.default_user(),
            digest='fake',
            path='fake',
            input_type='source',
            size=0
        )
        inp.save()

        db_writer = source_input.SourceDBWriter(
            inp, source_model, MESH_SPACING, BIN_WIDTH, AREA_SRC_DISC
        )
        db_writer.serialize()

        # Check that everything was saved properly.

        # First, check the Input:
        # refresh the record
        [inp] = models.Input.objects.filter(id=inp.id)
        self.assertEquals(source_model.name, inp.name)

        # re-parse the test file for comparisons:
        nrml_sources = list(
            nrml_parsers.SourceModelParser(MIXED_SRC_MODEL).parse()
        )

        parsed_sources = list(models.ParsedSource.objects.filter(input=inp.id))

        # compare pristine nrml sources to those stored in pickled form in the
        # database (by unpickling them first, of course):
        for i, ns in enumerate(nrml_sources):
            self.assertTrue(*helpers.deep_eq(ns, parsed_sources[i].nrml))

        # now check that the ParsedSource geometry is correct
        # it should be the same as the 'rupture-enclosing' geometry for the
        # nhlib representation of each source
        for i, (ns, ps) in enumerate(zip(nrml_sources, parsed_sources)):
            nhlib_src = source_input.nrml_to_nhlib(
                ns, MESH_SPACING, BIN_WIDTH, AREA_SRC_DISC
            )

            nhlib_poly = nhlib_src.get_rupture_enclosing_polygon()
            # nhlib tests the generation of wkt from a polygon, so we can trust
            # that it is well-formed.

            # Since we save the rupture enclosing polygon as geometry (not wkt)
            # in the database, the WKT we get back from the DB might have
            # slightly different coordinate values (a difference in precision).
            # shapely can help us compare two polygons (generated from wkt)
            # at a specific level of precision (default=6 digits after the
            # decimal point).
            expected_poly = wkt.loads(ps.polygon.wkt)
            actual_poly = wkt.loads(nhlib_poly.wkt)

            self.assertTrue(expected_poly.almost_equals(actual_poly))
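The precision-tolerant comparison mentioned in the comments boils down to a coordinate-wise check at a tolerance: almost_equals(other, decimal) is equivalent to equals_exact(other, 0.5 * 10**-decimal). A quick sketch using equals_exact:

from shapely.wkt import loads

a = loads('POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))')
b = loads('POLYGON((0 0, 1.0000004 0, 1 1, 0 1, 0 0))')

print(a.equals(b))                        # False: the point sets differ slightly
print(a.equals_exact(b, 0.5 * 10**-6))    # True: equal to ~6 decimal places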
Example #23
    def test_operations(self):
        point = Point(0.0, 0.0)

        # General geometry
        self.assertEqual(point.area, 0.0)
        self.assertEqual(point.length, 0.0)
        self.assertAlmostEqual(point.distance(Point(-1.0, -1.0)),
                               1.4142135623730951)

        # Topology operations

        # Envelope
        self.assertIsInstance(point.envelope, Point)

        # Intersection
        self.assertIsInstance(point.intersection(Point(-1, -1)),
                              GeometryCollection)

        # Buffer
        self.assertIsInstance(point.buffer(10.0), Polygon)
        self.assertIsInstance(point.buffer(10.0, 32), Polygon)

        # Simplify
        p = loads('POLYGON ((120 120, 121 121, 122 122, 220 120, 180 199, '
                  '160 200, 140 199, 120 120))')
        expected = loads('POLYGON ((120 120, 140 199, 160 200, 180 199, '
                         '220 120, 120 120))')
        s = p.simplify(10.0, preserve_topology=False)
        self.assertTrue(s.equals_exact(expected, 0.001))

        p = loads('POLYGON ((80 200, 240 200, 240 60, 80 60, 80 200),'
                  '(120 120, 220 120, 180 199, 160 200, 140 199, 120 120))')
        expected = loads(
            'POLYGON ((80 200, 240 200, 240 60, 80 60, 80 200),'
            '(120 120, 220 120, 180 199, 160 200, 140 199, 120 120))')
        s = p.simplify(10.0, preserve_topology=True)
        self.assertTrue(s.equals_exact(expected, 0.001))

        # Convex Hull
        self.assertIsInstance(point.convex_hull, Point)

        # Differences
        self.assertIsInstance(point.difference(Point(-1, 1)), Point)

        self.assertIsInstance(point.symmetric_difference(Point(-1, 1)),
                              MultiPoint)

        # Boundary
        self.assertIsInstance(point.boundary, GeometryCollection)

        # Union
        self.assertIsInstance(point.union(Point(-1, 1)), MultiPoint)

        self.assertIsInstance(point.representative_point(), Point)

        self.assertIsInstance(point.centroid, Point)

        # Relate
        self.assertEqual(point.relate(Point(-1, -1)), 'FF0FFF0F2')
Example #24
def query_spatial(bbox_data_wkt, bbox_input_wkt, predicate, distance):
    """Perform spatial query

    Parameters
    ----------
    bbox_data_wkt: str
        Well-Known Text representation of the data being queried
    bbox_input_wkt: str
        Well-Known Text representation of the input being queried
    predicate: str
        Spatial predicate to use in query
    distance: int or float or str
        Distance parameter for when using either of ``beyond`` or ``dwithin``
        predicates.

    Returns
    -------
    str
        Either ``true`` or ``false`` depending on the result of the spatial
        query

    Raises
    ------
    RuntimeError
        If an invalid predicate is used

    """

    try:
        bbox1 = loads(bbox_data_wkt.split(';')[-1])
        bbox2 = loads(bbox_input_wkt)
        if predicate == 'bbox':
            result = bbox1.intersects(bbox2)
        elif predicate == 'beyond':
            result = bbox1.distance(bbox2) > float(distance)
        elif predicate == 'contains':
            result = bbox1.contains(bbox2)
        elif predicate == 'crosses':
            result = bbox1.crosses(bbox2)
        elif predicate == 'disjoint':
            result = bbox1.disjoint(bbox2)
        elif predicate == 'dwithin':
            result = bbox1.distance(bbox2) <= float(distance)
        elif predicate == 'equals':
            result = bbox1.equals(bbox2)
        elif predicate == 'intersects':
            result = bbox1.intersects(bbox2)
        elif predicate == 'overlaps':
            result = bbox1.intersects(bbox2) and not bbox1.touches(bbox2)
        elif predicate == 'touches':
            result = bbox1.touches(bbox2)
        elif predicate == 'within':
            result = bbox1.within(bbox2)
        else:
            raise RuntimeError(
                'Invalid spatial query predicate: %s' % predicate)
    except (AttributeError, ValueError, ReadingError):
        result = False
    return "true" if result else "false"
Example #25
 def test_linestring_mixed_result(self):
     geom = 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'
     limit_to = LimitPolygonGeometry(loads(geom))
     geom = limit_to.intersection(loads('LINESTRING(0 0, 5 -10, 5 10)'))
     # point and linestring, point not returned
     assert isinstance(geom, list)
     assert len(geom) == 1
     assert geom[0].almost_equals(loads('LINESTRING(5 0, 5 10)'))
Example #26
    def get_route_data(self, start_coords, end_coords):
        """Executes function for getting pgrouting ways vertices from provided
        coordinates, executes function for getting route with pgrouting.
        Converts route to shapely and ogr geometry, creates buffer around the
        route and executes function for calculating numerical attribute data
        like driving time and length.

        :arg start_coords: dictionary with route starting location coordinates,
            e.g. {"x": 15.5, "y": 45.5}
        :type start_coords: dictionary

        :arg end_coords: dictionary with route ending location coordinates,
            e.g. {"x": 15.5, "y": 45.5}
        :type end_coords: dictionary

        :returns: dictionary with pgrouting route data
        :rtype: dictionary

        """
        start_vertex_id, end_vertex_id = self.get_way_vertices_from_coords(
            start_coords=start_coords,
            end_coords=end_coords
        )

        raw_route, colnames = self.get_route_from_pgrouting(
            start_vertex_id=start_vertex_id,
            end_vertex_id=end_vertex_id
        )

        route_ogr = self.create_multiline_from_linesegments(
            raw_route=raw_route,
            colnames=colnames
        )
        route_shapely = shapely_wkt.loads(route_ogr.ExportToWkt())

        route_buffer_ogr = UTILITY.create_route_buffer(route=route_ogr)
        route_buffer_shapely = shapely_wkt.loads(
            route_buffer_ogr.ExportToWkt()
        )

        driving_time = self.sum_cost(
            raw_route=raw_route,
            colnames=colnames
        )

        route_length = self.sum_route_length(
            raw_route=raw_route,
            colnames=colnames
        )

        return {
            'route_ogr': route_ogr,
            'route_shapely': route_shapely,
            'route_buffer_ogr': route_buffer_ogr,
            'route_buffer_shapely': route_buffer_shapely,
            'driving_time': driving_time,
            'len': route_length,
        }
Example #27
 def test_polygon_multipolygon_result(self):
     geom = 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'
     limit_to = LimitPolygonGeometry(loads(geom))
     test_geom = loads('POLYGON((0 -10, 0 5, 2.5 -5, 5 -1, 7.5 -5, 10 5, 10 -10, 0 -10))')
     geom = limit_to.intersection(test_geom)
     # similar to above, but point does not touch the box, so we should get
     # a single multipolygon
     assert geom.almost_equals(loads(
         'MULTIPOLYGON(((1.25 0, 0 0, 0 5, 1.25 0)),'
         '((10 0, 8.75 0, 10 5, 10 0)))'))
Example #28
 def test_polygon_mixed_result(self):
     geom = 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'
     limit_to = LimitPolygonGeometry(loads(geom))
     test_geom = loads('POLYGON((0 -10, 0 5, 2.5 -5, 5 0, 7.5 -5, 10 5, 10 -10, 0 -10))')
     geom = limit_to.intersection(test_geom)
     # point and two polygons, point not returned
     assert isinstance(geom, list)
     assert len(geom) == 2
     assert geom[0].almost_equals(loads('POLYGON((1.25 0, 0 0, 0 5, 1.25 0))'))
     assert geom[1].almost_equals(loads('POLYGON((10 0, 8.75 0, 10 5, 10 0))'))
Example #29
 def get_st_closest_point(self, a, b):
     result = self.engine.execute('''SELECT
         ST_AsText(ST_ClosestPoint(foo.a, foo.b)) AS a_b,
         ST_AsText(ST_ClosestPoint(foo.b, foo.a)) As b_a
         FROM (
             SELECT '%s'::geometry As a, '%s'::geometry As b
             ) AS foo;''' % (
         a.wkt, b.wkt
     )).fetchall()
     return wkt.loads(result[0][0]), wkt.loads(result[0][1])
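Outside the database, shapely.ops.nearest_points gives roughly the same pair of closest points without a round trip through PostGIS. A small sketch:

from shapely.wkt import loads
from shapely.ops import nearest_points

a = loads('POINT (0 0)')
b = loads('LINESTRING (5 -5, 5 5)')

# returns (closest point on a to b, closest point on b to a)
p_on_a, p_on_b = nearest_points(a, b)
print(p_on_a.wkt, p_on_b.wkt)   # POINT (0 0) POINT (5 0)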
Example #30
    def test_get_field_write_target(self):
        p1 = 'Polygon ((-116.94238466549290933 52.12861711455555991, -82.00526805089285176 61.59075286434307372, ' \
             '-59.92695130138864101 31.0207758265680269, -107.72286778108455962 22.0438778075388484, ' \
             '-122.76523743459291893 37.08624746104720771, -116.94238466549290933 52.12861711455555991))'
        p2 = 'Polygon ((-63.08099655131782413 21.31602121140134898, -42.70101185946779765 9.42769680782217279, ' \
             '-65.99242293586783603 9.912934538580501, -63.08099655131782413 21.31602121140134898))'
        p1 = wkt.loads(p1)
        p2 = wkt.loads(p2)

        mp1 = MultiPolygon([p1, p2])
        mp2 = mp1.buffer(0.1)
        geoms = [mp1, mp2]
        gvar = GeometryVariable(name='gc', value=geoms, dimensions='elementCount')
        gc = gvar.convert_to(node_dim_name='n_node')
        field = gc.parent
        self.assertEqual(field.grid.node_dim.name, 'n_node')

        actual = DriverESMFUnstruct._get_field_write_target_(field)
        self.assertEqual(field.grid.node_dim.name, 'n_node')
        self.assertNotEqual(id(field), id(actual))
        self.assertEqual(actual['numElementConn'].dtype, np.int32)
        self.assertEqual(actual['elementConn'].dtype, np.int32)
        self.assertNotIn(field.grid.cindex.name, actual)
        self.assertEqual(actual['nodeCoords'].dimensions[0].name, 'nodeCount')

        path = self.get_temporary_file_path('foo.nc')
        actual.write(path)

        try:
            import ESMF
        except ImportError:
            pass
        else:
            _ = ESMF.Mesh(filename=path, filetype=ESMF.FileFormat.ESMFMESH)

        path2 = self.get_temporary_file_path('foo2.nc')
        driver = DriverKey.NETCDF_ESMF_UNSTRUCT
        field.write(path2, driver=driver)

        # Test the polygons are equivalent when read from the ESMF unstructured file.
        rd = ocgis.RequestDataset(path2, driver=driver)
        self.assertEqual(rd.driver.key, driver)
        efield = rd.get()
        self.assertEqual(efield.driver.key, driver)
        grid_actual = efield.grid
        self.assertEqual(efield.driver.key, driver)
        self.assertEqual(grid_actual.parent.driver.key, driver)
        self.assertEqual(grid_actual.x.ndim, 1)

        for g in grid_actual.archetype.iter_geometries():
            self.assertPolygonSimilar(g[1], geoms[g[0]])

        ngv = grid_actual.archetype.convert_to()
        self.assertIsInstance(ngv, GeometryVariable)
Example #31
def test_error_handler_wrong_type():
    with pytest.raises(TypeError):
        loads(1)
Example #32
def geom_from_wkt(data, srid=None):
    g = wkt.loads(data)
    g.__class__ = globals()[g.__class__.__name__]
    g._srid = srid
    return g
Example #33
 def __init__(self, maz: int, polygon: str):
     self.maz = maz
     self.polygon = Polygon(loads(polygon))
Example #34
simpledec = re.compile(r"\d*\.\d+")


def mround(match):
    return "{:.0f}".format(float(match.group()))


fids = ['cyano_daymap_20190717.shp']

for fid in fids:
    fid = fid.replace('union_bloom', 'daymap')
    print(fid)
    shapes = gp.read_file(fid)
    shapes.geometry = shapes.geometry.apply(
        lambda x: loads(re.sub(simpledec, mround, x.wkt)))
    shapes.to_file(fid)

# def change_coord(tup):
#     return QgsPoint(int(tup[0]), int(tup[1]))
#
#
# def convert_polygon_type(geom):
#     new_points = []
#     if geom.wkbType() == QgsWKBTypes.Polygon:
#         polys = geom.asPolygon()
#         for p in polys:
#             if isinstance(p, QgsPoint):
#                 point = change_coord(p)
#                 new_points.append(point)
#             elif isinstance(p, list):
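The regex round-trip above rewrites every decimal number in the WKT text before re-parsing it. The same trick applied to a single geometry, for illustration:

import re
from shapely.wkt import loads

simpledec = re.compile(r"\d*\.\d+")

def mround(match):
    return "{:.0f}".format(float(match.group()))

geom = loads('POLYGON((0.4 0.6, 10.2 0.1, 10.4 9.8, 0.3 10.4, 0.4 0.6))')
rounded = loads(re.sub(simpledec, mround, geom.wkt))
print(rounded.wkt)   # coordinates rounded to whole numbers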
Example #35
def load_graphml(filename, folder=None):
    """
    Load a GraphML file from disk and convert the node/edge attributes to correct data types.

    Parameters
    ----------
    filename : string
        the name of the graphml file (including file extension)
    folder : string
        the folder containing the file, if None, use default data folder

    Returns
    -------
    networkx multidigraph
    """
    start_time = time.time()

    # read the graph from disk
    if folder is None:
        folder = globals.data_folder
    path = '{}/{}'.format(folder, filename)
    G = nx.MultiDiGraph(nx.read_graphml(path, node_type=int))

    # convert graph crs attribute from saved string to correct dict data type
    G.graph['crs'] = ast.literal_eval(G.graph['crs'])

    if 'streets_per_node' in G.graph:
        G.graph['streets_per_node'] = ast.literal_eval(G.graph['streets_per_node'])

    # convert numeric node tags from string to numeric data types
    log('Converting node and edge attribute data types')
    for node, data in G.nodes(data=True):
        data['osmid'] = int(data['osmid'])
        data['x'] = float(data['x'])
        data['y'] = float(data['y'])

    # convert numeric, bool, and list node tags from string to correct data types
    for u, v, key, data in G.edges(keys=True, data=True):

        # first parse oneway to bool and length to float - they should always have only 1 value each
        data['oneway'] = ast.literal_eval(data['oneway'])
        data['length'] = float(data['length'])

        # these attributes might have a single value, or a list if edge's topology was simplified
        for attr in ['highway', 'name', 'bridge', 'tunnel', 'lanes', 'ref', 'maxspeed', 'service', 'access', 'area', 'landuse', 'width', 'est_width']:
            # if this edge has this attribute, and it starts with '[' and ends with ']', then it's a list to be parsed
            if attr in data and data[attr][0] == '[' and data[attr][-1] == ']':
                # convert the string list to a list type, else leave as single-value string
                data[attr] = ast.literal_eval(data[attr])

        # osmid might have a single value or a list, but if single value, then parse int
        if 'osmid' in data:
            if data['osmid'][0] == '[' and data['osmid'][-1] == ']':
                data['osmid'] = ast.literal_eval(data['osmid'])
            else:
                data['osmid'] = int(data['osmid'])

        # if geometry attribute exists, load the string as well-known text to shapely LineString
        if 'geometry' in data:
            data['geometry'] = wkt.loads(data['geometry'])

    # remove node_default and edge_default metadata keys if they exist
    if 'node_default' in G.graph:
        del G.graph['node_default']
    if 'edge_default' in G.graph:
        del G.graph['edge_default']

    log('Loaded graph with {:,} nodes and {:,} edges in {:,.2f} seconds from "{}"'.format(len(list(G.nodes())),
                                                                                          len(list(G.edges())),
                                                                                          time.time()-start_time,
                                                                                          path))
    return G
Example #36
File: io.py Project: vargeus/osmnx
def _convert_edge_attr_types(G, node_type):
    """
    Convert graph edges' attributes' types from string to numeric.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    node_type : type
        convert osmid to this type

    Returns
    -------
    G : networkx.MultiDiGraph
    """
    # convert numeric, bool, and list edge attributes from string to correct data types
    for _, _, data in G.edges(data=True, keys=False):

        # parse length to float: should always have only 1 value
        data["length"] = float(data["length"])

        try:
            data["oneway"] = ast.literal_eval(data["oneway"])
        except KeyError:
            # may lack oneway if settings.all_oneway=True when you created graph
            pass
        except ValueError:
            # may have values it can't eval if settings.all_oneway=True
            pass

        # parse grade attrs to float: should always have only 1 value each
        if "grade" in data:
            data["grade"] = float(data["grade"])
        if "grade_abs" in data:
            data["grade_abs"] = float(data["grade_abs"])

        # these attributes might have a single value, or a list if edge's
        # topology was simplified
        for attr in [
            "highway",
            "name",
            "bridge",
            "tunnel",
            "lanes",
            "ref",
            "maxspeed",
            "service",
            "access",
            "area",
            "landuse",
            "width",
            "est_width",
        ]:
            # if this edge has this attribute, and it starts with '[' and ends
            # with ']', then it's a list to be parsed
            if attr in data and data[attr].startswith("[") and data[attr].endswith("]"):
                # try to convert the string list to a list type, else leave as
                # single-value string (and leave as string if error)
                try:
                    data[attr] = ast.literal_eval(data[attr])
                except Exception:
                    pass

        # osmid might have a single value or a list
        if "osmid" in data:
            if data["osmid"].startswith("[") and data["osmid"].endswith("]"):
                # if it's a list, eval list then convert each element to node_type
                data["osmid"] = [node_type(i) for i in ast.literal_eval(data["osmid"])]
            else:
                # if it's not a list, convert it to the node_type
                data["osmid"] = node_type(data["osmid"])

        # if geometry attribute exists, load the string as well-known text to
        # shapely LineString
        if "geometry" in data:
            data["geometry"] = wkt.loads(data["geometry"])

    return G
Example #37
    def download_quicklook(self, product_uuid, target_dir):
        """Downloads a quicklook of the satellite image to a target directory for a specific product_id.
        It performs a very rough geocoding of the quicklooks by shifting the image to the location of the footprint.

        :param product_uuid: UUID of the satellite image product (String).
        :param target_dir: Target directory that holds the downloaded images (String, Path)
        """
        if isinstance(target_dir, str):
            target_dir = Path(target_dir)

        if self.src == Datahub.STAC_local or self.src == Datahub.STAC_API:
            raise NotImplementedError(
                f"download_quicklook not supported for {self.src}. It is much easier to get the asset yourself now, "
                f"when it is a COG you can read in an overview.")

        elif self.src == Datahub.EarthExplorer:
            # query EarthExplorer for url, srcid and bounds of product
            meta_src = self.api.request(
                "metadata",
                **{
                    "datasetName": self.src.value,
                    "entityIds": [product_uuid],
                },
            )
            url = meta_src[0]["browseUrl"]
            bounds = geometry.shape(meta_src[0]["spatialFootprint"]).bounds
            product_srcid = meta_src[0]["displayId"]

        else:
            # query Scihub for url, srcid and bounds of product
            meta_src = self.api.get_product_odata(product_uuid)
            url = "https://scihub.copernicus.eu/apihub/odata/v1/Products('{}')/Products('Quicklook')/$value".format(
                product_uuid)
            bounds = wkt.loads(meta_src["footprint"]).bounds
            product_srcid = meta_src["title"]

        # download quicklook and crop no-data borders
        response = requests.get(url, auth=(self.user, self.pw))
        quicklook = np.asarray(Image.open(BytesIO(response.content)))
        # use threshold of 50 to overcome noise in JPEG compression
        xs, ys, zs = np.where(quicklook >= 50)
        quicklook = quicklook[min(xs):max(xs) + 1,
                              min(ys):max(ys) + 1,
                              min(zs):max(zs) + 1]
        Image.fromarray(quicklook).save(
            target_dir.joinpath(product_srcid + ".jpg"))

        # geocode quicklook
        quicklook_size = (quicklook.shape[1], quicklook.shape[0])
        dist_x = geometry.Point(bounds[0], bounds[1]).distance(
            geometry.Point(bounds[2], bounds[1])) / quicklook_size[0]
        dist_y = geometry.Point(bounds[0], bounds[1]).distance(
            geometry.Point(bounds[0], bounds[3])) / quicklook_size[1]
        ul_x, ul_y = bounds[0], bounds[3]
        with open(target_dir.joinpath(product_srcid + ".jpgw"),
                  "w") as out_file:
            out_file.write(str(dist_x) + "\n")
            out_file.write(str(0.0) + "\n")
            out_file.write(str(0.0) + "\n")
            out_file.write(str(-dist_y) + "\n")
            out_file.write(str(ul_x) + "\n")
            out_file.write(str(ul_y) + "\n")
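The world-file numbers written at the end are derived from the footprint bounds and the quicklook pixel size. A standalone sketch of that arithmetic with an illustrative footprint and image size (here using plain subtraction of the axis-aligned bounds rather than Point distances):

from shapely.wkt import loads

footprint = loads('POLYGON((10 40, 12 40, 12 42, 10 42, 10 40))')
minx, miny, maxx, maxy = footprint.bounds
width_px, height_px = 500, 500                  # quicklook size in pixels

pixel_size_x = (maxx - minx) / width_px         # ground units per pixel in x
pixel_size_y = (maxy - miny) / height_px        # ground units per pixel in y
upper_left = (minx, maxy)
print(pixel_size_x, -pixel_size_y, upper_left)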
Example #38
 def from_wkt(cls, value):
     # This looks-like-wkt business keeps Shapely from printing a
     # warning for non-WKT strings, which it does even though it
     # raises an exception.
     looks_like_wkt = value.startswith(cls.__name__.upper())
     return cls(wkt.loads(value)) if looks_like_wkt else None
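The looks_like_wkt guard just checks that the string begins with the expected geometry type name before handing it to the parser. A tiny sketch of the same guard outside the class, hard-coded for points:

from shapely import wkt

def point_from_wkt(value):
    # only attempt a parse when the string plausibly starts with the right type
    return wkt.loads(value) if value.startswith('POINT') else None

print(point_from_wkt('POINT (3 4)'))
print(point_from_wkt('LINESTRING (0 0, 1 1)'))   # None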
Example #39
def brutusBKoverlapp(mittfilter=None, offisiell=False, kunEnTypeBK=None):
    """
    Finds the load-class (bruksklasse, BK) objects that overlap with bridges.

    Finds overlap between the bridges' road segments and the road segments for the BK types "Normaltransport" (normal transport), "Spesialtransport" (special transport) and "12/100 vegnett" (12/100 road network).
    Optionally use the keyword kunEnTypeBK to pick out just one of these.

    Use the keyword offisiell=True to fetch official BK values; the default fetches unofficial BK values (requires login).

    The bridge query can be narrowed with the keyword mittfilter={}, see the documentation for querying road objects
    https://nvdbapiles-v3.atlas.vegvesen.no/dokumentasjon/openapi/#/Vegobjekter/get_vegobjekter__vegobjekttypeid_

    ARGUMENTS:
        None

    KEYWORDS
        mittfilter=None (default) Optional dictionary with filters on the bridges' properties, road or area filters etc.
            See nvdbapiv3.nvdbFagdata or the API documentation
            https://nvdbapiles-v3.atlas.vegvesen.no/dokumentasjon/openapi/#/Vegobjekter/get_vegobjekter__vegobjekttypeid_

        offisiell=False (default) | True. Chooses between official and unofficial load-class data (unofficial data requires login).

        kunEnTypeBK = None (default) or a string selecting which type of load class to fetch. Valid values:
                None        : Fetches all the BK variants
                'normal'    : Only Bruksklasse, normal transport
                'spesial'   : Only Bruksklasse, special transport
                'tolv65'    : Only Bruksklasse, 12/65 mobile cranes etc.
                'tolv100'   : Only Bruksklasse, 12/100 road network

    RETURNS
        geopandas GeoDataFrame
    """

    filteret = {}
    # Copy mittfilter to avoid side effects
    if mittfilter:
        filteret = deepcopy(mittfilter)

    # Only Brukategori (bridge category) = vegbru (road bridge)
    if 'egenskap' in filteret:
        filteret['egenskap'] = '1263=7304 and ' + filteret['egenskap']
    else:
        filteret['egenskap'] = '1263=7304'

    brusok = nvdbapiv3.nvdbFagdata(60)
    # brusok.filter( filteret )
    bruer = pd.DataFrame(brusok.to_records(relasjoner=False))
    bruer = bruer[bruer['trafikantgruppe'] == 'K']

    # BRUKSLAST/TOTALVEKT (load / total weight) in the extract.   IN EXTRACT. Assumes we use BK normal transport. Compare BK and total weight.
    # Veggruppe (road group) - from special transport              IN EXTRACT  Special transport
    # SV12/65 approved Y/N                                         IN EXTRACT  BK 12/65 (present or not)
    # SV12/100 approved Y/N                                        IN EXTRACT  BK 12/100 (present or not)
    # SV 12/100 restrictions (slow/centric etc.)                   - none
    # SV 12/100 distance                                           - none

    if offisiell:
        normalprefiks = 'bk904_'
        spesialprefix = 'bk902_'
        tolv65prefix = 'bk891_'
        tolv100prefix = 'bk893_'

        normalsok = nvdbapiv3.nvdbFagdata(904)  #Normal
        spesialsok = nvdbapiv3.nvdbFagdata(902)  #Spesial
        tolv65sok = nvdbapiv3.nvdbFagdata(891)  # 12/65
        tolv100sok = nvdbapiv3.nvdbFagdata(893)  # 12/100 vegnett

    else:
        normalprefiks = 'bk905_'
        spesialprefix = 'bk903_'
        tolv65prefix = 'bk892_'
        tolv100prefix = 'bk894_'

        normalsok = nvdbapiv3.nvdbFagdata(905)
        spesialsok = nvdbapiv3.nvdbFagdata(903)
        tolv65sok = nvdbapiv3.nvdbFagdata(892)
        tolv100sok = nvdbapiv3.nvdbFagdata(894)

        normalsok.forbindelse.login(miljo='prodles', username='******')
        spesialsok.forbindelse = normalsok.forbindelse
        tolv65sok.forbindelse = normalsok.forbindelse
        tolv100sok.forbindelse = normalsok.forbindelse

    normalsok.filter({'overlapp': '60'})
    spesialsok.filter({'overlapp': '60'})
    tolv65sok.filter({'overlapp': '60'})
    tolv100sok.filter({'overlapp': '60'})

    normal = pd.DataFrame(normalsok.to_records(relasjoner=False))
    spesial = pd.DataFrame(spesialsok.to_records(relasjoner=False))
    tolv65 = pd.DataFrame(tolv65sok.to_records(relasjoner=False))
    tolv100 = pd.DataFrame(tolv100sok.to_records(relasjoner=False))

    normal['bktall'] = normal['Bruksklasse'].apply(
        lambda x: splitBruksklasse_vekt(x)[0])
    normal['bkvekt'] = normal['Bruksklasse'].apply(
        lambda x: splitBruksklasse_vekt(x)[1])
    # normal['Maks vogntoglengde'] = normal['Maks vogntoglengde'].apply( lambda x : float( x.replace( ',', '.') ) if '.' in x )

    # 'Vegliste gjelder alltid',
    sletteliste = [
        'objekttype', 'nvdbId', 'versjon', 'startdato', 'detaljnivå',
        'typeVeg', 'kommune', 'fylke', 'veglenkeType', 'segmentlengde',
        'geometri', 'vegkategori', 'fase', 'vegnummer', 'adskilte_lop',
        'trafikantgruppe', 'Strekningsbeskrivelse'
    ]

    slettelliste_normal = ['Bruksklasse vinter']

    normal.drop(columns=sletteliste + slettelliste_normal, inplace=True)
    spesial.drop(columns=sletteliste + slettelliste_normal, inplace=True)
    tolv65.drop(columns=sletteliste, inplace=True)
    tolv100.drop(columns=sletteliste, inplace=True)

    bruprefix = 'bru_'
    bruer = bruer.add_prefix(bruprefix)
    brucol_nvdbId = bruprefix + 'nvdbId'
    sluttresultat = None
    # Overlap bridges - normal transport
    if kunEnTypeBK == None or kunEnTypeBK == 'normal':
        mellomresultat2 = nvdbgeotricks.finnoverlapp(bruer,
                                                     normal,
                                                     prefixA=bruprefix,
                                                     prefixB=normalprefiks,
                                                     join='left')
        mellomresultat2.drop(columns=[
            normalprefiks + 'veglenkesekvensid',
            normalprefiks + 'startposisjon', normalprefiks + 'sluttposisjon'
        ],
                             inplace=True)
        sluttresultat = mellomresultat2.copy()
    else:
        mellomresultat2 = bruer

    # Overlap bridges - special transport
    if kunEnTypeBK is None or kunEnTypeBK == 'spesial':
        mellomresultat4 = nvdbgeotricks.finnoverlapp(mellomresultat2,
                                                     spesial,
                                                     prefixA=bruprefix,
                                                     prefixB=spesialprefix,
                                                     join='left')
        mellomresultat4.drop(columns=[
            spesialprefix + 'veglenkesekvensid',
            spesialprefix + 'startposisjon', spesialprefix + 'sluttposisjon'
        ],
                             inplace=True)
        sluttresultat = mellomresultat4.copy()
    else:
        mellomresultat4 = bruer

    # Overlap bridges - 12/65
    if kunEnTypeBK is None or kunEnTypeBK == 'tolv65':
        mellomresultat6 = nvdbgeotricks.finnoverlapp(mellomresultat4,
                                                     tolv65,
                                                     prefixA=bruprefix,
                                                     prefixB=tolv65prefix,
                                                     join='left')
        mellomresultat6.drop(columns=[
            tolv65prefix + 'veglenkesekvensid', tolv65prefix + 'startposisjon',
            tolv65prefix + 'sluttposisjon'
        ],
                             inplace=True)
        sluttresultat = mellomresultat6.copy()
    else:
        mellomresultat6 = bruer

    # Overlap bridges - 12/100
    if kunEnTypeBK is None or kunEnTypeBK == 'tolv100':
        mellomresultat8 = nvdbgeotricks.finnoverlapp(mellomresultat6,
                                                     tolv100,
                                                     prefixA=bruprefix,
                                                     prefixB=tolv100prefix,
                                                     join='left')
        mellomresultat8.drop(columns=[
            tolv100prefix + 'veglenkesekvensid',
            tolv100prefix + 'startposisjon', tolv100prefix + 'sluttposisjon'
        ],
                             inplace=True)
        sluttresultat = mellomresultat8.copy()

    # Build GeoDataFrames
    bruer['geometry'] = bruer['bru_geometri'].apply(lambda x: wkt.loads(x))
    bruer = gpd.GeoDataFrame(bruer, geometry='geometry', crs=5973)

    sluttresultat['geometry'] = sluttresultat['bru_geometri'].apply(
        lambda x: wkt.loads(x))
    minGdf = gpd.GeoDataFrame(sluttresultat, geometry='geometry', crs=5973)

    return minGdf
Example #40
0
    def warp(self, dem=None, proj="EPSG:4326", **kwargs):
        """Delayed warp across an entire AOI or Image

        Creates a new dask image by deferring calls to the warp_geometry on chunks

        Args:
            dem (ndarray): optional. A DEM for warping to specific elevation planes
            proj (str): optional. An EPSG proj string to project the image data into ("EPSG:32612")

        Returns:
            daskarray: a warped image as deferred image array
        """
        try:
            img_md = self.rda.metadata["image"]
            x_size = img_md["tileXSize"]
            y_size = img_md["tileYSize"]
        except (AttributeError, KeyError):
            x_size = kwargs.get("chunk_size", 256)
            y_size = kwargs.get("chunk_size", 256)

        # Create an affine transform to convert between real-world and pixels
        if self.proj is None:
            from_proj = "EPSG:4326"
        else:
            from_proj = self.proj

        try:
            # NOTE: this only works on images that have rda rpcs metadata
            center = wkt.loads(self.rda.metadata["image"]["imageBoundsWGS84"]).centroid
            g = box(*center.buffer(self.rda.metadata["rpcs"]["gsd"] / 2).bounds)
            tfm = partial(pyproj.transform, pyproj.Proj(init="EPSG:4326"), pyproj.Proj(init=proj))
            gsd = kwargs.get("gsd", ops.transform(tfm, g).area ** 0.5)
            current_bounds = wkt.loads(self.rda.metadata["image"]["imageBoundsWGS84"]).bounds
        except (AttributeError, KeyError, TypeError):
            tfm = partial(pyproj.transform, pyproj.Proj(init=self.proj), pyproj.Proj(init=proj))
            gsd = kwargs.get("gsd", (ops.transform(tfm, shape(self)).area / (self.shape[1] * self.shape[2])) ** 0.5)
            current_bounds = self.bounds

        tfm = partial(pyproj.transform, pyproj.Proj(init=from_proj), pyproj.Proj(init=proj))
        itfm = partial(pyproj.transform, pyproj.Proj(init=proj), pyproj.Proj(init=from_proj))
        output_bounds = ops.transform(tfm, box(*current_bounds)).bounds
        gtf = Affine.from_gdal(output_bounds[0], gsd, 0.0, output_bounds[3], 0.0, -1 * gsd)

        ll = ~gtf * (output_bounds[:2])
        ur = ~gtf * (output_bounds[2:])
        x_chunks = int((ur[0] - ll[0]) / x_size) + 1
        y_chunks = int((ll[1] - ur[1]) / y_size) + 1

        num_bands = self.shape[0]

        try:
            dtype = img_md["dataType"]
        except (KeyError, NameError):
            dtype = 'uint8'

        daskmeta = {
            "dask": {},
            "chunks": (num_bands, y_size, x_size),
            "dtype": dtype,
            "name": "warp-{}".format(self.name),
            "shape": (num_bands, y_chunks * y_size, x_chunks * x_size)
        }

        def px_to_geom(xmin, ymin):
            xmax = int(xmin + x_size)
            ymax = int(ymin + y_size)
            bounds = list((gtf * (xmin, ymax)) + (gtf * (xmax, ymin)))
            return box(*bounds)

        full_bounds = box(*output_bounds)

        dasks = []
        if isinstance(dem, GeoDaskImage):
            if dem.proj != proj:
                dem = dem.warp(proj=proj, dem=dem)
            dasks.append(dem.dask)

        for y in range(y_chunks):
            for x in range(x_chunks):
                xmin = x * x_size
                ymin = y * y_size
                geometry = px_to_geom(xmin, ymin)
                daskmeta["dask"][(daskmeta["name"], 0, y, x)] = (self._warp, geometry, gsd, dem, proj, dtype, 5)
        daskmeta["dask"], _ = optimization.cull(HighLevelGraph.merge(daskmeta["dask"], *dasks),
                                                list(daskmeta["dask"].keys()))

        gi = mapping(full_bounds)
        gt = AffineTransform(gtf, proj)
        image = GeoDaskImage(daskmeta, __geo_interface__=gi, __geo_transform__=gt)
        return image[box(*output_bounds)]
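
A minimal usage sketch of the warp above, assuming `img` is an existing GeoDaskImage and that the returned object supports ordinary dask-style slicing and .compute() (the target EPSG code is illustrative):

    warped = img.warp(proj="EPSG:32612")   # builds the deferred warp graph, nothing is read yet
    chip = warped[:, :512, :512]           # slicing stays lazy and only trims the graph
    arr = chip.compute()                   # materialise: read the chunks and warp them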
Example #41
0
def get_exposure(oqparam):
    """
    Read the full exposure in memory and build a list of
    :class:`openquake.risklib.asset.Asset` instances.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        an :class:`Exposure` instance
    """
    out_of_region = 0
    if oqparam.region_constraint:
        region = wkt.loads(oqparam.region_constraint)
    else:
        region = None
    all_cost_types = set(oqparam.all_cost_types)
    fname = oqparam.inputs['exposure']
    exposure, assets_node = _get_exposure(fname, all_cost_types)
    relevant_cost_types = all_cost_types - set(['occupants'])
    asset_refs = set()
    ignore_missing_costs = set(oqparam.ignore_missing_costs)

    for idx, asset_node in enumerate(assets_node):
        values = {}
        deductibles = {}
        insurance_limits = {}
        retrofitteds = {}
        with context(fname, asset_node):
            asset_id = asset_node['id'].encode('utf8')
            if asset_id in asset_refs:
                raise read_nrml.DuplicatedID(asset_id)
            asset_refs.add(asset_id)
            exposure.asset_refs.append(asset_id)
            taxonomy = asset_node['taxonomy']
            if 'damage' in oqparam.calculation_mode:
                # calculators of 'damage' kind require the 'number'
                # if it is missing a KeyError is raised
                number = asset_node.attrib['number']
            else:
                # some calculators ignore the 'number' attribute;
                # if it is missing it is considered 1, since we are going
                # to multiply by it
                try:
                    number = asset_node['number']
                except KeyError:
                    number = 1
                else:
                    if 'occupants' in all_cost_types:
                        values['occupants_None'] = number
            location = asset_node.location['lon'], asset_node.location['lat']
            if region and not geometry.Point(*location).within(region):
                out_of_region += 1
                continue
            tagnode = getattr(asset_node, 'tags', None)
            assets_by_tag = exposure.assets_by_tag
            if tagnode is not None:
                # fill missing tagvalues with "?" and raise an error for
                # unknown tagnames
                with context(fname, tagnode):
                    dic = tagnode.attrib.copy()
                    for tagname in assets_by_tag.tagnames:
                        try:
                            tagvalue = dic.pop(tagname)
                        except KeyError:
                            tagvalue = '?'
                        else:
                            if tagvalue in '?*':
                                raise ValueError(
                                    'Invalid tagvalue="%s"' % tagvalue)
                        tag = '%s=%s' % (tagname, tagvalue)
                        assets_by_tag[tag].append(idx)
                    if dic:
                        raise ValueError(
                            'Unknown tagname %s or <tagNames> not '
                            'specified in the exposure' % ', '.join(dic))
            exposure.assets_by_tag['taxonomy=' + taxonomy].append(idx)
        try:
            costs = asset_node.costs
        except AttributeError:
            costs = Node('costs', [])
        try:
            occupancies = asset_node.occupancies
        except AttributeError:
            occupancies = Node('occupancies', [])
        for cost in costs:
            with context(fname, cost):
                cost_type = cost['type']
                if cost_type in relevant_cost_types:
                    values[cost_type] = cost['value']
                    retrovalue = cost.attrib.get('retrofitted')
                    if retrovalue is not None:
                        retrofitteds[cost_type] = retrovalue
                    if oqparam.insured_losses:
                        deductibles[cost_type] = cost['deductible']
                        insurance_limits[cost_type] = cost['insuranceLimit']

        # check we are not missing a cost type
        missing = relevant_cost_types - set(values)
        if missing and missing <= ignore_missing_costs:
            logging.warn(
                'Ignoring asset %s, missing cost type(s): %s',
                asset_id, ', '.join(missing))
            for cost_type in missing:
                values[cost_type] = None
        elif missing and 'damage' not in oqparam.calculation_mode:
            # missing the costs is okay for damage calculators
            with context(fname, asset_node):
                raise ValueError("Invalid Exposure. "
                                 "Missing cost %s for asset %s" % (
                                     missing, asset_id))
        tot_occupants = 0
        for occupancy in occupancies:
            with context(fname, occupancy):
                exposure.time_events.add(occupancy['period'])
                occupants = 'occupants_%s' % occupancy['period']
                values[occupants] = occupancy['occupants']
                tot_occupants += values[occupants]
        if occupancies:  # store average occupants
            values['occupants_None'] = tot_occupants / len(occupancies)
        area = float(asset_node.attrib.get('area', 1))
        ass = asset.Asset(idx, taxonomy, number, location, values, area,
                          deductibles, insurance_limits, retrofitteds,
                          exposure.cost_calculator)
        exposure.assets.append(ass)
    if region:
        logging.info('Read %d assets within the region_constraint '
                     'and discarded %d assets outside the region',
                     len(exposure.assets), out_of_region)
        if len(exposure.assets) == 0:
            raise RuntimeError('Could not find any asset within the region!')

    # sanity checks
    values = any(len(ass.values) + ass.number for ass in exposure.assets)
    assert values, 'Could not find any value??'
    return exposure
Example #42
0
 def from_wkt(cls, value):
     return cls(wkt.loads(value))
def search_by_shapefile(shapefile, filters, startDate, endDate):
    with collection(shapefile, 'r') as in_shp:
        s = shape(in_shp[0]['geometry'])
        wkt_string = s.to_wkt()
        results = gbdx.catalog.search(searchAreaWkt=wkt_string,
                                      startDate=startDate,
                                      endDate=endDate,
                                      types=["DigitalGlobeAcquisition"],
                                      filters=filters)
        #return results
        #Make a shapefile of the results
        schema3 = {
            'geometry': 'Polygon',
            'properties': {
                'available': 'str',
                'catalogID': 'str',
                'browseURL': 'str',
                'cloudCover': 'float',
                'imageBands': 'str',
                'multiResolution': 'float',
                'offNadirAngle': 'float',
                'ordered': 'str',
                'panResolution': 'float',
                'sensorPlatformName': 'str',
                'sunAzimuth': 'float',
                'sunElevation': 'float',
                'targetAzimuth': 'float',
                'timestamp': 'str',
                'timestampWkt': 'str',
                'vendorName': 'str'
            }
        }
        with collection("rawSearchResults.shp", "w", "ESRI Shapefile",
                        schema3) as output:
            for poly in results:
                output.write({
                    'geometry':
                    mapping(loads(poly['properties']['footprintWkt'])),
                    'properties': {
                        'available':
                        poly['properties']['available'],
                        'catalogID':
                        poly['properties']['catalogID'],
                        'browseURL':
                        poly['properties']['browseURL'],
                        'cloudCover':
                        float(poly['properties']['cloudCover']),
                        'imageBands':
                        poly['properties']['imageBands'],
                        'multiResolution':
                        float(poly['properties']['multiResolution']),
                        'offNadirAngle':
                        float(poly['properties']['offNadirAngle']),
                        'ordered':
                        poly['properties']['ordered'],
                        'panResolution':
                        float(poly['properties']['panResolution']),
                        'sensorPlatformName':
                        poly['properties']['sensorPlatformName'],
                        'sunAzimuth':
                        float(poly['properties']['sunAzimuth']),
                        'sunElevation':
                        float(poly['properties']['sunElevation']),
                        'targetAzimuth':
                        float(poly['properties']['targetAzimuth']),
                        'timestamp':
                        poly['properties']['timestamp'],
                        'timestampWkt':
                        poly['properties']['timestampWkt'],
                        'vendorName':
                        poly['properties']['vendorName']
                    }
                })
            print results
            return results
Example #44
0
def test_error_handler_for_bytes():
    with pytest.raises(TypeError):
        loads(b'POINT (10 10)')
Example #45
0
def job_geom(request):
    id = request.matchdict['job']
    session = DBSession()
    job = session.query(Job).get(id)
    return FeatureCollection([Feature(id=id, geometry=loads(job.geometry))])
Example #46
0
                if isinstance(value, str):
                    value = value.strip()

                point = {
                    'layer_id':
                    layer_id,
                    'source_object_id':
                    source_row[object_id_field]
                    if method != 'seg_id' else None,
                    'seg_id':
                    source_row[seg_id_field] if seg_id_field else None,
                    'value':
                    value or '',
                    'geom':
                    source_row[source_geom_field]
                    if method != 'seg_id' else loads('POINT(0 0)').wkt,
                }
                points.append(point)

if WRITE_OUT:
    print('Writing service area polygons...')
    poly_table.write(polys)

    print('Writing service area single-value lines...')
    line_single_table.write(line_singles)

    print('Writing service area line dual-value lines...')
    line_dual_table.write(line_duals)

    print('Writing service area points...')
    point_table.write(points)
Example #47
0
from shapely.wkt import loads
from geojson import dumps
import pprint

msas = {}
for line in open(
        '/Users/jlenaghan/Data/WalmartBlackFriday/OutputV1/PolysToMSA.csv',
        'r'):
    uid, msa = line.strip().split(',')
    msas[uid] = msa

for line in open(
        '/Users/jlenaghan/Data/WalmartBlackFriday/AnalysisDataSets/prod_walmarts_wkts.psv',
        'r'):
    sid, name, wkt, msa = line.strip().split('|')
    msa = msas[sid]
    if msa == 'losangeles' or msa == 'riverside':
        wkt = wkt.replace('"', '')
        x = loads(wkt)
        gdata = dumps(x)
        print gdata
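
The same WKT-to-GeoJSON round trip in isolation (output shown approximately; key order and float formatting may differ):

    from shapely.wkt import loads
    from geojson import dumps

    point_geojson = dumps(loads('POINT (1 2)'))
    # roughly: {"type": "Point", "coordinates": [1.0, 2.0]}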
Example #48
0
def getS1Cat2Gdf(apihub, opener, query):
    """
    Get the data from the scihub catalogue
    and write it to a GeoPandas GeoDataFrame
    """

    # create empty GDF
    colNames = ['identifier', 'polarisationmode', 'orbitdirection',
                'acquisitiondate', 'relativeorbitnumber', 'orbitnumber',
                'producttype', 'slicenumber', 'size', 'beginposition',
                'endposition', 'lastrelativeorbitnumber', 'lastorbitnumber',
                'uuid', 'platformidentifier', 'missiondatatakeid',
                'swathidentifier', 'ingestiondate', 'sensoroperationalmode',
                'footprint']
    crs = {'init': 'epsg:4326'}
    gdfFull = gpd.GeoDataFrame(columns=colNames, crs=crs,
                               geometry='footprint')

    # we need this for the paging
    index = 0
    rows = 99
    next_page = 1

    while next_page:

        # construct the final url
        url = apihub + query + "&rows={}&start={}".format(rows, index)
        
        try:
            # get the request
            req = opener.open(url)
        except URLError as e:
            if hasattr(e, 'reason'):
                print(' We failed to connect to the server.')
                print(' Reason: ', e.reason)
                sys.exit()
            elif hasattr(e, 'code'):
                print(' The server couldn\'t fulfill the request.')
                print(' Error code: ', e.code)
                sys.exit()
        else:
            # read the response body
            # (i.e. the xml coming back from scihub)
            response = req.read().decode('utf-8')

            # parse the xml page from the response
            dom = xml.dom.minidom.parseString(response)

        acqList = []
        # loop through each entry (with all metadata)
        for node in dom.getElementsByTagName('entry'):

            # we get all the date entries
            dict_date = {s.getAttribute('name'):dateutil.parser.parse(s.firstChild.data).astimezone(dateutil.tz.tzutc()) for s in node.getElementsByTagName('date')}

            # we get all the int entries
            dict_int = {s.getAttribute('name'):s.firstChild.data for s in node.getElementsByTagName('int')}

            # we create a filter for the str entries (we do not want all) and get them
            dict_str = {s.getAttribute('name'):s.firstChild.data for s in node.getElementsByTagName('str')}
            
            # merge the dicts and append to the catalogue list
            acq = dict(dict_date,**dict_int,**dict_str)

            # fill in empty fields in the dict by using the identifier
            if not 'swathidentifier' in acq.keys():
                acq['swathidentifier'] = acq['identifier'].split("_")[1]
            if not 'producttype' in acq.keys():
                acq['producttype'] = acq['identifier'].split("_")[2]
            if not 'slicenumber' in acq.keys():
                acq['slicenumber'] = 0

            # append all scenes from this page to a list
            acqList.append([acq['identifier'], acq['polarisationmode'], 
                    acq['orbitdirection'], acq['beginposition'].strftime('%Y%m%d'),
                    acq['relativeorbitnumber'], acq['orbitnumber'], 
                    acq['producttype'],acq['slicenumber'], acq['size'],
                    acq['beginposition'].isoformat(), acq['endposition'].isoformat(), 
                    acq['lastrelativeorbitnumber'], acq['lastorbitnumber'],
                    acq['uuid'], acq['platformidentifier'], 
                    acq['missiondatatakeid'], acq['swathidentifier'], 
                    acq['ingestiondate'].isoformat(), acq['sensoroperationalmode'],
                    loads(acq['footprint'])])
               
        # transform all results from this page into a GeoDataFrame
        gdf = gpd.GeoDataFrame(acqList, columns=colNames, crs=crs, geometry='footprint')
        
        # append the gdf to the full gdf
        gdfFull = gdfFull.append(gdf)
        
        # retrieve next page and set index up by 99 entries
        next_page = scihub.nextPage(dom)
        index += rows
    
    return gdfFull
print(file_gpd)

file_gpd.to_file(outpath,driver='GeoJSON')

file_gpd.to_file(outpath2)


# gpol2 = gpd.read_file('/home/ubuntu/sanit3Dsdi/tests/ground_pol_31984.geojson')


###################### part2

small_bbox_wkt = 'POLYGON((-40.3397827083 -20.3176282943, -40.3353356058 -20.3176282943, -40.3353356058 -20.3214263926, -40.3397827083 -20.3214263926, -40.3397827083 -20.3176282943))'

bbox_polygon = wkt.loads(small_bbox_wkt)

crs_proj = 'EPSG:31984'

data2 = {'name':['bbox'],'geometry':[bbox_polygon]}

bbox_gdf = gpd.GeoDataFrame(data2,crs="EPSG:4326")

bbox_proj = bbox_gdf.to_crs(crs_proj)

bbox_proj['id'] = ['0']


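# Symmetric difference keeps the area covered by exactly one of the two inputs;
# assuming the footprints in file_gpd lie inside the bbox, this leaves the "ground"
# polygon, i.e. the bbox minus the union of the footprints.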
ground_pol = bbox_proj.symmetric_difference(file_gpd.unary_union)

# ground_pol['id'] = ['0']
Example #50
0
 "textual_float": pd.Series([1.1, 2.0], dtype=np.float64),
 "textual_float_nan": pd.Series([1.1, 2.0, np.nan], dtype=np.float64),
 "mixed": pd.Series([True, False, None], dtype=hasnan_bool_name),
 "uuid_series_str": pd.Series(
     [
         uuid.UUID("0b8a22ca-80ad-4df5-85ac-fa49c44b7ede"),
         uuid.UUID("aaa381d6-8442-4f63-88c8-7c900e9a23c6"),
         uuid.UUID("00000000-0000-0000-0000-000000000000"),
     ],
 ),
 "ip_str": pd.Series(
     [IPv4Address("127.0.0.1"), IPv4Address("127.0.0.1")],
 ),
 "geometry_string_series": pd.Series(
     [
         wkt.loads("POINT (-92 42)"),
         wkt.loads("POINT (-92 42.1)"),
         wkt.loads("POINT (-92 42.2)"),
     ],
 ),
 "email_address_str": pd.Series(
     [FQDA("test", "example.com"), FQDA("info", "example.eu")],
 ),
 "str_url": pd.Series(
     [
         urlparse("http://www.cwi.nl:80/%7Eguido/Python.html"),
         urlparse("https://github.com/dylan-profiling/hurricane"),
     ],
 ),
 "path_series_windows_str": pd.Series(
     [
Example #51
0
    def _read_risk_data(self):
        # read the exposure (if any), the risk model (if any) and then the
        # site collection, possibly extracted from the exposure.
        oq = self.oqparam
        self.load_crmodel()  # must be called first

        if oq.hazard_calculation_id:
            with util.read(oq.hazard_calculation_id) as dstore:
                haz_sitecol = dstore['sitecol'].complete
        else:
            haz_sitecol = readinput.get_site_collection(oq)
            if hasattr(self, 'rup'):
                # for scenario we reduce the site collection to the sites
                # within the maximum distance from the rupture
                haz_sitecol, _dctx = self.cmaker.filter(
                    haz_sitecol, self.rup)
                haz_sitecol.make_complete()

            if 'site_model' in oq.inputs:
                self.datastore['site_model'] = readinput.get_site_model(oq)

        oq_hazard = (self.datastore.parent['oqparam']
                     if self.datastore.parent else None)
        if 'exposure' in oq.inputs:
            exposure = self.read_exposure(haz_sitecol)
            self.datastore['assetcol'] = self.assetcol
            self.datastore['cost_calculator'] = exposure.cost_calculator
            if hasattr(readinput.exposure, 'exposures'):
                self.datastore['assetcol/exposures'] = (
                    numpy.array(exposure.exposures, hdf5.vstr))
        elif 'assetcol' in self.datastore.parent:
            assetcol = self.datastore.parent['assetcol']
            if oq.region:
                region = wkt.loads(oq.region)
                self.sitecol = haz_sitecol.within(region)
            if oq.shakemap_id or 'shakemap' in oq.inputs:
                self.sitecol, self.assetcol = self.read_shakemap(
                    haz_sitecol, assetcol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets',
                             len(self.assetcol), len(assetcol))
                nsites = len(self.sitecol)
                if (oq.spatial_correlation != 'no' and
                        nsites > MAXSITES):  # hard-coded, heuristic
                    raise ValueError(CORRELATION_MATRIX_TOO_LARGE % nsites)
            elif hasattr(self, 'sitecol') and general.not_equal(
                    self.sitecol.sids, haz_sitecol.sids):
                self.assetcol = assetcol.reduce(self.sitecol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets',
                             len(self.assetcol), len(assetcol))
            else:
                self.assetcol = assetcol
        else:  # no exposure
            self.sitecol = haz_sitecol
            if self.sitecol:
                logging.info('Read N=%d hazard sites and L=%d hazard levels',
                             len(self.sitecol), len(oq.imtls.array))

        if oq_hazard:
            parent = self.datastore.parent
            if 'assetcol' in parent:
                check_time_event(oq, parent['assetcol'].occupancy_periods)
            elif oq.job_type == 'risk' and 'exposure' not in oq.inputs:
                raise ValueError('Missing exposure both in hazard and risk!')
            if oq_hazard.time_event and oq_hazard.time_event != oq.time_event:
                raise ValueError(
                    'The risk configuration file has time_event=%s but the '
                    'hazard was computed with time_event=%s' % (
                        oq.time_event, oq_hazard.time_event))

        if oq.job_type == 'risk':
            tmap_arr, tmap_lst = logictree.taxonomy_mapping(
                self.oqparam.inputs.get('taxonomy_mapping'),
                self.assetcol.tagcol.taxonomy)
            self.crmodel.tmap = tmap_lst
            if len(tmap_arr):
                self.datastore['taxonomy_mapping'] = tmap_arr
            taxonomies = set(taxo for items in self.crmodel.tmap
                             for taxo, weight in items if taxo != '?')
            # check that we are covering all the taxonomies in the exposure
            missing = taxonomies - set(self.crmodel.taxonomies)
            if self.crmodel and missing:
                raise RuntimeError('The exposure contains the taxonomies %s '
                                   'which are not in the risk model' % missing)
            if len(self.crmodel.taxonomies) > len(taxonomies):
                logging.info('Reducing risk model from %d to %d taxonomies',
                             len(self.crmodel.taxonomies), len(taxonomies))
                self.crmodel = self.crmodel.reduce(taxonomies)
                self.crmodel.tmap = tmap_lst
            self.crmodel.vectorize_cons_model(self.assetcol.tagcol)

        if hasattr(self, 'sitecol') and self.sitecol:
            if 'site_model' in oq.inputs:
                assoc_dist = (oq.region_grid_spacing * 1.414
                              if oq.region_grid_spacing else 5)  # Graeme's 5km
                sm = readinput.get_site_model(oq)
                self.sitecol.complete.assoc(sm, assoc_dist)
            self.datastore['sitecol'] = self.sitecol.complete
        # used in the risk calculators
        self.param = dict(individual_curves=oq.individual_curves,
                          avg_losses=oq.avg_losses)

        # compute exposure stats
        if hasattr(self, 'assetcol'):
            arr = self.assetcol.array
            num_assets = list(general.countby(arr, 'site_id').values())
            self.datastore['assets_by_site'] = get_stats(num_assets)
            num_taxos = self.assetcol.num_taxonomies_by_site()
            self.datastore['taxonomies_by_site'] = get_stats(num_taxos)
            save_exposed_values(
                self.datastore, self.assetcol, oq.loss_names, oq.aggregate_by)
Example #52
0
def create_mask(poly, params, poly_type="polygon"):
   '''
   Takes a Well Known Text polygon or line
   and produces a masking array for use with numpy
   @param poly - WKT polygon or line
   @param params - request parameters used to build the WCS URL (includes the coverage variable)
   @param poly_type - one of [polygon, line]
   '''
   current_app.logger.debug('##########')
   current_app.logger.debug(poly)
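   # parse the WKT and use its envelope (axis-aligned bounds) as the WCS bounding box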
   loaded_poly = wkt.loads(poly)
   wcs_envelope = loaded_poly.envelope
   bounds =  wcs_envelope.bounds
   bb = ','.join(map(str,bounds))
   current_app.logger.debug('testing polygon for strangeness')
   current_app.logger.debug('bound = %s' % bb)
   params['bbox']._value = bb
   params['url'] = createURL(params)
   variable = params['coverage'].value
   #wcs_url = wcs_base_url % (bounds[0],bounds[1],bounds[2],bounds[3])
   wcs_url = params['url'].value
   current_app.logger.debug(wcs_url)
   #testfile=urllib.URLopener()
   #testfile.retrieve(wcs_url,"%s.nc" % variable)
   try:
      resp = contactWCSServer(wcs_url)
   except urllib2.HTTPError:
      params["vertical"]._value = params["vertical"].value[1:]
      params['url'] = createURL(params)
      wcs_url = params['url'].value
      resp = contactWCSServer(wcs_url)
   tfile = saveOutTempFile(resp)
   to_be_masked = netCDF.Dataset(tfile, 'r+')

   chl = to_be_masked.variables[variable][:]

   latvals = to_be_masked.variables[str(getCoordinateVariable(to_be_masked, 'Lat').dimensions[0])][:]
   lonvals = to_be_masked.variables[str(getCoordinateVariable(to_be_masked, 'Lon').dimensions[0])][:]

   from shapely.geometry import Polygon
   minlat = min(latvals)
   maxlat = max(latvals)
   minlon = min(lonvals)
   maxlon = max(lonvals)

   lonlat_poly = Polygon([[minlon,maxlat],[maxlon,maxlat],[maxlon,minlat],[minlon,minlat],[minlon,maxlat]])
   #print '#'*50
   #print lonlat_poly
   overlap_poly = loaded_poly.intersection(lonlat_poly)
   poly = poly[trim_sizes[poly_type]]

   poly = poly.split(',')
   poly = [x.split() for x in poly]



   #found_lats = [find_closest(latvals, float(x[1])) for x in poly]
   #found_lons = [find_closest(lonvals, float(x[0])) for x in poly]
   if overlap_poly.type == "MultiPolygon":
      found = []
      for poly in overlap_poly:
         found_lats = [find_closest(latvals, float(x)) for x in poly.exterior.xy[1]]
         found_lons = [find_closest(lonvals, float(x)) for x in poly.exterior.xy[0]]
         found.append(zip(found_lons,found_lats))


   elif overlap_poly.type == "MultiLineString":
      found = []
      for poly in overlap_poly:
         found_lats = [find_closest(latvals, float(x)) for x in poly.xy[1]]
         found_lons = [find_closest(lonvals, float(x)) for x in poly.xy[0]]
         found.append(zip(found_lons,found_lats))

   else:
      if poly_type == 'line':
         found_lats = [find_closest(latvals, float(x)) for x in overlap_poly.xy[1]]
         found_lons = [find_closest(lonvals, float(x)) for x in overlap_poly.xy[0]]
      else:
         found_lats = [find_closest(latvals, float(x)) for x in overlap_poly.exterior.xy[1]]
         found_lons = [find_closest(lonvals, float(x)) for x in overlap_poly.exterior.xy[0]]

      #found = zip(overlap_poly.exterior.xy[0],overlap_poly.exterior.xy[1])
      found = zip(found_lons,found_lats)
   current_app.logger.debug('#'*40)
   current_app.logger.debug(found)

   # img = Image.new('L', (chl.shape[2],chl.shape[1]), 0)
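   # blank 8-bit mask image sized (n_lon, n_lat); the polygon/line vertices are rasterised into it below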
   var_dims = to_be_masked.variables[variable].dimensions
   lon_dim = str(getCoordinateVariable(to_be_masked, 'Lon').dimensions[0])
   lat_dim = str(getCoordinateVariable(to_be_masked, 'Lat').dimensions[0])
   img = Image.new('L', (chl.shape[var_dims.index(lon_dim)], chl.shape[var_dims.index(lat_dim)]), 0)

   if overlap_poly.type == "MultiPolygon":
      for f in found:
         ImageDraw.Draw(img).polygon(f,  outline=2, fill=2)
   elif overlap_poly.type == "MultiLineString":
      for f in found:
         ImageDraw.Draw(img).polygon(f,  outline=2, fill=2)
   else:
      if poly_type == 'polygon':
         ImageDraw.Draw(img).polygon(found,  outline=2, fill=2)
      if poly_type == 'line':
         ImageDraw.Draw(img).line(found,   fill=2)

   masker = np.array(img)

   #fig = plt.figure()
   masked_variable = []
   for i in range(chl.shape[0]):
      #print i
      masked_variable.append(np.ma.masked_array(chl[i,:], mask=[x != 2 for x in masker]))
      masked_variable[i].filled(-999)
   #    a = fig.add_subplot(1,5,i+1)
   #    imgplot = plt.imshow(masked_variable)

   # plt.show()
   return masked_variable, to_be_masked, masker, tfile, variable
Example #53
0
    def add_airspace(self, country_code, airspace_class, name, base, top, geom_str):
        try:
            geom = loads(geom_str)
        except ReadingError:
            print name + "(" + airspace_class + ") is not a polygon (maybe not enough points?)"
            return False

        # orient polygon clockwise
        geom = polygon.orient(geom, sign=-1)

        if not airspace_class:
            print name + " has no airspace class"
            return False

        base = self.normalise_height(base, name)
        top = self.normalise_height(top, name)

        flightlevel_re = re.compile(r'^FL (\d+)$')
        match = flightlevel_re.match(base)
        if match and int(match.group(1)) >= 200:
            print name + " has it's base above FL 200 and is therefore disregarded"
            return False

        airspace = Airspace()
        airspace.country_code = country_code
        airspace.airspace_class = airspace_class
        airspace.name = name
        airspace.base = base
        airspace.top = top

        # Check geometry type, disregard everything except POLYGON
        if geom.geom_type != 'Polygon':
            print name + " is not a polygon (it's a " + geom.geom_type + ")"
            return False

        wkb = from_shape(geom, srid=4326)

        # Try to fix invalid (self-intersecting) geometries
        valid_dump = (func.ST_Dump(func.ST_MakeValid(wkb))).geom
        valid_query = db.session.query(func.ST_SetSRID(valid_dump, 4326)).order_by(func.ST_Area(valid_dump).desc()).first()

        if not valid_query:
            print 'Error importing ' + name
            print 'Could not validate geometry'
            return False
        else:
            wkb = valid_query[0]

        geom_type = db.session.query(func.ST_GeometryType(wkb)).first()[0]

        if geom_type != 'ST_Polygon':
            print name + " got some errors makeing it valid..."
            return False

        tolerance = 0.0000001
        simplify = lambda x: func.ST_SimplifyPreserveTopology(x, tolerance)

        airspace.the_geom = case(
            [
                (func.ST_IsValid(wkb), wkb),
                (func.ST_IsValid(simplify(wkb)), simplify(wkb)),
            ],
            else_=None)

        db.session.add(airspace)

        return True
Example #54
0
def get_coordinates(geometry):
    if geometry:
        geom = wkt.loads(geometry)
        lat = geom.centroid.y
        lon = geom.centroid.x
        return lat, lon
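
A quick check of the helper above (the WKT literal is only illustrative):

    lat, lon = get_coordinates('POLYGON ((0 0, 4 0, 4 4, 0 4, 0 0))')
    # lat == 2.0, lon == 2.0 -- the centroid of the square; a falsy geometry yields None instead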
Example #55
0
def test_read_mapchete_input(mapchete_input):
    """Read Mapchete files as input files."""
    config = MapcheteConfig(mapchete_input.path)
    area = config.area_at_zoom(5)
    testpolygon = "POLYGON ((4 1, 3 1, 2 1, 2 4, 3 4, 3 2, 4 2, 4 1))"
    assert area.equals(loads(testpolygon))
Example #56
0
    log_file=True,
    use_cache=True,
    data_folder=".temp/data",
    logs_folder=".temp/logs",
    imgs_folder=".temp/imgs",
    cache_folder=".temp/cache",
)

# define queries to use throughout tests
location_point = (37.791427, -122.410018)
address = "600 Montgomery St, San Francisco, California, USA"
place1 = {"city": "Piedmont", "state": "California", "country": "USA"}
place2 = "Bunker Hill, Los Angeles, California"
p = ("POLYGON ((-122.262 37.869, -122.255 37.869, -122.255 37.874,"
     "-122.262 37.874, -122.262 37.869))")
polygon = wkt.loads(p)


def test_logging():
    # test OSMnx's logger
    ox.log("test a fake default message")
    ox.log("test a fake debug", level=lg.DEBUG)
    ox.log("test a fake info", level=lg.INFO)
    ox.log("test a fake warning", level=lg.WARNING)
    ox.log("test a fake error", level=lg.ERROR)

    ox.citation()
    ox.ts(style="date")
    ox.ts(style="time")

Example #57
0
def get_series():
    return [
        # Int Series
        pd.Series([1, 2, 3], name="int_series"),
        pd.Series(range(10), name="int_range"),
        pd.Series([1, 2, 3], name="Int64_int_series", dtype="Int64"),
        pd.Series([1, 2, 3, np.nan],
                  name="Int64_int_nan_series",
                  dtype="Int64"),
        pd.Series([1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
                  name="int_series_boolean"),
        # Count
        pd.Series(np.array([1, 2, 3, 4], dtype=np.uint32), name="np_uint32"),
        # Categorical
        pd.Series([1, 2, 3], name="categorical_int_series", dtype="category"),
        pd.Series(
            pd.Categorical(
                ["A", "B", "C", "C", "B", "A"],
                categories=["A", "B", "C"],
                ordered=False,
            ),
            name="categorical_char",
        ),
        pd.Series([1.0, 2.0, 3.1],
                  dtype="category",
                  name="categorical_float_series"),
        pd.Series(["Georgia", "Sam"],
                  dtype="category",
                  name="categorical_string_series"),
        pd.Series(
            [np.complex(0, 0),
             np.complex(1, 2),
             np.complex(3, -1)],
            name="categorical_complex_series",
            dtype="category",
        ),
        # Ordinal
        pd.Series(
            pd.Categorical(["A", "B", "C", "C", "B", "A"],
                           categories=["A", "B", "C"],
                           ordered=True),
            name="ordinal",
        ),
        # Float Series
        pd.Series([1.0, 2.1, 3.0], name="float_series"),
        pd.Series([1.0, 2.5, np.nan], name="float_nan_series"),
        pd.Series([1.0, 2.0, 3.0, 4.0], name="float_series2"),
        pd.Series(np.array([1.2, 2, 3, 4], dtype=np.float),
                  name="float_series3"),
        pd.Series([1, 2, 3.05, 4], dtype=float, name="float_series4"),
        pd.Series([np.nan, 1.2], name="float_series5"),
        pd.Series([np.nan, 1.1], dtype=np.single, name="float_series6"),
        pd.Series([np.inf, np.NINF, np.PINF, 1000000.0, 5.5],
                  name="float_with_inf"),
        pd.Series([np.inf, np.NINF, np.Infinity, np.PINF], name="inf_series"),
        pd.Series([1, 2, np.nan], name="int_nan_series"),
        # Nan Series
        pd.Series([np.nan], name="nan_series"),
        pd.Series([np.nan, np.nan, np.nan, np.nan], name="nan_series_2"),
        # String Series
        pd.Series(["Patty", "Valentine"], name="string_series"),
        pd.Series(["1941-05-24", "13/10/2016"],
                  name="timestamp_string_series"),
        pd.Series(["mack", "the", "finger"], name="string_unicode_series"),
        pd.Series(
            np.array(["upper", "hall"], dtype=np.unicode_),
            name="string_np_unicode_series",
        ),
        pd.Series(["1.0", "2.0", np.nan], name="string_num_nan"),
        pd.Series(["1.0", "2.0", "3.0"], name="string_num"),
        pd.Series(["1.0", "45.67", np.nan], name="string_flt_nan"),
        pd.Series(["1.0", "45.67", "3.5"], name="string_flt"),
        pd.Series(
            ["POINT (-92 42)", "POINT (-92 42.1)", "POINT (-92 42.2)"],
            name="geometry_string_series",
        ),
        pd.Series(
            [
                "I was only robbing the register,",
                "I hope you understand",
                "One of us had better call up the cops",
                "In the hot New Jersey night",
                np.nan,
            ],
            name="string_str_nan",
        ),
        pd.Series(["True", "False", None], name="string_bool_nan"),
        pd.Series(range(20), name="int_str_range").astype("str"),
        pd.Series(["1937-05-06", "20/4/2014"], name="string_date"),
        pd.Series(
            [
                "http://www.cwi.nl:80/%7Eguido/Python.html",
                "https://github.com/pandas-profiling/pandas-profiling",
            ],
            name="str_url",
        ),
        pd.Series(
            [r"C:\\home\\user\\file.txt", r"C:\\home\\user\\test2.txt"],
            name="path_series_windows_str",
        ),
        pd.Series(
            [r"/home/user/file.txt", r"/home/user/test2.txt"],
            name="path_series_linux_str",
        ),
        # Bool Series
        pd.Series([True, False], name="bool_series"),
        pd.Series([True, False, None], name="bool_nan_series"),
        pd.Series([True, False, None],
                  name="nullable_bool_series",
                  dtype="Bool"),
        pd.Series([True, False, False, True], dtype=bool, name="bool_series2"),
        pd.Series(np.array([1, 0, 0, 1], dtype=np.bool), name="bool_series3"),
        # Complex Series
        pd.Series(
            [np.complex(0, 0),
             np.complex(1, 2),
             np.complex(3, -1)],
            name="complex_series",
        ),
        pd.Series(
            [
                np.complex(0, 0),
                np.complex(1, 2),
                np.complex(3, -1),
                np.complex(np.nan, np.nan),
            ],
            name="complex_series_nan",
        ),
        pd.Series(["(1+1j)", "(2+2j)", "(10+100j)"], name="str_complex"),
        pd.Series(
            [np.complex(0, 0),
             np.complex(1, 2),
             np.complex(3, -1), np.nan],
            name="complex_series_nan_2",
        ),
        pd.Series(
            [complex(0, 0),
             complex(1, 2),
             complex(3, -1), np.nan],
            name="complex_series_py_nan",
        ),
        pd.Series([complex(0, 0), complex(1, 2),
                   complex(3, -1)],
                  name="complex_series_py"),
        pd.Series(
            [
                np.complex(0, 0),
                np.complex(1, 0),
                np.complex(3, 0),
                np.complex(-1, 0)
            ],
            name="complex_series_float",
        ),
        # Datetime Series
        pd.Series(
            [pd.datetime(2017, 3, 5, 12, 2),
             pd.datetime(2019, 12, 4)],
            name="timestamp_series",
        ),
        pd.Series(
            [
                pd.datetime(2017, 3, 5),
                pd.datetime(2019, 12, 4, 3, 2, 0), pd.NaT
            ],
            name="timestamp_series_nat",
        ),
        pd.Series(
            [pd.datetime(2017, 3, 5),
             pd.datetime(2019, 12, 4), pd.NaT],
            name="date_series_nat",
        ),
        pd.Series(
            pd.date_range(
                start="2013-05-18 12:00:00",
                periods=2,
                freq="H",
                tz="Europe/Brussels",
                name="timestamp_aware_series",
            )),
        pd.to_datetime(
            pd.Series(
                [
                    datetime.date(2011, 1, 1),
                    datetime.date(2012, 1, 2),
                    datetime.date(2013, 1, 1),
                ],
                name="datetime",
            )),
        # Timedelta Series
        pd.Series([pd.Timedelta(days=i) for i in range(3)],
                  name="timedelta_series"),
        pd.Series(
            [pd.Timedelta(days=i) for i in range(3)] + [pd.NaT],
            name="timedelta_series_nat",
        ),
        # Geometry Series
        pd.Series(
            [
                wkt.loads("POINT (-92 42)"),
                wkt.loads("POINT (-92 42.1)"),
                wkt.loads("POINT (-92 42.2)"),
            ],
            name="geometry_series",
        ),
        # Path Series
        pd.Series(
            [
                PurePosixPath("/home/user/file.txt"),
                PurePosixPath("/home/user/test2.txt"),
            ],
            name="path_series_linux",
        ),
        pd.Series(
            [
                PureWindowsPath("C:\\home\\user\\file.txt"),
                PureWindowsPath("C:\\home\\user\\test2.txt"),
            ],
            name="path_series_windows",
        ),
        # Url Series
        pd.Series(
            [
                urlparse("http://www.cwi.nl:80/%7Eguido/Python.html"),
                urlparse("https://github.com/dylan-profiling/hurricane"),
            ],
            name="url_series",
        ),
        # UUID Series
        pd.Series(
            [
                uuid.UUID("0b8a22ca-80ad-4df5-85ac-fa49c44b7ede"),
                uuid.UUID("aaa381d6-8442-4f63-88c8-7c900e9a23c6"),
                uuid.UUID("00000000-0000-0000-0000-000000000000"),
            ],
            name="uuid_series",
        ),
        pd.Series(
            [
                "0b8a22ca-80ad-4df5-85ac-fa49c44b7ede",
                "aaa381d6-8442-4f63-88c8-7c900e9a23c6",
                "00000000-0000-0000-0000-000000000000",
            ],
            name="uuid_series_str",
        ),
        # Object Series
        pd.Series([[1, ""], [2, "Rubin"], [3, "Carter"]],
                  name="mixed_list[str,int]"),
        pd.Series(
            [{
                "why": "did you"
            }, {
                "bring him": "in for he"
            }, {
                "aint": "the guy"
            }],
            name="mixed_dict",
        ),
        pd.Series(
            [pd.to_datetime, pd.to_timedelta, pd.read_json, pd.to_pickle],
            name="callable",
        ),
        pd.Series([pd, wkt, np], name="module"),
        pd.Series(["1.1", "2"], name="textual_float"),
        pd.Series(["1.1", "2", "NAN"], name="textual_float_nan"),
        # Empty
        pd.Series([], name="empty"),
        pd.Series([], name="empty_float", dtype=float),
        pd.Series([], name="empty_int64", dtype="Int64"),
        pd.Series([], name="empty_object", dtype="object"),
        pd.Series([], name="empty_bool", dtype=bool),
        # IP
        pd.Series([IPv4Address("127.0.0.1"),
                   IPv4Address("127.0.0.1")],
                  name="ip"),
        pd.Series(["127.0.0.1", "127.0.0.1"], name="ip_str"),
    ]
Example #58
0
            version = task.version
            filter = and_(TileHistory.x == task.x, TileHistory.y == task.y,
                          TileHistory.job_id == job.id)
            task = session.query(TileHistory)\
                       .filter(filter)\
                       .order_by(TileHistory.version.desc())\
                       .first()
            if task is not None and version == task.version:
                prev_task = session.query(Tile).get(
                    (task.x, task.y, task.job_id, task.zoom))

    admin = user.is_admin() if user else False
    stats = get_stats(job)
    return dict(job=job,
                user=user,
                bbox=loads(job.geometry).bounds,
                tile=current_task,
                prev_task=prev_task,
                admin=admin,
                stats=stats)


@view_config(route_name='job_geom', renderer='geojson', permission='edit')
def job_geom(request):
    id = request.matchdict['job']
    session = DBSession()
    job = session.query(Job).get(id)
    return FeatureCollection([Feature(id=id, geometry=loads(job.geometry))])


@view_config(route_name='job_tiles', renderer='geojson', permission='edit')
Example #59
0
def filter_query_all_filters(model, q, filters, user):
    """
    Return a query filtered with the CRUVED and all
    the filters available in the synthese form
    parameters:
        - q (SQLAlchemyQuery): an SQLAlchemy query
        - filters (dict): a dict of filters
        - user (User): a user object from User
        - allowed datasets (List<int>): an array of dataset IDs the user is authorized for

    """
    q = filter_query_with_cruved(model, q, user)

    if "observers" in filters:
        q = q.filter(
            model.observers.ilike("%" + filters.pop("observers")[0] + "%"))

    if "id_organism" in filters:
        id_datasets = (DB.session.query(CorDatasetActor.id_dataset).filter(
            CorDatasetActor.id_organism.in_(filters.pop("id_organism"))).all())
        formated_datasets = [d[0] for d in id_datasets]
        q = q.filter(model.id_dataset.in_(formated_datasets))

    if "date_min" in filters:
        q = q.filter(model.date_min >= filters.pop("date_min")[0])

    if "date_max" in filters:
        # set the date_max at 23h59 because a hour can be set in timestamp
        date_max = datetime.datetime.strptime(
            filters.pop("date_max")[0], '%Y-%m-%d')
        date_max = date_max.replace(hour=23, minute=59, second=59)
        q = q.filter(model.date_max <= date_max)

    if "id_acquisition_framework" in filters:
        q = q.join(
            TAcquisitionFramework,
            model.id_acquisition_framework ==
            TAcquisitionFramework.id_acquisition_framework,
        )
        q = q.filter(
            TAcquisitionFramework.id_acquisition_framework.in_(
                filters.pop("id_acquisition_framework")))

    if "geoIntersection" in filters:
        # Intersect with the geom send from the map
        ors = []
        for str_wkt in filters["geoIntersection"]:
            # if the geom is a circle
            if "radius" in filters:
                radius = filters.pop("radius")[0]
                wkt = loads(str_wkt)
                wkt = circle_from_point(wkt, float(radius))
            else:
                wkt = loads(str_wkt)
            geom_wkb = from_shape(wkt, srid=4326)
            ors.append(model.the_geom_4326.ST_Intersects(geom_wkb))

        q = q.filter(or_(*ors))
        filters.pop("geoIntersection")

    if "period_start" in filters and "period_end" in filters:
        period_start = filters.pop("period_start")[0]
        period_end = filters.pop("period_end")[0]
        q = q.filter(
            or_(
                func.gn_commons.is_in_period(
                    func.date(model.date_min),
                    func.to_date(period_start, "DD-MM"),
                    func.to_date(period_end, "DD-MM"),
                ),
                func.gn_commons.is_in_period(
                    func.date(model.date_max),
                    func.to_date(period_start, "DD-MM"),
                    func.to_date(period_end, "DD-MM"),
                ),
            ))
    q, filters = filter_taxonomy(model, q, filters)

    # generic filters
    join_on_cor_area = False
    for colname, value in filters.items():
        if colname.startswith("area"):
            if not join_on_cor_area:
                q = q.join(CorAreaSynthese,
                           CorAreaSynthese.id_synthese == model.id_synthese)
            q = q.filter(CorAreaSynthese.id_area.in_(value))
            join_on_cor_area = True
        else:
            col = getattr(model.__table__.columns, colname)
            q = q.filter(col.in_(value))
    return q
Example #60
0
enddate = '20200430'
frequency = 1 # every nth day in date range will be downloaded

# Download list of Sentinel S5-P NO2 products in region of interest
products = api.query(AOI,
                     date=(startdate,enddate),
                     platformname='Sentinel-5',
                     producttype='L2__NO2___', # useful data types 'L2__SO2___' and 'L2__NO2___'
                     processingmode='Offline', # 'Near real time' or 'Offline'
                     )

# Convert to pandas dataframe for ease of use
products_df = api.to_dataframe(products)

# Convert AOI to shapely file
AOIshape = wkt.loads(AOI)

# Create empty list of overlaping geometries
differences = []

# Check which images don't have complete overlap with AOI
for image in range(len(products_df)):
    
    # Convert image footprint to shapely file
    footprint = products_df.iloc[image,:]['footprint']
    footprintshape = wkt.loads(footprint)

    # Calculate difference between AOI and image footprint
    difference = AOIshape.difference(footprintshape) 
    
    # Append to list