Example #1
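
These snippets assume the usual scientific-Python imports plus the aggregation helpers they call; a minimal setup sketch (the xagg module path is an assumption, and some later examples also rely on a module-level ds / pix_agg defined elsewhere in the test file):

import numpy as np
import xarray as xr
import geopandas as gpd
from shapely.geometry import Polygon
# create_raster_polygons, get_pixel_overlaps and aggregate are used below;
# the module path is a guess based on xagg's layout
from xagg.core import create_raster_polygons, get_pixel_overlaps, aggregate
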
def test_aggregate_with_some_nans():
    ds = xr.Dataset(
        {
            'test': (['lon', 'lat'], np.array([[np.nan, 1], [2, np.nan]])),
            'lat_bnds': (['lat', 'bnds'], np.array([[-0.5, 0.5], [0.5, 1.5]])),
            'lon_bnds': (['lon', 'bnds'], np.array([[-0.5, 0.5], [0.5, 1.5]]))
        },
        coords={
            'lat': (['lat'], np.array([0, 1])),
            'lon': (['lon'], np.array([0, 1])),
            'bnds': (['bnds'], np.array([0, 1]))
        })

    # get aggregation mapping
    pix_agg = create_raster_polygons(ds)

    # Create polygon covering multiple pixels
    gdf = {
        'name': ['test'],
        'geometry': [Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])]
    }
    gdf = gpd.GeoDataFrame(gdf, crs="EPSG:4326")

    # Get pixel overlaps
    wm = get_pixel_overlaps(gdf, pix_agg)

    # Get aggregate
    agg = aggregate(ds, wm)

    # Should be 1.5, with one pixel valued 1 and one pixel valued 2.
    assert np.allclose([agg.agg.test[0]], 1.5, rtol=1e-4)
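
For reference, the expected 1.5 is just the area-weighted mean of the two non-NaN pixels; a quick sketch with hypothetical equal overlap weights:

vals = np.array([1.0, 2.0])           # the two non-NaN pixels; the NaN pixels drop out of the mean
w = np.array([0.5, 0.5])              # hypothetical equal overlap weights on this symmetric grid
print(np.sum(w * vals) / np.sum(w))   # 1.5
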
Example #2
def test_aggregate_with_all_nans():
    ds = xr.Dataset(
        {
            'test':
            (['lon', 'lat'], np.array([[np.nan, np.nan], [np.nan, np.nan]])),
            'lat_bnds': (['lat', 'bnds'], np.array([[-0.5, 0.5], [0.5, 1.5]])),
            'lon_bnds': (['lon', 'bnds'], np.array([[-0.5, 0.5], [0.5, 1.5]]))
        },
        coords={
            'lat': (['lat'], np.array([0, 1])),
            'lon': (['lon'], np.array([0, 1])),
            'bnds': (['bnds'], np.array([0, 1]))
        })

    # get aggregation mapping
    pix_agg = create_raster_polygons(ds)

    # Create polygon covering multiple pixels
    gdf = {
        'name': ['test'],
        'geometry': [Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])]
    }
    gdf = gpd.GeoDataFrame(gdf, crs="EPSG:4326")

    # Get pixel overlaps
    wm = get_pixel_overlaps(gdf, pix_agg)

    # Get aggregate
    agg = aggregate(ds, wm)

    # Should only return nan
    # (this is not a great assert - agg.agg.test[0] comes out as [array(nan)],
    # which is awkward to reproduce directly; it quacks like a single nan,
    # but it's unclear how to compare against it cleanly)
    assert np.all([np.isnan(k) for k in agg.agg.test])
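
One way to pin down the awkward [array(nan)] shape mentioned above is to coerce it to a float array before checking; a sketch with a stand-in value (not necessarily what aggregate() returns in every version):

wrapped = [np.array(np.nan)]                    # stands in for the [array(nan)] value of agg.agg.test[0]
vals = np.asarray(wrapped, dtype=float).ravel()
assert np.isnan(vals).all()                     # True: everything in the wrapped value is NaN
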
Example #3
def test_aggregate_with_weights(ds=ds):
    # Create polygon covering multiple pixels
    gdf = {
        'name': ['test'],
        'geometry': [Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])]
    }
    gdf = gpd.GeoDataFrame(gdf, crs="EPSG:4326")

    # add a simple weights grid (the equatorial pixels have weight 1,
    # the pixels at 1 N have weight 0)
    weights = xr.DataArray(data=np.array([[1, 1], [0, 0]]),
                           dims=['lat', 'lon'],
                           coords=[ds.lat, ds.lon])

    # calculate the pix_agg variable tested above, to be used in the
    # tests below
    pix_agg = create_raster_polygons(ds, weights=weights)

    # Get pixel overlaps
    wm = get_pixel_overlaps(gdf, pix_agg)

    # Get aggregate
    agg = aggregate(ds, wm)

    # Since the "test" for the input ds has [0,2] for the two
    # equatorial pixels, the average should just be 1.0
    assert np.allclose([v for v in agg.agg.test.values], 1.0)
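
The expected 1.0 follows from the weights: only the equatorial pixels (weight 1) contribute, and their 'test' values are 0 and 2. A sketch of the arithmetic:

vals = np.array([0.0, 2.0])           # 'test' values of the two equatorial pixels (per the comment above)
w = np.array([1.0, 1.0])              # their weights; the 1 N pixels have weight 0 and contribute nothing
print(np.sum(w * vals) / np.sum(w))   # 1.0
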
Example #4
def test_get_pixel_overlaps_gdf_wpreexisting_index(pix_agg=pix_agg):
    # Test to make sure it works with pre-existing indices in the gdf
    # Create polygon covering multiple pixels
    gdf_test = {
        'name': ['test'],
        'geometry': [Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])]
    }
    gdf_test = gpd.GeoDataFrame(gdf_test,
                                crs="EPSG:4326",
                                index=np.arange(10, 11))

    # Get pixel overlaps
    wm_out = get_pixel_overlaps(gdf_test, pix_agg)

    # The index error for an incorrectly-indexed gdf is thrown in aggregate()
    agg = aggregate(ds, wm_out)

    # This assert uses 2.1666 because of the weighting built into the
    # pix_agg variable used throughout this section. The exact value doesn't
    # really matter, since the test targets an index error that would have
    # been raised during aggregate() above.
    assert np.allclose([v for v in agg.agg.test.values], 2.1666, rtol=1e-4)
Example #5
def test_aggregate_basic_wdotproduct(ds=ds):
    # Create polygon covering multiple pixels, using the dot product option
    gdf = {
        'name': ['test'],
        'geometry': [Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])]
    }
    gdf = gpd.GeoDataFrame(gdf, crs="EPSG:4326")

    # calculate the pix_agg variable tested above, to be used in the
    # tests below
    pix_agg = create_raster_polygons(ds)

    # Get pixel overlaps
    wm = get_pixel_overlaps(gdf, pix_agg, impl='dot_product')

    # Get aggregate
    agg = aggregate(ds, wm, impl='dot_product')

    # This requires relaxing rtol to 1e-4 for some reason: the result is
    # actually 1.499981, whereas multiplying out
    # np.sum(agg.agg.rel_area[0] * np.array([0, 1, 2, 3])) gives 1.499963...
    # Possibly worth examining more closely later
    assert np.allclose([v for v in agg.agg.test.values], 1.4999, rtol=1e-4)
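
The ~1.5 the comment multiplies out is just the dot product of the overlap weights with the four pixel values; a sketch with hypothetical equal weights (the real, latitude-dependent areas nudge the result to roughly 1.49998):

vals = np.array([0.0, 1.0, 2.0, 3.0])          # pixel values in the module-level ds (per the comment)
rel_area = np.array([0.25, 0.25, 0.25, 0.25])  # hypothetical equal overlap weights
print(np.dot(rel_area, vals))                  # 1.5 with these idealized weights
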
Example #6
def test_aggregate_with_mismatched_grid():
    # This is to see if the subset_find call works

    ds = xr.Dataset(
        {
            'test':
            (['lon', 'lat'], np.array([[30, 40, 50], [10, 0, 1], [20, 2, 3]])),
            'lat_bnds': (['lat', 'bnds'],
                         np.array([[-1.5, -0.5], [-0.5, 0.5], [0.5, 1.5]])),
            'lon_bnds': (['lon', 'bnds'],
                         np.array([[-1.5, -0.5], [-0.5, 0.5], [0.5, 1.5]]))
        },
        coords={
            'lat': (['lat'], np.array([-1, 0, 1])),
            'lon': (['lon'], np.array([-1, 0, 1])),
            'bnds': (['bnds'], np.array([0, 1]))
        })

    # Create polygon covering multiple pixels
    gdf = {
        'name': ['test'],
        'geometry': [Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])]
    }
    gdf = gpd.GeoDataFrame(gdf, crs="EPSG:4326")

    # calculate the pix_agg variable tested above, to be used in the
    # tests below
    pix_agg = create_raster_polygons(ds)

    # Get pixel overlaps
    wm = get_pixel_overlaps(gdf, pix_agg)

    # Get aggregate
    agg = aggregate(ds, wm)

    # On change in rtol, see note in test_aggregate_basic
    assert np.allclose([v for v in agg.agg.test.values], 1.4999, rtol=1e-4)
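
The polygon only overlaps the lat/lon 0-1 corner of the 3x3 grid, so this reduces to the basic 2x2 case; a quick check of the corner values:

corner_vals = np.array([0.0, 1.0, 2.0, 3.0])   # 'test' values of the pixels at lat/lon 0 and 1
print(corner_vals.mean())                      # 1.5, close to the ~1.4999 area-weighted result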