Example #1
0
def datasets():
    """Return the two Sacramento example GeoDataFrames.

    The tract-level frame gains a ``pct_poverty`` ratio column; the MSA
    frame is returned unchanged.
    """
    example1 = load_example("Sacramento1")
    example2 = load_example("Sacramento2")
    tracts = geopandas.read_file(example1.get_path("sacramentot2.shp"))
    msa = geopandas.read_file(example2.get_path("SacramentoMSA2.shp"))
    tracts["pct_poverty"] = tracts.POV_POP / tracts.POV_TOT
    return tracts, msa
def datasets():
    """Return the Sacramento example GeoDataFrames with extra demo columns.

    Adds ``pct_poverty`` (poverty ratio) and a toy categorical ``animal``
    column (the category list cycled to cover every row) to the tract frame.
    """
    example1 = load_example("Sacramento1")
    example2 = load_example("Sacramento2")
    tracts = geopandas.read_file(example1.get_path("sacramentot2.shp"))
    msa = geopandas.read_file(example2.get_path("SacramentoMSA2.shp"))
    tracts["pct_poverty"] = tracts.POV_POP / tracts.POV_TOT
    categories = ["cat", "dog", "donkey", "wombat", "capybara"]
    repeats = (len(tracts) // len(categories)) + 1
    tracts["animal"] = (categories * repeats)[: len(tracts)]
    return tracts, msa
Example #3
0
def datasets():
    """Fetch the NLCD raster (if absent) and return the Sacramento GeoDataFrames."""
    # Download the land-cover raster once; later runs reuse the local file.
    if not os.path.exists("nlcd_2011.tif"):
        package = quilt3.Package.browse("rasters/nlcd", "s3://spatial-ucr")
        package["nlcd_2011.tif"].fetch()

    example1 = load_example("Sacramento1")
    example2 = load_example("Sacramento2")
    tracts = geopandas.read_file(example1.get_path("sacramentot2.shp"))
    msa = geopandas.read_file(example2.get_path("SacramentoMSA2.shp"))
    return tracts, msa
Example #4
0
def datasets():
    """Load the Sacramento example datasets, fetching the NLCD raster if needed.

    Returns (sac1, sac2) GeoDataFrames — sac1 gains a ``pct_poverty`` column —
    or None when quilt3 is unavailable (QUILTMISSING is set).
    """
    # Guard clause replaces the original nested `if`/dead `else: pass`;
    # the function still returns None when quilt3 is missing.
    if QUILTMISSING:
        return None

    # Download the land-cover raster once; later runs reuse the local file.
    if not os.path.exists("nlcd_2011.tif"):
        p = quilt3.Package.browse("rasters/nlcd", "s3://spatial-ucr")
        p["nlcd_2011.tif"].fetch()

    sac1 = load_example('Sacramento1')
    sac2 = load_example('Sacramento2')
    sac1 = geopandas.read_file(sac1.get_path("sacramentot2.shp"))
    sac2 = geopandas.read_file(sac2.get_path("SacramentoMSA2.shp"))
    sac1['pct_poverty'] = sac1.POV_POP / sac1.POV_TOT

    return sac1, sac2
 def test_Multi_Gini_Seg(self):
     """Multi-group Gini on the Sacramento tracts matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = MultiGini(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.5456349992598081)
 def test_Multi_Information_Theory(self):
     """Multi-group information theory index matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = MultiInfoTheory(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.1710160297858887)
Example #7
0
 def test_Relative_Clustering(self):
     """Relative clustering on UTM-projected Sacramento data matches the known value."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     # Distance-based index: project to a metric CRS before computing.
     frame = frame.to_crs(frame.estimate_utm_crs())
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     result = RelativeClustering(subset, 'HISP', 'TOT_POP')
     np.testing.assert_almost_equal(result.statistic, 0.5172753899054523)
Example #8
0
 def test_Distance_Decay_Isolation(self):
     """Distance-decay isolation on UTM-projected data matches the known value."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     # Distance-based index: project to a metric CRS before computing.
     subset = subset.to_crs(subset.estimate_utm_crs())
     result = DistanceDecayIsolation(subset, 'HISP', 'TOT_POP')
     np.testing.assert_almost_equal(result.statistic, 0.20144710843904595)
 def test_Multi_Dissim(self):
     """Multi-group dissimilarity index matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE_', 'BLACK_', 'ASIAN_', 'HISP_']
     data = gpd.read_file(path)[groups]
     result = MultiDissim(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.41340872573177806)
 def test_Multi_Multi_Normalized_Exposure(self):
     """Multi-group normalized exposure index matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = MultiNormExposure(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.18821879029994157)
Example #11
0
 def test_Spatial_Prox_Prof(self):
     """Spatial proximity profile on UTM-projected data matches the known value."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     # Distance-based index: project to a metric CRS before computing.
     subset = subset.to_crs(subset.estimate_utm_crs())
     result = SpatialProxProf(subset, 'HISP', 'TOT_POP')
     np.testing.assert_almost_equal(result.statistic, 0.22847334404621394)
 def test_Multi_Relative_Diversity(self):
     """Multi-group relative diversity index matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE_', 'BLACK_', 'ASIAN_', 'HISP_']
     data = gpd.read_file(path)[groups]
     result = MultiRelativeDiversity(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.15820019878220337)
Example #13
0
 def test_Multi_Squared_Coefficient_of_Variation(self):
     """Multi-group squared coefficient of variation matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = MultiSquaredCoefVar(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.11875484641127525)
Example #14
0
 def test_Simpsons_Concentration(self):
     """Simpson's concentration index matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = SimpsonsConcentration(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.49182413151957904)
Example #15
0
def test_moran_loc_bv_scatterplot():
    """Bivariate local Moran scatterplot: smoke tests plus error/warning paths."""
    guerry = examples.load_example('Guerry')
    gdf = gpd.read_file(guerry.get_path('Guerry.shp'))
    x = gdf['Suicids'].values
    y = gdf['Donatns'].values
    w = Queen.from_dataframe(gdf)
    w.transform = 'r'
    # Univariate and bivariate local Moran statistics
    moran_loc = Moran_Local(y, w)
    moran_loc_bv = Moran_Local_BV(x, y, w)

    # default call with p-value coloring
    fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv)
    plt.close(fig)

    # p-value plus non-equal aspect ratio
    fig, _ = _moran_loc_bv_scatterplot(
        moran_loc_bv, p=0.05, aspect_equal=False)
    plt.close(fig)

    # univariate input is rejected; explicit color kwarg triggers a warning
    assert_raises(ValueError, _moran_loc_bv_scatterplot, moran_loc, p=0.5)
    assert_warns(UserWarning, _moran_loc_bv_scatterplot, moran_loc_bv,
                 p=0.5, scatter_kwds=dict(c='r'))
Example #16
0
    def test_Decomposition(self):
        """DecomposeSegregation reproduces known components for every approach."""
        s_map = gpd.read_file(
            load_example("Sacramento1").get_path("sacramentot2.shp"))
        index1 = Dissim(s_map, 'HISP', 'TOT_POP')
        index2 = Dissim(s_map, 'BLACK', 'TOT_POP')

        # (approach, expected c_a, expected c_s) — checked in the same order
        # as the original explicit calls.
        cases = [
            ("composition", -0.16138819842911295, -0.005104643275796905),
            ("share", -0.1543828579279878, -0.012109983776922045),
            ("dual_composition", -0.16159526946235048, -0.004897572242559378),
        ]
        for approach, expected_c_a, expected_c_s in cases:
            res = DecomposeSegregation(
                index1, index2, counterfactual_approach=approach)
            np.testing.assert_almost_equal(res.c_a, expected_c_a)
            np.testing.assert_almost_equal(res.c_s, expected_c_s)
            # smoke-test both plot types for each decomposition
            res.plot(plot_type='cdfs')
            res.plot(plot_type='maps')
 def test_Simpsons_Interaction(self):
     """Simpson's interaction index matches the known value."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = SimpsonsInteraction(data, groups)
     np.testing.assert_almost_equal(result.statistic, 0.508175868480421)
Example #18
0
def test_plot_local_autocorrelation():
    """plot_local_autocorrelation: smoke tests plus mask validation."""
    columbus = examples.load_example('Columbus')
    df = gpd.read_file(columbus.get_path('columbus.shp'))

    y = df['HOVAL'].values
    w = Queen.from_dataframe(df)
    w.transform = 'r'
    moran_loc = Moran_Local(y, w)

    # basic call with significance filtering
    fig, _ = plot_local_autocorrelation(moran_loc, df, 'HOVAL', p=0.05)
    plt.close(fig)

    # quadrant selection combined with a region mask
    fig, _ = plot_local_autocorrelation(
        moran_loc, df, 'HOVAL', p=0.05, region_column='POLYID',
        aspect_equal=False, mask=['1', '2', '3'], quadrant=1)
    plt.close(fig)

    # mask values absent from the region column must raise
    assert_raises(ValueError, plot_local_autocorrelation, moran_loc,
                  df, 'HOVAL', p=0.05, region_column='POLYID',
                  mask=['100', '200', '300'], quadrant=1)
    def test_Multi_Local_Simpson_Interaction(self):
        """First ten local Simpson interaction statistics match known values."""
        path = load_example("Sacramento1").get_path("sacramentot2.shp")
        groups = ['WHITE_', 'BLACK_', 'ASIAN_', 'HISP_']
        data = gpd.read_file(path)[groups]
        result = MultiLocalSimpsonInteraction(data, groups)
        expected = np.array([0.15435993, 0.33391595, 0.49909747, 0.1299449,
                             0.09805056, 0.13128178, 0.04447356, 0.0398933,
                             0.03723054, 0.11758548])
        np.testing.assert_almost_equal(result.statistics[0:10], expected)
Example #20
0
def test_plot_spatial_weights():
    """plot_spatial_weights smoke tests: planar and nonplanar weights,
    custom edge styling, an existing axes, and column-indexed weights."""
    # get data (removed a no-op ``gdf.head()`` call whose result was discarded)
    rio_grande_do_sul = examples.load_example('Rio Grande do Sul')
    gdf = gpd.read_file(rio_grande_do_sul.get_path('43MUE250GC_SIR.shp'))
    # calculate weights
    weights = Queen.from_dataframe(gdf)
    # plot weights
    fig, _ = plot_spatial_weights(weights, gdf)
    plt.close(fig)
    # calculate nonplanar_joins
    wnp = libpysal.weights.util.nonplanar_neighbors(weights, gdf)
    # plot new joins
    fig2, _ = plot_spatial_weights(wnp, gdf)
    plt.close(fig2)
    # customize edge styling
    fig3, _ = plot_spatial_weights(wnp, gdf, nonplanar_edge_kws=dict(color='#4393c3'))
    plt.close(fig3)
    # plot in existing figure
    fig4, axs = plt.subplots(1, 3)
    plot_spatial_weights(wnp, gdf, ax=axs[0])
    plt.close(fig4)

    # uses a column as the index for the spatial weights object
    weights_index = Queen.from_dataframe(gdf, idVariable="CD_GEOCMU")
    fig, _ = plot_spatial_weights(weights_index, gdf, indexed_on="CD_GEOCMU")
    plt.close(fig)
Example #21
0
def test_h3fy_diff_crs():
    """h3fy preserves a non-WGS84 input CRS on the returned hex grid."""
    example = load_example("Sacramento1")
    tracts = geopandas.read_file(example.get_path("sacramentot2.shp"))
    # reproject to a UTM zone before hexifying
    tracts = tracts.to_crs(32710)
    hexes = h3fy(tracts)
    assert hexes.shape == (364, 1)
    assert hexes.crs.to_string() == "EPSG:32710"
Example #22
0
 def test_Modified_Gini(self):
     """Simulation-based modified Gini (seeded) matches to 3 decimals."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     result = ModifiedGini(subset, 'HISP', 'TOT_POP', seed=1234, backend='loky')
     np.testing.assert_almost_equal(
         result.statistic, 0.4217844443896344, decimal=3)
Example #23
0
def test_h3fy_nocrs():
    """h3fy must reject input whose CRS is undefined.

    The original try/except-pass version silently passed even when h3fy
    raised nothing; now the test fails unless the ValueError actually occurs.
    """
    sac1 = load_example("Sacramento1")
    sac1 = geopandas.read_file(sac1.get_path("sacramentot2.shp"))
    sac1.crs = None
    raised = False
    try:
        h3fy(sac1, return_geoms=True)
    except ValueError:
        raised = True
    assert raised, "h3fy should raise ValueError when the input has no CRS"
 def test_Boundary_Spatial_Dissim(self):
     """Boundary spatial dissimilarity matches to 2 decimals."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     result = BoundarySpatialDissim(subset, 'HISP', 'TOT_POP')
     np.testing.assert_almost_equal(
         result.statistic, 0.2638936888653678, decimal=2)
 def test_Modified_Gini_Seg(self):
     """Seeded modified Gini segregation index matches to 3 decimals."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP_', 'TOT_POP']]
     # fix the RNG so the simulated index is reproducible
     np.random.seed(1234)
     result = ModifiedGiniSeg(subset, 'HISP_', 'TOT_POP')
     np.testing.assert_almost_equal(
         result.statistic, 0.4217844443896344, decimal=3)
 def test_Bias_Corrected_Dissim(self):
     """Seeded bias-corrected dissimilarity matches to 3 decimals."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     # fix the RNG so the simulated correction is reproducible
     np.random.seed(1234)
     result = BiasCorrectedDissim(subset, 'HISP', 'TOT_POP')
     np.testing.assert_almost_equal(
         result.statistic, 0.32136474449360836, decimal=3)
Example #27
0
def datasets():
    """Build the Sacramento example datasets, fetching the NLCD raster if needed.

    Returns (sac1, sac2) GeoDataFrames — sac1 gains a ``pct_poverty`` ratio
    and a toy categorical ``animal`` column — or None when quilt3 is
    unavailable (QUILTMISSING is set).
    """
    # Guard clause replaces the original nested `if`/dead `else: pass`;
    # the function still returns None when quilt3 is missing.
    if QUILTMISSING:
        return None

    # Download the land-cover raster once; later runs reuse the local file.
    if not os.path.exists("nlcd_2011.tif"):
        p = quilt3.Package.browse("rasters/nlcd", "s3://spatial-ucr")
        p["nlcd_2011.tif"].fetch()

    sac1 = load_example("Sacramento1")
    sac2 = load_example("Sacramento2")
    sac1 = geopandas.read_file(sac1.get_path("sacramentot2.shp"))
    sac2 = geopandas.read_file(sac2.get_path("SacramentoMSA2.shp"))
    sac1["pct_poverty"] = sac1.POV_POP / sac1.POV_TOT

    # Cycle a small category list over every row, then truncate to length.
    categories = ["cat", "dog", "donkey", "wombat", "capybara"]
    sac1["animal"] = (categories *
                      ((len(sac1) // len(categories)) + 1))[:len(sac1)]

    return sac1, sac2
 def test_Modified_Dissim(self):
     """Seeded modified dissimilarity index matches to 3 decimals."""
     frame = gpd.read_file(
         load_example("Sacramento1").get_path("sacramentot2.shp"))
     subset = frame[['geometry', 'HISP', 'TOT_POP']]
     # fix the RNG so the simulated index is reproducible
     np.random.seed(1234)
     result = ModifiedDissim(subset, 'HISP', 'TOT_POP')
     np.testing.assert_almost_equal(
         result.statistic, 0.31075891224250635, decimal=3)
Example #29
0
 def setUp(self):
     """Load the South homicide data: y = HR90 column vector, X = four covariates,
     plus a row-standardized Queen contiguity weights matrix."""
     south = load_example('South')
     db = libpysal.io.open(south.get_path("south.dbf"), 'r')
     self.y_name = "HR90"
     dependent = np.array(db.by_col(self.y_name))
     self.y = dependent.reshape(len(dependent), 1)
     self.x_names = ["RD90", "PS90", "UE90", "DV90"]
     self.x = np.array([db.by_col(name) for name in self.x_names]).T
     self.w = Queen.from_shapefile(south.get_path("south.shp"))
     self.w.transform = 'r'
Example #30
0
 def test_Multi_Location_Quotient(self):
     """Location quotients for the leading 3x3 block match known values."""
     path = load_example("Sacramento1").get_path("sacramentot2.shp")
     groups = ['WHITE', 'BLACK', 'ASIAN', 'HISP']
     data = gpd.read_file(path)[groups]
     result = MultiLocationQuotient(data, groups)
     expected = np.array([[1.36543221, 0.07478049, 0.16245651],
                          [1.18002164, 0., 0.14836683],
                          [0.68072696, 0.03534425, 0.]])
     np.testing.assert_almost_equal(result.statistics[0:3, 0:3], expected)