def test_Spatial_Median_Rate(self):
    out_smr = sm.Spatial_Median_Rate(self.e, self.b, self.w).r
    out_smr_w = sm.Spatial_Median_Rate(self.e, self.b, self.w, aw=self.b).r
    out_smr2 = sm.Spatial_Median_Rate(self.e, self.b, self.w, iteration=2).r
    out_smr = [round(i, 7) for i in out_smr[:5]]
    out_smr_w = [round(i, 7) for i in out_smr_w[:5]]
    out_smr2 = [round(i, 7) for i in out_smr2[:5]]
    self.assertEqual(out_smr, self.smr)
    self.assertEqual(out_smr_w, self.smr_w)
    self.assertEqual(out_smr2, self.smr2)
def test_Spatial_Median_Rate(self):
    out_smr = sm.Spatial_Median_Rate(self.e, self.b, self.w).r
    out_smr_w = sm.Spatial_Median_Rate(self.e, self.b, self.w, aw=self.b).r
    out_smr2 = sm.Spatial_Median_Rate(self.e, self.b, self.w, iteration=2).r
    np.testing.assert_allclose(out_smr[:5].flatten(), self.smr,
                               atol=ATOL, rtol=RTOL)
    np.testing.assert_allclose(out_smr_w[:5].flatten(), self.smr_w,
                               atol=ATOL, rtol=RTOL)
    np.testing.assert_allclose(out_smr2[:5].flatten(), self.smr2,
                               atol=ATOL, rtol=RTOL)
def test_Spatial_Median_Rate_tabular(self):
    out_smr = sm.Spatial_Median_Rate(self.df[self.ename],
                                     self.df[self.bname], self.w).r
    out_smr_w = sm.Spatial_Median_Rate(self.df[self.ename],
                                       self.df[self.bname], self.w,
                                       aw=self.df[self.bname]).r
    out_smr2 = sm.Spatial_Median_Rate(self.df[self.ename],
                                      self.df[self.bname], self.w,
                                      iteration=2).r
    self.assertIsInstance(out_smr, np.ndarray)
    self.assertIsInstance(out_smr_w, np.ndarray)
    self.assertIsInstance(out_smr2, np.ndarray)
    np.testing.assert_allclose(out_smr[:5].flatten(), self.smr,
                               atol=ATOL, rtol=RTOL)
    np.testing.assert_allclose(out_smr_w[:5].flatten(), self.smr_w,
                               atol=ATOL, rtol=RTOL)
    np.testing.assert_allclose(out_smr2[:5].flatten(), self.smr2,
                               atol=ATOL, rtol=RTOL)
    out_smr = sm.Spatial_Median_Rate.by_col(self.df, self.ename,
                                            self.bname, self.w)
    out_smr_w = sm.Spatial_Median_Rate.by_col(self.df, self.ename,
                                              self.bname, self.w,
                                              aw=self.df[self.bname])
    out_smr2 = sm.Spatial_Median_Rate.by_col(self.df, self.ename,
                                             self.bname, self.w,
                                             iteration=2)
    outcol = '{}-{}_spatial_median_rate'.format(self.ename, self.bname)
    np.testing.assert_allclose(out_smr[outcol].values[:5], self.smr,
                               rtol=RTOL, atol=ATOL)
    np.testing.assert_allclose(out_smr_w[outcol].values[:5], self.smr_w,
                               rtol=RTOL, atol=ATOL)
    np.testing.assert_allclose(out_smr2[outcol].values[:5], self.smr2,
                               rtol=RTOL, atol=ATOL)

@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_Spatial_Smoother_multicol(self):
    """
    Test that specifying multiple columns works correctly. Since the
    function is shared over all spatial smoothers, only one is tested.
    """
    enames = [self.ename, 'SID79']
    bnames = [self.bname, 'BIR79']
    out_df = sm.Spatial_Median_Rate.by_col(self.df, enames, bnames, self.w)
    outcols = ['{}-{}_spatial_median_rate'.format(e, b)
               for e, b in zip(enames, bnames)]
    smr79 = np.array([0.00122129, 0.00176924, 0.00176924,
                      0.00240964, 0.00272035])
    answers = [self.smr, smr79]
    for col, answer in zip(outcols, answers):
        self.assertIn(col, out_df.columns)
        np.testing.assert_allclose(out_df[col].values[:5], answer,
                                   rtol=RTOL, atol=ATOL)

@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_Smoother_multicol(self):
    """
    Test that non-spatial smoothers work with multicolumn queries.
    """
    enames = [self.ename, 'SID79']
    bnames = [self.bname, 'BIR79']
    out_df = sm.Excess_Risk.by_col(self.df, enames, bnames)
    outcols = ['{}-{}_excess_risk'.format(e, b)
               for e, b in zip(enames, bnames)]
    er79 = np.array([0.000000, 2.796607, 0.8383863, 1.217479, 0.943811])
    answers = [self.er, er79]
    for col, answer in zip(outcols, answers):
        self.assertIn(col, out_df.columns)
        np.testing.assert_allclose(out_df[col].values[:5], answer,
                                   rtol=RTOL, atol=ATOL)
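The tests above rely on fixtures defined elsewhere in the test case: the event and base arrays (`self.e`, `self.b`), a spatial weights object (`self.w`), a pandas view of the same table (`self.df`), tolerance constants (`RTOL`, `ATOL`), and the expected first-five values (`self.smr`, `self.smr_w`, `self.smr2`, `self.er`). Below is a minimal sketch of what such a `setUp` might look like, assuming the SIDS sample data bundled with PySAL; the class name, file names, column choices, and placeholder expected values are illustrative assumptions, not the suite's actual fixtures.

```python
# Illustrative sketch only: fixture names and data choices are assumptions.
import unittest
import numpy as np
import pysal
from pysal.esda import smoothing as sm

RTOL = 1e-5   # assumed relative tolerance for assert_allclose
ATOL = 1e-7   # assumed absolute tolerance for assert_allclose

try:
    import pandas as pd
    PANDAS_EXTINCT = False
except ImportError:
    PANDAS_EXTINCT = True


class TestSpatialMedianRate(unittest.TestCase):
    def setUp(self):
        # SIDS county data shipped with PySAL (assumed fixture)
        self.w = pysal.open(pysal.examples.get_path('sids2.gal')).read()
        db = pysal.open(pysal.examples.get_path('sids2.dbf'), 'r')
        self.ename, self.bname = 'SID74', 'BIR74'
        self.e = np.array(db.by_col(self.ename))
        self.b = np.array(db.by_col(self.bname))
        if not PANDAS_EXTINCT:
            # pandas view of the same columns, used by the tabular/by_col tests
            self.df = pd.DataFrame({col: db.by_col(col) for col in db.header})
        # the expected first-five values would be recorded here, e.g.
        # self.smr = [...]; self.smr_w = [...]; self.smr2 = [...]; self.er = [...]
```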
elif sigs[i] > 0.0:
    sigs[i] = 4

# plot significant autocorrelation
maps.plot_choropleth(shp_link, np.array(sigs), type='equal_interval',
                     title='Significant Spatial Autocorrelation in 311 Calls',
                     k=10, figsize=(6, 9))

# spatial smoothing
from pysal.esda import smoothing as sm

pop = np.array(calls['pop'])
all311 = np.array(calls['all_calls'])

# Locally weighted median smoothing, weighted by pop
rate = sm.Spatial_Median_Rate(all311, pop, w, aw=pop)  # weights are populations
y = rate.r

# check for nans
len(y[np.isnan(y)])
y[np.isnan(y)] = 0

maps.plot_choropleth(shp_link, y, type='fisher_jenks',
                     title='All Calls by Census Area, 2011-2015\nSpatially Smoothed',
                     k=20, figsize=(6, 9))

# Locally weighted median smoothing, weighted by pop, iterated three times
rate = sm.Spatial_Median_Rate(all311, pop, w, aw=pop, iteration=3)  # weights are populations
y = rate.r
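The smoothing calls above assume a spatial weights object `w` built earlier in the analysis. A minimal sketch of one plausible way to construct it from the same shapefile, using queen contiguity with row standardization; the choice of queen contiguity (rather than rook or distance-based weights) is an assumption, not something the snippet above specifies.

```python
# Sketch: one plausible construction of the weights object `w` used above.
# Queen contiguity and row standardization are assumptions; the original
# analysis may have used a different weights specification.
import pysal

w = pysal.queen_from_shapefile(shp_link)  # contiguity weights from the census polygons
w.transform = 'r'                         # row-standardize, as is typical for the autocorrelation step
print(w.n, w.mean_neighbors)              # quick sanity check on connectivity
```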