import numpy as np
from giddy.directional import Rose
from splot.giddy import dynamic_lisa_composite


def _dynamic_lisa_widget_update(rose, gdf, start_time, end_time,
                                p=0.05, figsize=(13, 10)):
    """
    Update the rose object and redraw the composite plot when the
    interact widgets change.
    """
    # rebuild the rose object for the (start_time, end_time) pair
    # selected in the interact widgets
    y1 = gdf[start_time].values
    y2 = gdf[end_time].values
    Y = np.array([y1, y2]).T
    rose_update = Rose(Y, rose.w, k=5)
    fig, _ = dynamic_lisa_composite(rose_update, gdf, p=p, figsize=figsize)
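# A minimal sketch of how this helper can be wired to ipywidgets, assuming
# `rose` and `gdf` objects like those returned by the `_data_generation`
# helper below; the '_rel' column filter is illustrative:
from ipywidgets import fixed, interact

coldict = {col: col for col in gdf.columns if col.endswith('_rel')}
interact(_dynamic_lisa_widget_update,
         start_time=coldict, end_time=coldict,
         rose=fixed(rose), gdf=fixed(gdf),
         p=fixed(0.05), figsize=fixed((13, 10)))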
import geopandas as gpd
import numpy as np
import pandas as pd
from giddy.directional import Rose
from libpysal import examples
from libpysal.weights import Queen


def _data_generation():
    # get csv and shp
    shp_link = examples.get_path('us48.shp')
    df = gpd.read_file(shp_link)
    income_table = pd.read_csv(examples.get_path('usjoin.csv'))
    # calculate relative values
    for year in range(1969, 2010):
        income_table[str(year) + '_rel'] = (
            income_table[str(year)] / income_table[str(year)].mean())
    # merge shapes and attributes
    gdf = df.merge(income_table, left_on='STATE_NAME', right_on='Name')
    # retrieve spatial weights and data for two points in time
    w = Queen.from_dataframe(gdf)
    w.transform = 'r'
    y1 = gdf['1969_rel'].values
    y2 = gdf['2000_rel'].values
    # calculate Rose object
    Y = np.array([y1, y2]).T
    rose = Rose(Y, w, k=5)
    return gdf, y1, rose
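# A quick check of the helper above (a sketch; the bundled us48 data covers
# the 48 contiguous states, so the Rose object is built from 48 vectors):
gdf, y1, rose = _data_generation()
print(rose.cuts)    # sector cut-offs in radians for k=5 sectors
print(rose.counts)  # number of LISA vectors falling in each sector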
# We will use a simple contiguity structure to define neighbors. The file
# states48.gal encodes the adjacency structure of the 48 states. We read
# this in and row-normalize the weights:

gal = lps.open(lps.examples.get_path('states48.gal'))
w = gal.read()
w.transform = 'r'

##########################################
# Visualization
# =============
#
# The Rose class creates a circular histogram that can be used to examine
# the distribution of LISA vectors across segments of the histogram:

r4 = Rose(Y, w, k=4)

##########################################
# LISA Vectors
# ------------
#
# The Rose class contains methods to carry out inference on the circular
# distribution of the LISA vectors. The first approach is based on a
# two-sided alternative where the null is that the distribution of the
# vectors across the segments reflects independence in the movements of the
# focal unit and its spatial lag. Inference is based on random spatial
# permutations under the null.

r4.plot_vectors()  # lisa vectors

##########################################
# LISA Vectors Origin Standardized
# --------------------------------
#
# As the LISA vectors combine the locations of a given LISA statistic in
# two different time periods, it can be useful to standardize the vectors
# to look for directional biases in the movements:
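# The section above ends expecting a plot. As a minimal sketch, assuming
# the `r4` object built earlier: `Rose.plot_origin` re-draws each LISA
# vector with its tail translated to the origin, which makes directional
# biases in the movements easier to spot.

r4.plot_origin()  # origin standardized vectors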
# Take the first and last year of our income data as the interval for the
# directional analysis
Y = rel[:, [0, -1]]

# Set the seed of the random number generator used in the permutation-based
# inference for the rose diagram, so that we can replicate our example
# results
np.random.seed(100)

# Call the Rose class to construct the directional histogram for the
# dynamic LISA statistics. We will use four circular sectors for our
# histogram
r4 = Rose(Y, w, k=4)

# What are the cut-offs for our histogram - in radians
r4.cuts
# array([0.        , 1.57079633, 3.14159265, 4.71238898, 6.28318531])

# We can test whether these counts are different from what would be
# expected if there was no association between the movement of the
# focal unit and its spatial lag.
# To do so we call the `permute` method of the object
r4.permute()

# and then inspect the `p` attribute:
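# A minimal continuation of the listing above: `p` holds one pseudo
# p-value per circular sector, and small values flag sectors whose counts
# deviate from the spatial-permutation null
r4.p

# The `alternative` keyword of `permute` also supports one-sided tests
r4.permute(alternative='positive')
r4.p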
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import pysal as ps
from giddy.directional import Rose


def dirAnalyofDynLISAs(data_df, zipPolygon):
    # weekly case counts per zip code: rows = zip codes, columns = weeks
    caseWeekly_unstack = data_df['CasesWeekly'].unstack(level=0)
    zip_codes = gpd.read_file(zipPolygon)
    data_df_zipGPD = zip_codes.merge(caseWeekly_unstack,
                                     left_on='zip',
                                     right_on=caseWeekly_unstack.index)

    # queen contiguity weights, row-standardized
    W = ps.lib.weights.Queen(data_df_zipGPD.geometry)
    W.transform = 'R'

    weeks = np.unique(data_df.index.get_level_values('Week Number'))
    valArray = data_df_zipGPD[weeks].to_numpy()

    # back-fill missing weekly values, then zero any NaNs that remain
    valArray_fillNan = bfill(valArray).T
    valArray_fillNan[np.isnan(valArray_fillNan)] = 0

    # relative values: each zip code's count divided by that week's mean
    rvalArray = valArray_fillNan.T / valArray_fillNan.mean(axis=1)

    # first and last week define the interval for the directional analysis
    Y = rvalArray[:, [0, -1]]
    np.random.seed(100)
    r4 = Rose(Y, W, k=4)

    plt.figure()
    r4.plot()
    # each point sits at the starting relative value (x) against the
    # spatial lag of the starting relative value (y), i.e. what
    # plt.scatter(Y[:, 0], r4.lag[:, 0]) would show
    r4.plot(Y[:, 0])                 # condition on starting relative value
    r4.plot(attribute=r4.lag[:, 0])  # condition on its spatial lag
    r4.plot_vectors()                # lisa vectors
    r4.plot_vectors(arrows=False)
    r4.plot_origin()                 # origin standardized

    # permutation-based inference on the sector counts
    print("cuts:", r4.cuts)
    print("counts:", r4.counts)
    np.random.seed(1234)
    r4.permute(permutations=999)
    print("p:", r4.p)
    r4.permute(alternative='positive', permutations=999)
    print("alter-positive:", r4.p)
    print("expected-positive:", r4.expected_perm)
    r4.permute(alternative='negative', permutations=999)
    print("alter-negative:", r4.p)
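# `bfill` above is not defined in this snippet. A minimal sketch of a
# compatible helper, assuming it back-fills NaNs along each row of a 2D
# array (rows with no later valid value keep their trailing NaNs, which
# the caller above zeroes afterwards):
import numpy as np

def bfill(arr):
    # valid cells keep their own column index; NaN cells temporarily
    # point at the last column
    mask = np.isnan(arr)
    idx = np.where(~mask, np.arange(arr.shape[1]), arr.shape[1] - 1)
    # for every cell, find the nearest valid column index to its right
    idx = np.minimum.accumulate(idx[:, ::-1], axis=1)[:, ::-1]
    return arr[np.arange(arr.shape[0])[:, None], idx]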