def hexLat2W(nrows=5, ncols=5, **kwargs):
    """
    Create a W object for a hexagonal lattice.

    Parameters
    ----------
    nrows : int
        number of rows
    ncols : int
        number of columns
    **kwargs : keyword arguments
        optional arguments for :class:`pysal.weights.W`

    Returns
    -------
    w : W
        instance of spatial weights class W

    Notes
    -----
    Observations are row ordered: first k observations are in row 0,
    next k in row 1, and so on.

    Construction is based on shifting every other column of a regular
    lattice down 1/2 of a cell.

    Examples
    --------
    >>> from libpysal.weights import lat2W, hexLat2W
    >>> w = lat2W()
    >>> w.neighbors[1]
    [0, 6, 2]
    >>> w.neighbors[21]
    [16, 20, 22]
    >>> wh = hexLat2W()
    >>> wh.neighbors[1]
    [0, 6, 2, 5, 7]
    >>> wh.neighbors[21]
    [16, 20, 22]
    """
    if nrows == 1 or ncols == 1:
        print("Hexagon lattice requires at least 2 rows and columns")
        print("Returning a linear contiguity structure")
        return lat2W(nrows, ncols)

    n = nrows * ncols
    rid = [i // ncols for i in range(n)]  # row index of each observation
    cid = [i % ncols for i in range(n)]  # column index of each observation
    r1 = nrows - 1  # last row index
    c1 = ncols - 1  # last column index

    # Start from rook contiguity on the regular lattice and add the extra
    # diagonal neighbors produced by shifting odd columns down half a cell.
    w = lat2W(nrows, ncols).neighbors
    for i in range(n):
        if cid[i] % 2:  # odd column: gains sw/se neighbors (except last row)
            if rid[i] < r1:
                if cid[i] > 0:  # new sw neighbor
                    w[i] = w.get(i, []) + [i + ncols - 1]
                if cid[i] < c1:  # new se neighbor
                    w[i] = w.get(i, []) + [i + ncols + 1]
        elif rid[i] > 0:  # even column: gains nw/ne neighbors (except first row)
            # Fix: original code had a dead no-op statement `w[i]` here and
            # computed these candidates even when the row guard skipped them.
            jnw = [i - ncols - 1]
            jne = [i - ncols + 1]
            if cid[i] == 0:  # first column has no nw neighbor
                w[i] = w.get(i, []) + jne
            elif cid[i] == c1:  # last column has no ne neighbor
                w[i] = w.get(i, []) + jnw
            else:
                w[i] = w.get(i, []) + jne
                w[i] = w.get(i, []) + jnw
    return W(w, **kwargs)
# NOTE(review): this chunk begins mid-function — the `if` matching the `else:`
# below (apparently a geopandas-version / predicate dispatch) is outside this
# view, so indentation here is a best-effort reconstruction.
else:
    # Fallback path for older geopandas: only the 'intersects' predicate can
    # be emulated via the spatial index.
    if predicate != 'intersects':
        raise ValueError(
            f'Predicate `{predicate}` requires geopandas >= 0.8.0.')
    tree = gdf.sindex
    # Query the spatial index with each geometry's bounding box, drop the
    # geometry itself, then refine candidates with an exact intersects test.
    for i, (ix, geom) in enumerate(gdf.geometry.iteritems()):
        hits = list(tree.intersection(geom.bounds))
        hits.remove(i)  # exclude the observation from its own neighbor list
        possible = gdf.iloc[hits]
        ids = possible[possible.intersects(geom)].index.tolist()
        neighbors[ix] = ids
# Restore the original geometry column if a temporary buffer was used
# (presumably set up earlier in the function — outside this view).
if buffering:
    gdf.set_geometry(old_geometry_name, inplace=True)
    if drop:
        gdf.drop(columns=["_buffer"], inplace=True)
return W(neighbors, **kwargs)


if __name__ == "__main__":
    # Sanity checks: lat2W (W-based) and lat2SW (sparse-matrix-based) must
    # produce identical contiguity matrices for rook and queen cases.
    from libpysal.weights import lat2W

    assert (lat2W(5, 5).sparse.todense() == lat2SW(5, 5).todense()).all()
    assert (lat2W(5, 3).sparse.todense() == lat2SW(5, 3).todense()).all()
    assert (lat2W(5, 3, rook=False).sparse.todense() == lat2SW(
        5, 3, "queen").todense()).all()
    assert (lat2W(50, 50, rook=False).sparse.todense() == lat2SW(
        50, 50, "queen").todense()).all()
def __init__(self, n_move=10000, n_interact=1000, nx=50, GA=0.425,
             GB=0.425, threshold=0.3, PS=0.01, CS=1.5):
    '''
    Initialize the simulation object.

    Args:
        n_move (int): number of moves to run
        n_interact (int): number of interactions to run
        nx (int): the width of the square grid world
        GA (float): the proportion of agents in Group A
        GB (float): the proportion of agents in Group B
        threshold (float): threshold to move
        PS (float): ID strength change probability
        CS (float): ID strength change constant
    '''
    self.n_move = n_move
    self.n_interact = n_interact
    self.nx = nx
    # Initialize the count of moves and interactions
    self.c_move = 0
    self.c_interact = 0
    # The total population is the size of the grid world (every cell holds
    # exactly one agent record, including "empty" agents — Group E below).
    self.npop = nx * nx
    # 2D array storing per-agent properties, one row per cell:
    # 0-Group ID, 1-Type, 2-Current ID strength, 3-Future ID strength
    self.population = np.zeros((nx * nx, 4))
    # 2D array storing the segregation summary data:
    # 0-Total A, 1-Total B, 2-Total E,
    # 3-Initial average ID strength, 4-Ending average ID strength,
    # 5-Threshold
    self.popagg = np.zeros((3, 6))
    # Proportions of the three agent groups (E is the remainder)
    self.GA = GA
    self.GB = GB
    self.GE = 1 - GA - GB
    self.threshold = threshold
    self.PS = PS
    self.CS = CS
    # Randomly assign group ID to agents via a one-draw multinomial per cell:
    # 0-Group A, 1-Group B, 2-Group E (empty cell/agent)
    self.population[:, 0] = np.random.multinomial(1, [GA, GB, 1 - GA - GB],
                                                  nx * nx).argmax(1)
    # Get the number of empty cells/agents
    self.NGE = np.sum(self.population[:, 0] == 2)
    # Randomly assign type and ID strength to agents
    # Type: 0-Open, 1-Neutral, 2-Close
    # ID strength: -1 (weak) to 1 (strong)
    # NOTE(review): types/strengths are also drawn for empty (Group E) cells;
    # presumably ignored downstream — confirm.
    self.population[:, 1] = np.random.randint(0, 3, size=nx * nx)
    self.population[:, 2] = np.random.uniform(-1, 1, size=nx * nx)
    # Initialize the world as a queen-contiguity lattice (rook=False)
    self.world = lat2W(nx, nx, rook=False)
    # Initialize the average ID strength history list
    self.avgS = []
def test_distance_band(self):
    """A 4x4 lattice weights object has one observation per cell (n == 16)."""
    lattice = lat2W(4, 4)
    self.assertEqual(16, lattice.n)
# Loop over all tifs in indir, clip each using clip_shp, and then calculate moran's i, append to list with fiona.open(clip_file_name, 'r') as clip_shp: shapes = [feature["geometry"] for feature in clip_shp] with rio.open(tif) as src: clipped_img, clipped_transform = rio.mask.mask(src, shapes, crop=True) clipped_meta = src.meta # Mask out nodata masked_clipped_img = np.ma.masked_array(clipped_img, clipped_img == 32767) # print(masked_clipped_img.shape[1]) # print(masked_clipped_img.shape[2]) # Calculate weights matrix for the raster, and calculate Moran's i print("Building weights matrix for {x}".format(x=tif)) weights = lat2W(masked_clipped_img.shape[1], masked_clipped_img.shape[2], rook=False, id_type='int') print("Calculating Moran's i for {x}".format(x=tif)) moran = Moran(masked_clipped_img, weights) # Add moran's i to csv stats_list.append((tif_name, moran.I)) # Write stats_list to csv with open(workspace + '_morani.csv', 'w') as csv_file: writer = csv.writer(csv_file) writer.writerow(csv_header) writer.writerows(stats_list)