def test_has_informative_repr(self):
    """The repr of an UpperBound names the wrapped function and the bound."""

    def my_function(x):
        return 150

    constraint = constraints.UpperBound(my_function, 100)
    expected = "<UpperBound(my_function >= 100)>"
    assert repr(constraint) == expected
def run_GerryChain_heuristic(G, population_deviation, k, iterations):
    """Heuristically search for a k-district plan minimizing weighted cut edges.

    Runs a ReCom Markov chain for ``iterations`` steps starting from a
    recursive-tree partition of ``G``, tracking the sampled plan whose cut
    edges have the smallest total ``edge_length``.

    Returns a tuple ``(districts, min_cut_edges)`` where ``districts`` is a
    list of k node lists and ``min_cut_edges`` is the best objective seen.
    """
    my_updaters = {"population": updaters.Tally("TOTPOP", alias="population")}
    # Ideal district population; both the seed partition and the proposal
    # target total population / k.
    total_population = sum(G.nodes[i]["TOTPOP"] for i in G.nodes())
    start = recursive_tree_part(G, range(k), total_population / k, "TOTPOP",
                                population_deviation / 2, 1)
    initial_partition = GeographicPartition(G, start, updaters=my_updaters)
    proposal = partial(recom,
                       pop_col="TOTPOP",
                       pop_target=total_population / k,
                       epsilon=population_deviation / 2,
                       node_repeats=2)
    # Never allow more than 1.5x the starting plan's cut-edge count.
    compactness_bound = constraints.UpperBound(
        lambda p: len(p["cut_edges"]),
        1.5 * len(initial_partition["cut_edges"]))
    pop_constraint = constraints.within_percent_of_ideal_population(
        initial_partition, population_deviation / 2)
    my_chain = MarkovChain(proposal=proposal,
                           constraints=[pop_constraint, compactness_bound],
                           accept=accept.always_accept,
                           initial_state=initial_partition,
                           total_steps=iterations)

    # Incumbent objective starts at the total edge weight of G (upper bound).
    min_cut_edges = sum(G[i][j]['edge_length'] for i, j in G.edges)
    # BUG FIX: best_partition was only assigned inside the improvement branch,
    # so the return raised NameError whenever no sampled plan improved on the
    # initial bound. Seed it with the initial partition instead.
    best_partition = initial_partition
    print("In GerryChain heuristic, current # of cut edges: ", end='')
    print(min_cut_edges, ",", sep='', end=' ')
    for partition in my_chain:
        current_cut_edges = sum(G[i][j]['edge_length']
                                for i, j in partition["cut_edges"])
        print(current_cut_edges, ",", sep='', end=' ')
        if current_cut_edges < min_cut_edges:
            best_partition = partition
            min_cut_edges = current_cut_edges

    print("Best heuristic solution has # cut edges =", min_cut_edges)
    return ([[i for i in G.nodes if best_partition.assignment[i] == j]
             for j in range(k)], min_cut_edges)
""" updater = { "population": updaters.Tally("TAPERSONS", alias="population"), "cut_edges",cut_edges, "BVAP":Election("BVAP",{"BVAP":"VAPBLACK","nBVAP":"nBVAP"}), "LTGOV":Election("LTGOV",{"D":"D_LTGOV","R":"R_LTGOV"}), "GOV":Election("GOV",{"D":"D_GOV","R":"R_GOV"}), "AG":Election("AG",{"D":"D_ATTGEN","R":"R_ATTGEN"}), } """ #print("parts",len(initial_partition)) #print(sorted(initial_partition["BVAP"].percents("BVAP"))) compactness_bound = constraints.UpperBound( lambda p: len(p["cut_edges"]), 12000) num_elections = 4 election_names = [ "BVAP", "LTGOV", "GOV", "AG"] election_columns = [ ["VAPBLACK", "nBVAP"],
# # } # ) # # ============================================================================= #------------------------------------------------------------------------------------------- #CHAIN FOR TOTPOP proposal = partial(recom, pop_col=pop_col, pop_target=tot_pop_col / num_districts, epsilon=0.02, node_repeats=1) compactness_bound = constraints.UpperBound( lambda p: len(p["cut_edges"]), 2 * len(starting_partition["cut_edges"])) chain = MarkovChain( proposal, constraints=[ constraints.within_percent_of_ideal_population(starting_partition, 0.10), compactness_bound #constraints.single_flip_contiguous#no_more_discontiguous ], accept=accept.always_accept, initial_state=starting_partition, total_steps=10000) #CHAIN FOR CPOP #proposal = partial( # recom, pop_col=ccol, pop_target=tot_ccol/num_districts, epsilon=0.02, node_repeats=1
# Ideal (average) district population for the current number of districts.
ideal_population = (
    sum(initial_partition["population"].values()) / len(initial_partition)
)

proposal = partial(
    recom,
    pop_col="TOT_POP",
    pop_target=ideal_population,
    epsilon=0.02,
    node_repeats=2,
)


def cut_length(partition):
    """Number of edges that cross district boundaries in `partition`."""
    return len(partition["cut_edges"])


# Cap cut edges at double the starting plan's count.
compactness_bound = constraints.UpperBound(cut_length,
                                           2 * cut_length(initial_partition))

chain = MarkovChain(
    proposal=proposal,
    constraints=[
        constraints.within_percent_of_ideal_population(initial_partition, 0.02),
        compactness_bound,
        # single_flip_contiguous#no_more_discontiguous
    ],
    accept=accept.always_accept,
    initial_state=initial_partition,
    total_steps=1000,
)

# Per-step traces collected while iterating the chain.
pop_vec = []
cut_vec = []
updaters)  # NOTE(review): trailing argument of a call that begins before this chunk

# ---- Chain Run ------
# Total population across the graph, summed from the configured column.
pop = 0
for n in graph.nodes:
    pop = pop + graph.nodes[n][pop_col]

proposal = partial(recom,
                   pop_col=pop_col,
                   pop_target=pop / num_dist,
                   epsilon=0.05,
                   node_repeats=3)

# Never exceed the starting plan's cut-edge count...
compactness_bound = constraints.UpperBound(lambda p: len(p["cut_edges"]),
                                           len(initial_partition["cut_edges"]))
# ...nor its number of county splits.
county_splits_bound = constraints.UpperBound(
    lambda p: calc_splits(p["county_splits"]),
    calc_splits(initial_partition["county_splits"]))

chain = MarkovChain(
    proposal=proposal,
    constraints=[
        constraints.within_percent_of_ideal_population(initial_partition, 0.05),
        compactness_bound,
        county_splits_bound
    ],
    accept=accept.always_accept,
    initial_state=initial_partition,
    total_steps=steps,
    # NOTE(review): this MarkovChain(...) call continues past the visible chunk.
proposals = []
compactness_bounds = []
chains = []
# Build one chain per graph: seed partition, ReCom proposal, compactness cap,
# and the MarkovChain itself, stored in parallel lists.
for idx in range(4):
    initial_partitions.append(Partition(graph_list[idx], starts[idx], updater))
    proposals.append(
        partial(
            recom,
            pop_col="TOTPOP",
            pop_target=totpop[idx] / num_districts,
            epsilon=0.02,
            node_repeats=1,
        )
    )
    # Cap cut edges at twice the seed plan's count.
    compactness_bounds.append(
        constraints.UpperBound(
            lambda p: len(p["cut_edges"]),
            2 * len(initial_partitions[idx]["cut_edges"]),
        )
    )
    chains.append(
        MarkovChain(
            proposal=proposals[idx],
            constraints=[
                constraints.within_percent_of_ideal_population(
                    initial_partitions[idx], 0.05),
                compactness_bounds[idx]
                #constraints.single_flip_contiguous#no_more_discontiguous
            ],
            accept=accept.always_accept,
            initial_state=initial_partitions[idx],
            total_steps=1000,
        )
    )

# Per-chain traces: cut-edge counts and BVAP shares.
cuts = [[], [], [], []]
BVAPS = [[], [], [], []]
# Scan the splits-biased chain for a plan meeting the county-splits target.
for part in chain_county_splits_requirements:
    split_parts = calc_splits(part["county_splits"])
    if split_parts <= county_splits_ref:
        # Found a plan at/below the reference number of splits: adopt it.
        random_init_partition = part
        found_acceptable_splits_plan = True
        county_splits_bound_num = county_splits_ref
        break
    if split_parts <= accumulating_smallest_splits:
        # NOTE(review): `accumulating_smallest_splits` is never updated here,
        # so this branch keeps the *last* plan below the initial threshold
        # rather than the best-so-far — confirm whether
        # `accumulating_smallest_splits = split_parts` was intended.
        random_init_partition = part
if not (found_acceptable_splits_plan):
    # NOTE(review): this fallback overwrites any partition saved by the
    # accumulating branch above with the final `part` from the loop —
    # verify that discarding the tracked plan is intended.
    random_init_partition = part
    county_splits_bound_num = calc_splits(
        random_init_partition["county_splits"])
# Bound future plans by the chosen number of county splits.
county_splits_bound = constraints.UpperBound(
    lambda p: calc_splits(p["county_splits"]), county_splits_bound_num)

# Cut Edges test
cut_edges_partition = len(random_init_partition["cut_edges"])
if cut_edges_partition > compactness_ref:
    # Starting plan is not compact enough: run a long chain whose acceptance
    # function is biased toward reducing cut edges.
    chain_cut_edges_requirements = MarkovChain(
        proposal=proposal,
        constraints=[
            constraints.within_percent_of_ideal_population(
                random_init_partition, 0.05), racial_bound, county_splits_bound
        ],
        accept=biased_acceptance_cut_edges,
        initial_state=random_init_partition,
        total_steps=100000)
def MC_sample(jgraph, settings, save_part = True):
    """
    Sample partitions from a GerryChain Markov chain.

    :param jgraph: gerrychain Graph object
    :param settings: settings dictionary (possibly loaded from a yaml file)
        with election info, MC parameters, and constraints params (see
        settings.yaml file for an example of the structure needed)
    :param save_part: True if you want to save the partitions as json
    :returns: a list of partitions sampled every ``interval`` steps
    """
    my_updaters = {
        "cut_edges": cut_edges,
        "population": updaters.Tally("TOTPOP", alias = "population"),
        "avg_pop_dist": avg_pop_dist,
        "pop_dist_pct" : pop_dist_pct,
        "area_land": updaters.Tally("ALAND10", alias = "area_land"),
        "area_water": updaters.Tally("AWATER10", alias = "area_water"),
        "Perimeter": updaters.Tally("perimeter", alias = "Perimeter"),
        "Area": updaters.Tally("area", alias = "Area")
    }

    # Chain configuration pulled out of the settings dict.
    num_elections = settings['num_elections']
    election_names = settings['election_names']
    election_columns = settings['election_columns']
    num_steps = settings['num_steps']
    interval = settings['interval']
    pop_tol = settings['pop_tol']
    MC_type = settings['MC_type']

    # One Election updater per configured contest (Democratic column first).
    elections = [
        Election(
            election_names[i],
            {"Democratic": election_columns[i][0],
             "Republican": election_columns[i][1]},
        )
        for i in range(num_elections)
    ]
    election_updaters = {election.name: election for election in elections}
    my_updaters.update(election_updaters)

    initial_partition = Partition(
        jgraph, "CD", my_updaters
    )  # by typing in "CD," we are saying to put every county into the congressional district that they belong to
    print('computed initial partition')

    # Ideal (average) district population.
    ideal_population = sum(initial_partition["population"].values()) / len(
        initial_partition
    )
    pop_constraint = constraints.within_percent_of_ideal_population(initial_partition, pop_tol)
    # Cap cut edges at twice the initial plan's count.
    compactness_bound = constraints.UpperBound(
        lambda p: len(p["cut_edges"]), 2 * len(initial_partition["cut_edges"])
    )
    # NOTE(review): `pop_col` is not defined in this function — presumably a
    # module-level global; confirm it matches the "TOTPOP" tally above.
    proposal = partial(
        recom,
        pop_col=pop_col,
        pop_target=ideal_population,
        epsilon=pop_tol,
        node_repeats=1)

    constraints_=[pop_constraint, compactness_bound]
    if MC_type == "flip":
        # Single-flip chain: random flips with per-flip contiguity enforced.
        proposal = propose_random_flip
        constraints_=[single_flip_contiguous, pop_constraint, compactness_bound]

    chain = MarkovChain(
        proposal=proposal,
        constraints=constraints_,
        accept=always_accept,
        initial_state=initial_partition,
        total_steps=num_steps
    )

    partitions=[] # recording partitions at each step
    for index, part in enumerate(chain):
        if index % interval == 0:
            print('Markov chain step '+str(index))
            partitions += [part]

    if save_part:
        sd.dump_run(settings['partitions_path'], partitions)
        print('saved partitions to '+ settings['partitions_path'])

    return(partitions)
def test_bound_allows_equality(self):
    """A value exactly at the bound satisfies an UpperBound."""
    upper = constraints.UpperBound(lambda value: value, 100)
    result = upper(100)
    assert result is True
def test_fails_values_above_bound(self):
    """Values strictly above the bound are rejected."""
    upper = constraints.UpperBound(lambda value: value, 100)
    result = upper(150)
    assert result is False
def test_passes_values_below_bound(self):
    """Values strictly below the bound are accepted."""
    upper = constraints.UpperBound(lambda value: value, 100)
    result = upper(50)
    assert result is True
def run_GerryChain_heuristic(G, population_deviation, k, threshold, iterations):
    """Bi-objective ReCom heuristic over districting plans.

    Samples ``iterations`` plans with a ReCom chain, scoring each by
    (weighted cut edges, # districts whose minority VAP share exceeds
    ``threshold``), and extracts the Pareto frontier of the two objectives.

    Returns (optimal_maps, optimal_cut_edges, optimal_minority_districts,
    obj_vals): frontier plans as per-district node lists, their two objective
    values, and the full list of sampled objective pairs.
    """
    my_updaters = {"population": updaters.Tally("TOTPOP", alias="population")}
    # Ideal district population; both the seed partition and proposal target it.
    total_population = sum(G.nodes[i]["TOTPOP"] for i in G.nodes())
    start = recursive_tree_part(G, range(k), total_population / k, "TOTPOP",
                                population_deviation / 2, 1)
    initial_partition = GeographicPartition(G, start, updaters=my_updaters)
    proposal = partial(recom,
                       pop_col="TOTPOP",
                       pop_target=total_population / k,
                       epsilon=population_deviation / 2,
                       node_repeats=2)
    compactness_bound = constraints.UpperBound(
        lambda p: len(p["cut_edges"]),
        1.5 * len(initial_partition["cut_edges"]))
    pop_constraint = constraints.within_percent_of_ideal_population(
        initial_partition, population_deviation / 2)
    my_chain = MarkovChain(proposal=proposal,
                           constraints=[pop_constraint, compactness_bound],
                           accept=accept.always_accept,
                           initial_state=initial_partition,
                           total_steps=iterations)

    # Incumbent cut-edge objective starts at G's total edge weight (an upper bound).
    min_cut_edges = sum(G[i][j]['edge_length'] for i, j in G.edges)
    print(
        "In GerryChain heuristic, current # of cut edges and minority districts: ",
        end='')
    print(min_cut_edges, ",", sep='', end=' ')
    max_of_minority_districts = -1
    all_maps = []
    pareto_frontier = []
    obj_vals = []
    for partition in my_chain:
        current_cut_edges = sum(G[i][j]['edge_length']
                                for i, j in partition["cut_edges"])
        # Count districts whose minority VAP share exceeds `threshold`.
        number_minority_district = 0
        for district in range(k):
            total_pop_district = 0
            total_pop_minority = 0
            for node in partition.graph:
                if partition.assignment[node] == district:
                    # BUG FIX: `G.node` was removed in networkx 2.4; use
                    # `G.nodes` (consistent with the rest of this module).
                    total_pop_district += G.nodes[node]["VAP"]
                    total_pop_minority += G.nodes[node]["BVAP"]
                    #total_pop_minority += G.nodes[node]["HVAP"]
                    #total_pop_minority += G.nodes[node]["AMINVAP"]
                    #total_pop_minority += G.nodes[node]["ASIANVAP"]
                    #total_pop_minority += G.nodes[node]["NHPIVAP"]
            if (total_pop_minority > threshold * total_pop_district):
                number_minority_district += 1
        if number_minority_district > max_of_minority_districts:
            max_of_minority_districts = number_minority_district
        if current_cut_edges < min_cut_edges:
            min_cut_edges = current_cut_edges
        print((current_cut_edges, number_minority_district),
              ",", sep='', end=' ')
        obj_vals.append([current_cut_edges, number_minority_district])
        all_maps.append(
            [partition, current_cut_edges, number_minority_district])

    print("Best heuristic solution has # cut edges =", min_cut_edges)
    print("Best heuristic solution has # minority districts =",
          max_of_minority_districts)

    # Sort by cut edges ascending, then (stably) by minority districts
    # descending: all_maps[0] has the most minority districts, ties broken
    # by fewer cut edges.
    all_maps.sort(key=lambda x: x[1])
    all_maps.sort(key=lambda x: x[2], reverse=True)
    pareto_frontier.append(all_maps[0])
    least_number_of_cut_edges = all_maps[0][1]
    # Sweeping down the minority-district ordering, keep each plan that
    # strictly improves the cut-edge objective: the Pareto frontier.
    for i in range(1, len(all_maps)):
        if all_maps[i][1] < least_number_of_cut_edges:
            pareto_frontier.append(all_maps[i])
            least_number_of_cut_edges = all_maps[i][1]
    print("Pareto Frontier: ", pareto_frontier)

    optimal_maps = []
    optimal_cut_edges = []
    optimal_minority_districts = []
    for plan in pareto_frontier:
        optimal_maps.append(
            [[i for i in G.nodes if plan[0].assignment[i] == j]
             for j in range(k)])
        optimal_cut_edges.append(plan[1])
        optimal_minority_districts.append(plan[2])
    return (optimal_maps, optimal_cut_edges, optimal_minority_districts,
            obj_vals)