Example No. 1
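A test that runs a 100-step chain on Missouri VTD data and, via a custom asserting validator, checks that the single_flip_contiguous updater agrees with the full contiguous check at every step.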
def test_single_flip_contiguity_equals_contiguity():
    import random
    random.seed(1887)

    def equality_validator(partition):
        val = partition["contiguous"] == partition["flip_check"]
        assert val
        return partition["contiguous"]

    df = gp.read_file("rundmcmc/testData/mo_cleaned_vtds.shp")

    with open("rundmcmc/testData/MO_graph.json") as f:
        graph_json = json.load(f)

    graph = networkx.readwrite.json_graph.adjacency_graph(graph_json)
    assignment = get_assignment_dict_from_df(df, "GEOID10", "CD")

    validator = Validator([equality_validator])
    updaters = {
        "contiguous": contiguous,
        "cut_edges": cut_edges,
        "flip_check": single_flip_contiguous
    }

    initial_partition = Partition(graph, assignment, updaters)
    accept = lambda x: True

    chain = MarkovChain(propose_random_flip,
                        validator,
                        accept,
                        initial_partition,
                        total_steps=100)
    list(chain)
Example No. 2
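Loads Missouri VTD data and a pickled adjacency graph, runs a 100-step chain constrained to contiguous plans, and writes the mean-median and mean-thirdian score of each step to CSV files.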
def main():
    # Sketch:
    #   1. Load dataframe.
    #   2. Construct neighbor information.
    #   3. Make a graph from this.
    #   4. Throw attributes into graph.
    df = gp.read_file("./testData/mo_cleaned_vtds.shp")
    graph = networkx.readwrite.read_gpickle('example_graph.gpickle')

    add_data_to_graph(df, graph, ["PR_DV08", "PR_RV08", "P_08"], "GEOID10")

    assignment = get_assignment_dict(df, "GEOID10", "CD")

    updaters = {
        'd_votes': statistic_factory('PR_DV08', alias='d_votes'),
        'r_votes': statistic_factory('PR_RV08', alias='r_votes'),
        'cut_edges': cut_edges
    }
    initial_partition = Partition(graph, assignment, updaters)

    validator = Validator([contiguous])
    accept = lambda x: True

    chain = MarkovChain(propose_random_flip,
                        validator,
                        accept,
                        initial_partition,
                        total_steps=100)

    mm = []
    mt = []
    #eg=[]

    for state in chain:
        mm.append(
            mean_median2(state, data_column1='d_votes',
                         data_column2='r_votes'))
        mt.append(
            mean_thirdian2(state,
                           data_column1='d_votes',
                           data_column2='r_votes'))
        # eg.append(efficiency_gap(state, data_column1='d_votes', data_column2='r_votes'))

    #print(graph.nodes(data=True))
    mm_outs = [mm]  #,eg]
    mt_outs = [mt]
    #eg_outs=[eg]

    with open('mm_chain_out', "w") as output:
        writer = csv.writer(output, lineterminator='\n')
        writer.writerows(mm_outs)

    with open('mt_chain_out', "w") as output:
        writer = csv.writer(output, lineterminator='\n')
        writer.writerows(mt_outs)
Example No. 3
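A test on a 3x3 grid with random vote data: every partition produced by a 10-step chain must report vote proportions that are either valid percentages or NaN.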
def test_vote_proportion_updater_returns_percentage_or_nan_on_later_steps():
    columns = ['D', 'R']
    graph = three_by_three_grid()
    attach_random_data(graph, columns)
    assignment = random_assignment(graph, 3)
    updaters = {**votes_updaters(columns), 'cut_edges': cut_edges}

    initial_partition = Partition(graph, assignment, updaters)

    chain = MarkovChain(propose_random_flip, Validator([no_vanishing_districts]),
                        lambda x: True, initial_partition, total_steps=10)
    for partition in chain:
        assert all(is_percentage_or_nan(value) for value in partition['D%'].values())
        assert all(is_percentage_or_nan(value) for value in partition['R%'].values())
Example No. 4
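A test using mock proposal, validity, and acceptance functions to verify that iterating a MarkovChain yields exactly total_steps states.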
def test_MarkovChain_runs_only_total_steps_times():
    initial = MockState()
    chain = MarkovChain(mock_proposal,
                        mock_is_valid,
                        mock_accept,
                        initial,
                        total_steps=10)
    counter = 0
    for state in chain:
        assert isinstance(state, MockState)
        # The chain must never yield more than total_steps states...
        assert counter < 10
        counter += 1
    # ...and it must yield exactly total_steps of them.
    assert counter == 10
Example No. 5
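Runs a chain for the requested number of iterations, records the assignment at every step, deduplicates the resulting plans, and builds a distance matrix over the unique plans.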
def read_chain(graph, iterations):
    is_valid = Validator([contiguous])
    chain = MarkovChain(propose_random_flip, is_valid, always_accept, graph, total_steps=iterations)
    partitions = []
    for step in chain:
        partitions.append(step.assignment)
    print(partitions)
    # Deduplicate assignments that repeat over the course of the chain.
    newlist = [dict(s) for s in set(frozenset(d.items()) for d in partitions)]
    print(newlist)
    distance_matrix = bmt.build_distance_matrix(newlist)
    return distance_matrix
Example No. 6
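Builds a Pennsylvania partition from a saved adjacency graph with election, population, boundary, and compactness updaters, then returns the partition together with a chain constrained to stay within 1% of ideal population and above a self-configuring Polsby-Popper bound.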
def set_up_chain(plan, total_steps, adjacency_type='queen'):
    graph = Graph.load(f"./PA_{adjacency_type}.json").graph

    assignment = {node: graph.nodes[node][plan] for node in graph.nodes}

    updaters = {
        **votes_updaters(elections["2016_Presidential"],
                         election_name="2016_Presidential"),
        **votes_updaters(elections["2016_Senate"], election_name="2016_Senate"),
        'population': Tally('population', alias='population'),
        'perimeters': perimeters,
        'exterior_boundaries': exterior_boundaries,
        'interior_boundaries': interior_boundaries,
        'boundary_nodes': boundary_nodes,
        'cut_edges': cut_edges,
        'areas': Tally('area', alias='areas'),
        'polsby_popper': polsby_popper,
        'cut_edges_by_part': cut_edges_by_part
    }

    partition = Partition(graph, assignment, updaters)

    population_constraint = within_percent_of_ideal_population(partition, 0.01)
    compactness_constraint = SelfConfiguringLowerBound(L_minus_1_polsby_popper,
                                                       epsilon=0.1)

    is_valid = Validator(default_constraints +
                         [population_constraint, compactness_constraint])

    return partition, MarkovChain(propose_random_flip, is_valid, always_accept,
                                  partition, total_steps)
Example No. 7
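A minimal Wyoming example: constructs a graph from a test shapefile, attaches district and land-area data, and prints the assignment at each of 10 chain steps.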
def main():
    graph = construct_graph(*ingest("./testData/wyoming_test.shp", "GEOID"))

    cd_data = get_list_of_data('./testData/wyoming_test.shp', ['CD', 'ALAND'])

    add_data_to_graph(cd_data, graph, ['CD', 'ALAND'])

    assignment = pull_districts(graph, 'CD')
    validator = Validator([contiguous])

    initial_partition = Partition(graph,
                                  assignment,
                                  aggregate_fields=['ALAND'])
    accept = lambda x: True

    chain = MarkovChain(propose_random_flip,
                        validator,
                        accept,
                        initial_partition,
                        total_steps=10)

    for step in chain:
        print(step.assignment)
Example No. 8
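A mid-script excerpt: after saving an image of the initial plan and printing the perimeter and boundary updaters, it builds the chain object and begins defining a dictionary of scores to track (the scores dictionary is cut off in this excerpt).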
plt.savefig(newdir + district_col + "_initial.png")
plt.close()

start_time = time.time()

print("setup chain")

print(initial_partition["perimeters"])
print(initial_partition["interior_boundaries"])

print(initial_partition["exterior_boundaries"])

# This builds the chain object for us to iterate over
chain = MarkovChain(proposal_method,
                    validator,
                    acceptance_method,
                    initial_partition,
                    total_steps=steps)

# for part in chain:
#     print(part["perimeters"])

print("built chain")

# Post processing commands go below
# Adds election Scores

scores = {
    'L1 Reciprocal Polsby-Popper': L1_reciprocal_polsby_popper,
    'L -1 Polsby-Popper': L_minus_1_polsby_popper,
    'Worst Population': worst_pop,
Example No. 9
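A self-contained toy run: a 20x20 Grid partition is walked for 5,000 single-flip steps, and the population of district 0 at each step is plotted as a histogram.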
from rundmcmc.validity import Validator, single_flip_contiguous
from rundmcmc.proposals import propose_random_flip
from rundmcmc.accept import always_accept
from rundmcmc.chain import MarkovChain
from rundmcmc.grid import Grid
import matplotlib.pyplot as plt

is_valid = Validator([single_flip_contiguous])

# Make a 20x20 grid
grid = Grid((20, 20))

chain = MarkovChain(propose_random_flip,
                    is_valid,
                    always_accept,
                    grid,
                    total_steps=5000)

pops = []
for partition in chain:
    # Grab district 0's population.
    pops.append(partition["population"][0])
    print(partition)

plt.style.use("ggplot")
plt.hist(pops)
plt.title("Population of district 0 over time")
plt.xlabel("Population")
plt.ylabel("Frequency")
plt.show()
Example No. 10
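A Utah run: combines a shapefile and an adjacency graph, sets up vote, population, boundary, and compactness updaters, runs the chain for a command-line-specified number of steps, and saves histograms of four partisan and compactness scores.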
def main():
    # Get the data, set the number of steps, and denote the column header
    # containing vote data.
    datapath = "./Prorated/Prorated.shp"
    graphpath = "./graphs/utah.json"
    steps = int(sys.argv[-1])
    r_header = "R"
    d_header = "D"

    # Generate a dataframe, graph, and then combine the two.
    df = gpd.read_file(datapath)
    graph = construct_graph(graphpath)
    add_data_to_graph(df, graph, [r_header, d_header], id_col="GEOID10")

    # Get the district assignment and add updaters.
    assignment = dict(
        zip(graph.nodes(), [graph.node[x]["CD"] for x in graph.nodes()]))
    updaters = {
        **votes_updaters([r_header, d_header]),
        "population": Tally("POP10", alias="population"),
        "perimeters": perimeters,
        "exterior_boundaries": exterior_boundaries,
        "interior_boundaries": interior_boundaries,
        "boundary_nodes": boundary_nodes,
        "cut_edges": cut_edges,
        "areas": Tally("ALAND10", alias="areas"),
        "polsby_popper": polsby_popper,
        "cut_edges_by_part": cut_edges_by_part
    }

    # Create an initial partition and a Pennsylvania-esque chain run.
    initial_partition = Partition(graph, assignment, updaters)
    validator = Validator(
        [refuse_new_splits, no_vanishing_districts, single_flip_contiguous])
    chain = MarkovChain(propose_random_flip,
                        validator,
                        always_accept,
                        initial_partition,
                        total_steps=steps)

    # Pick the scores we want to track.
    scores = {
        "Mean-Median":
        functools.partial(mean_median, proportion_column_name=r_header + "%"),
        "Mean-Thirdian":
        functools.partial(mean_thirdian,
                          proportion_column_name=d_header + "%"),
        "Efficiency Gap":
        functools.partial(efficiency_gap, col1=r_header, col2=d_header),
        "L1 Reciprocal Polsby-Popper":
        L1_reciprocal_polsby_popper
    }

    # Set initial scores, then allow piping and plotting things.
    initial_scores = {
        key: score(initial_partition)
        for key, score in scores.items()
    }
    table = pipe_to_table(chain, scores)
    fig, axes = plt.subplots(2, 2)

    # Configuring where the plots go.
    quadrants = {
        "Mean-Median": (0, 0),
        "Mean-Thirdian": (0, 1),
        "Efficiency Gap": (1, 0),
        "L1 Reciprocal Polsby-Popper": (1, 1)
    }

    # Plotting things!
    for key in scores:
        quadrant = quadrants[key]
        axes[quadrant].hist(table[key], bins=50)
        axes[quadrant].set_title(key)
        axes[quadrant].axvline(x=initial_scores[key], color="r")

    # Save the histogram figure.
    plt.savefig(f"./output/histograms/{steps}.png")
Example No. 11
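Runs a 1,000-step chain on a grid partition with contiguity, no-vanishing-districts, population, and perimeter constraints, saving a matshow image of the plan at every step for animation.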
pop_limit = .3
population_constraint = within_percent_of_ideal_population(grid, pop_limit)

grid_validator2 = Validator([
    single_flip_contiguous, no_vanishing_districts, population_constraint,
    perimeter_constraint
])

grid_validator = Validator([fast_connected, no_vanishing_districts, grid_size])

dumb_validator = Validator([fast_connected, no_vanishing_districts])

chain = MarkovChain(propose_random_flip_no_loops,
                    grid_validator2,
                    always_accept,
                    grid,
                    total_steps=1000)

# Outputs .pngs for animating
newdir = "./Outputs/Grid_Plots/"
os.makedirs(os.path.dirname(newdir + "init.txt"), exist_ok=True)
with open(newdir + "init.txt", "w") as f:
    f.write("Created Folder")

counter = 0
for partition in chain:
    plt.matshow(partition.as_list_of_lists())
    plt.savefig(newdir + "g3_%04d.png" % counter)
    plt.close()
    counter += 1
Example No. 12
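Loads a graph with congressional district data from a JSON adjacency file and prints the assignment at each of 1,000 single-flip steps.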
from rundmcmc.make_graph import construct_graph
from rundmcmc.accept import always_accept
from rundmcmc.partition import Partition
from rundmcmc.proposals import propose_random_flip
from rundmcmc.updaters import cut_edges
from rundmcmc.chain import MarkovChain
# The constraints used below are assumed to live in rundmcmc.validity alongside Validator.
from rundmcmc.validity import (Validator, no_vanishing_districts,
                               refuse_new_splits, single_flip_contiguous)

# Some file that contains a graph with congressional district data.
path = "./45_rook.json"
steps = 1000

graph = construct_graph(path)
# Gross!
assignment = dict(
    zip(graph.nodes(), [graph.node[x]['CD'] for x in graph.nodes()]))

updaters = {'cut_edges': cut_edges}

initial_partition = Partition(graph, assignment, updaters)

validator = Validator(
    [refuse_new_splits, no_vanishing_districts, single_flip_contiguous])
chain = MarkovChain(propose_random_flip,
                    validator,
                    always_accept,
                    initial_partition,
                    total_steps=steps)

for i, partition in enumerate(chain):
    print("{}/{}".format(i + 1, steps))
    print(partition.assignment)
Example No. 13
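Reads an INI configuration file and assembles everything needed for a run: the graph, the initial assignment, Tally updaters for vote columns, validators (optionally with percentage bounds), and the proposal and acceptance functions, returning the chain along with the evaluation-score settings.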
def read_basic_config(configFileName):
    """Reads basic configuration file and sets up a chain run

    :configFileName: relative path to config file
    :returns: Partition instance and MarkovChain instance

    """
    # set up the config file parser
    config = configparser.ConfigParser()
    config.read(configFileName)

    # SET UP GRAPH AND PARTITION SECTION
    # create graph and get global names for required graph attributes
    graph, POP, AREA, CD = gsource_gdata(config, 'GRAPH_SOURCE', 'GRAPH_DATA')

    voteDataList = vsource_vdata(graph, config, 'VOTE_DATA_SOURCE',
                                 'VOTE_DATA')
    # create a Tally updater for each vote column
    DataUpdaters = {v: updates.Tally(v) for v in voteDataList}
    # construct initial districting plan
    assignment = {x[0]: x[1][CD] for x in graph.nodes(data=True)}
    # set up validator functions and create Validator class instance
    validatorsUpdaters = []
    validators = []
    if config.has_section('VALIDITY') and len(list(
            config['VALIDITY'].keys())) > 0:
        validators = list(config['VALIDITY'].values())
        for i, x in enumerate(validators):
            if len(x.split(',')) == 1:
                validators[i] = getattr(valids, x)
            else:
                [y, z] = x.split(',')
                validators[i] = valids.WithinPercentRangeOfBounds(
                    getattr(valids, y), z)
        validatorsUpdaters.extend(
            [x.split(',')[0] for x in config['VALIDITY'].values()])

    validators = valids.Validator(validators)
    # add the updaters required by these validators
    for x in validatorsUpdaters:
        DataUpdaters.update(dependencies(x, POP, AREA))
    # END SET UP GRAPH AND PARTITION SECTION

    # SET UP MARKOVCHAIN RUN SECTION
    # set up parameters for markovchain run
    chainparams = config['MARKOV_CHAIN']
    # number of steps to run
    num_steps = 1000
    if 'num_steps' in list(chainparams.keys()):
        num_steps = int(chainparams['num_steps'])
    # type of flip to use
    proposal = proposals.propose_random_flip
    if 'proposal' in list(chainparams.keys()):
        proposal = getattr(proposals, chainparams['proposal'])
    # acceptance function to use
    accept = accepts.always_accept
    if 'accept' in list(chainparams.keys()):
        accept = getattr(accepts, chainparams['accept'])
    # END SET UP MARKOVCHAIN RUN SECTION

    # SET UP DATA PROCESSOR FOR CHAIN RUN
    # get evaluation scores to compute and the columns to use for each
    escores, cfunc, elist, sVisType, outFName = escores_edata(
        config, "EVALUATION_SCORES", "EVALUATION_SCORES_DATA")

    # add the updaters required by the evaluation scores
    for x in elist:
        DataUpdaters.update(dependencies(x, POP, AREA))

    # END SET UP DATA PROCESSOR FOR CHAIN RUN
    updaters = DataUpdaters

    # create markovchain instance
    initial_partition = Partition(graph, assignment, updaters)
    chain = MarkovChain(proposal, validators, accept, initial_partition,
                        num_steps)

    return chain, cfunc, escores, sVisType, outFName
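For reference, the file consumed by read_basic_config is a standard configparser INI file. The sketch below shows one way such a file could be generated; only the section names and the MARKOV_CHAIN keys (num_steps, proposal, accept) are taken from the code above, every other key and value is a hypothetical placeholder, and the EVALUATION_SCORES sections are omitted because their layout is not visible in this excerpt.

import configparser

# Hypothetical example config, illustrative only (see note above).
config = configparser.ConfigParser()
config.read_dict({
    'GRAPH_SOURCE': {'gSource': './testData/MO_graph.json'},
    'GRAPH_DATA': {'pop': 'POP100', 'area': 'ALAND10', 'cd': 'CD'},
    'VOTE_DATA_SOURCE': {'vSource': './testData/mo_cleaned_vtds.shp'},
    'VOTE_DATA': {'col1': 'PR_DV08', 'col2': 'PR_RV08'},
    'VALIDITY': {'1': 'contiguous'},
    'MARKOV_CHAIN': {
        'num_steps': '1000',
        'proposal': 'propose_random_flip',
        'accept': 'always_accept',
    },
})

# Write the sketch to disk so read_basic_config could pick it up.
with open('basic_config.ini', 'w') as f:
    config.write(f)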
Example No. 14
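Makes a 20x20 grid and runs a short 100-step chain with the reversible_chunk_flip proposal, mostly for testing proposals; each step is saved as a .png for animation.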
# Makes a simple grid and runs the MCMC. Mostly for testing proposals

grid = Grid((20, 20))  # was (4,4)

pop_limit = .3
population_constraint = within_percent_of_ideal_population(grid, pop_limit)

grid_validator2 = Validator([fast_connected, no_vanishing_districts,
                             population_constraint])

grid_validator = Validator([fast_connected, no_vanishing_districts, grid_size])

dumb_validator = Validator([fast_connected, no_vanishing_districts])


chain = MarkovChain(reversible_chunk_flip, grid_validator2, always_accept,
                    grid, total_steps=100)

# Outputs .pngs for animating
newdir = "./Outputs/Grid_Plots/"
os.makedirs(os.path.dirname(newdir + "init.txt"), exist_ok=True)
with open(newdir + "init.txt", "w") as f:
    f.write("Created Folder")

i = 1
for partition in chain:
    plt.matshow(partition.as_list_of_lists())
    plt.savefig(newdir + "g3_%04d.png" % i)
    plt.close()
    i += 1

# To animate: