예제 #1
0
def example_partition():
    """Build the Missouri test Partition.

    Loads the cleaned VTD shapefile and the pre-built MO adjacency graph
    from TEST_DATA_PATH, copies the vote/population/area/county columns
    onto the graph nodes, and returns a Partition keyed by the "CD"
    district assignment with the standard updaters attached.
    """
    shp = gp.read_file(os.path.join(TEST_DATA_PATH, "mo_cleaned_vtds.shp"))

    with open(os.path.join(TEST_DATA_PATH, "MO_graph.json")) as graph_file:
        graph = networkx.readwrite.json_graph.adjacency_graph(
            json.load(graph_file))

    # GEOID10 -> congressional district label.
    assignment = get_assignment_dict(shp, "GEOID10", "CD")

    node_columns = ['PR_DV08', 'PR_RV08', 'POP100', 'ALAND10', 'COUNTYFP10']
    add_data_to_graph(shp, graph, node_columns, id_col='GEOID10')

    # Start from the vote-share updaters and layer the rest on top.
    updaters = dict(votes_updaters(['PR_DV08', 'PR_RV08'], election_name='08'))
    updaters['population'] = Tally('POP100', alias='population')
    updaters['counties'] = county_splits('counties', 'COUNTYFP10')
    updaters['cut_edges'] = cut_edges
    updaters['cut_edges_by_part'] = cut_edges_by_part

    return Partition(graph, assignment, updaters)
예제 #2
0
def main():
    """Run a 100-step random-flip chain on the Missouri VTD graph and
    write the mean-median / mean-thirdian score traces to CSV files.

    Sketch:
      1. Load the dataframe.
      2. Load the pre-pickled adjacency graph.
      3. Attach vote columns to the graph.
      4. Build the initial partition and walk the chain.
    """
    df = gp.read_file("./testData/mo_cleaned_vtds.shp")
    graph = networkx.readwrite.read_gpickle('example_graph.gpickle')

    add_data_to_graph(df, graph, ["PR_DV08", "PR_RV08", "P_08"], "GEOID10")
    assignment = get_assignment_dict(df, "GEOID10", "CD")

    updaters = {
        'd_votes': statistic_factory('PR_DV08', alias='d_votes'),
        'r_votes': statistic_factory('PR_RV08', alias='r_votes'),
        'cut_edges': cut_edges,
    }
    initial_partition = Partition(graph, assignment, updaters)

    def accept(partition):
        # Accept every proposed flip.
        return True

    chain = MarkovChain(propose_random_flip, Validator([contiguous]), accept,
                        initial_partition, total_steps=100)

    mean_medians = []
    mean_thirdians = []
    for state in chain:
        mean_medians.append(
            mean_median2(state, data_column1='d_votes', data_column2='r_votes'))
        mean_thirdians.append(
            mean_thirdian2(state, data_column1='d_votes',
                           data_column2='r_votes'))

    # Each output file holds a single row: the score at every chain step.
    for path, rows in (('mm_chain_out', [mean_medians]),
                       ('mt_chain_out', [mean_thirdians])):
        with open(path, "w") as output:
            csv.writer(output, lineterminator='\n').writerows(rows)
예제 #3
0
def test_add_data_to_graph_can_handle_unset_index_when_id_col_is_passed():
    """Columns should be matched to nodes via ``id_col`` even when the
    dataframe's index has not been set to the node ids."""
    graph = networkx.Graph([('01', '02'), ('02', '03'), ('03', '01')])
    df = pandas.DataFrame({
        '16SenDVote': [20, 30, 50],
        'node': ['01', '02', '03']
    })

    add_data_to_graph(df, graph, ['16SenDVote'], id_col='node')

    expected = {'01': 20, '02': 30, '03': 50}
    for node, votes in expected.items():
        assert graph.nodes[node]['16SenDVote'] == votes
예제 #4
0
def test_add_data_to_graph_can_handle_column_names_that_start_with_numbers():
    """Column names beginning with a digit (e.g. '16SenDVote') must be
    copied onto the graph nodes without mangling."""
    graph = networkx.Graph([('01', '02'), ('02', '03'), ('03', '01')])
    df = pandas.DataFrame({
        '16SenDVote': [20, 30, 50],
        'node': ['01', '02', '03']
    }).set_index('node')

    add_data_to_graph(df, graph, ['16SenDVote'])

    for node, votes in (('01', 20), ('02', 30), ('03', 50)):
        assert graph.nodes[node]['16SenDVote'] == votes
예제 #5
0
def vsource_vdata(graph, config, voteSource, voteData):
    """Add data to graph from the config file VOTE_SOURCE and VOTE_DATA sections.

    Returns the list of data-column names that were attached to the graph;
    an empty list when the vote-source section is absent from the config.
    """
    if not config.has_section(voteSource):
        return []

    source_section = config[voteSource]
    data_section = config[voteData]

    shapefile_path = source_section['vSource']
    id_column = source_section['vSourceID']
    columns = list(data_section.values())

    frame = mgs.get_list_of_data(shapefile_path, columns, id_column)
    mgs.add_data_to_graph(frame, graph, columns, id_column)

    return list(data_section.values())
예제 #6
0
def main():
    """Run a short 10-step random-flip chain on the Wyoming test shapefile,
    printing each step's district assignment."""
    graph = construct_graph(*ingest("./testData/wyoming_test.shp", "GEOID"))

    district_data = get_list_of_data('./testData/wyoming_test.shp',
                                     ['CD', 'ALAND'])
    add_data_to_graph(district_data, graph, ['CD', 'ALAND'])

    assignment = pull_districts(graph, 'CD')
    initial_partition = Partition(graph,
                                  assignment,
                                  aggregate_fields=['ALAND'])

    def accept(partition):
        # Accept every proposed flip.
        return True

    chain = MarkovChain(propose_random_flip, Validator([contiguous]), accept,
                        initial_partition, total_steps=10)

    for step in chain:
        print(step.assignment)
예제 #7
0
def example_partition():
    """Build the Missouri example Partition from ./testData.

    Loads the shapefile and the pre-built MO adjacency graph, pushes the
    vote/population/area/county columns onto the graph nodes, and returns
    a Partition (keyed by the "CD" district column) carrying the full set
    of vote, geometry, and boundary updaters.
    """
    shp = gp.read_file("./testData/mo_cleaned_vtds.shp")

    with open("./testData/MO_graph.json") as graph_file:
        graph = networkx.readwrite.json_graph.adjacency_graph(
            json.load(graph_file))

    # GEOID10 -> congressional district label.
    assignment = get_assignment_dict(shp, "GEOID10", "CD")

    node_columns = ['PR_DV08', 'PR_RV08', 'POP100', 'ALAND10', 'COUNTYFP10']
    add_data_to_graph(shp, graph, node_columns, id_col='GEOID10')

    # Start from the vote-share updaters and layer the rest on top.
    updaters = dict(votes_updaters(['PR_DV08', 'PR_RV08'], election_name='08'))
    updaters['population'] = Tally('POP100', alias='population')
    updaters['areas'] = Tally('ALAND10', alias='areas')
    updaters['counties'] = county_splits('counties', 'COUNTYFP10')
    updaters['perimeters'] = perimeters
    updaters['exterior_boundaries'] = exterior_boundaries
    updaters['boundary_nodes'] = boundary_nodes
    updaters['polsby_popper'] = polsby_popper
    updaters['cut_edges'] = cut_edges
    updaters['cut_edges_by_part'] = cut_edges_by_part

    return Partition(graph, assignment, updaters)
예제 #8
0
# Input the shapefile with vote data here
vote_path = "../testData/wes_with_districtings.shp"

# Load the shapefile whose columns will be copied onto the graph.
# NOTE(review): `unique_label` and `graph` must be defined earlier in the
# full script -- they are not visible in this fragment.
df = gp.read_file(vote_path)
df = df.set_index(unique_label)

# This is the number of elections you want to analyze
num_elections = 2

# Names of shapefile voting data columns go here
# (one [Dem, Rep] column pair per election, parallel to election_names).
election_names = ['2016_Presidential', '2016_Senate']
election_columns = [['T16PRESD', 'T16PRESR'], ['T16SEND', 'T16SENR']]

# Flatten the column pairs and attach every vote column to the graph.
# No id_col is passed, so the dataframe index (unique_label) is used to
# match rows to nodes.
add_data_to_graph(df, graph,
                  [cols for pair in election_columns for cols in pair])
# , id_col=unique_label)

# Desired proposal method
proposal_method = propose_random_flip

# Desired acceptance method
acceptance_method = always_accept

# Number of steps to run
steps = 1000

print("loaded data")

# Necessary updaters go here
updaters = {
예제 #9
0
파일: chain.py 프로젝트: apizzimenti/utah
def main():
    """Run a flip chain on the Utah proration data and plot score histograms.

    The number of chain steps comes from the last command-line argument.
    Four partisan/compactness scores are tracked over the chain and drawn
    as a 2x2 grid of histograms, with the initial plan's score marked by a
    red vertical line in each panel.
    """
    # Get the data, set the number of steps, and denote the column headers
    # containing vote data.
    datapath = "./Prorated/Prorated.shp"
    graphpath = "./graphs/utah.json"
    steps = int(sys.argv[-1])
    r_header = "R"
    d_header = "D"

    # Generate a dataframe, graph, and then combine the two.
    df = gpd.read_file(datapath)
    graph = construct_graph(graphpath)
    add_data_to_graph(df, graph, [r_header, d_header], id_col="GEOID10")

    # Get the district assignment (node -> "CD" value) and add updaters.
    # NOTE(review): graph.node is the NetworkX 1.x attribute API -- confirm
    # the pinned networkx version still supports it (removed in 2.4).
    assignment = dict(
        zip(graph.nodes(), [graph.node[x]["CD"] for x in graph.nodes()]))
    updaters = {
        **votes_updaters([r_header, d_header]), "population":
        Tally("POP10", alias="population"),
        "perimeters":
        perimeters,
        "exterior_boundaries":
        exterior_boundaries,
        "interior_boundaries":
        interior_boundaries,
        "boundary_nodes":
        boundary_nodes,
        "cut_edges":
        cut_edges,
        "areas":
        Tally("ALAND10", alias="areas"),
        "polsby_popper":
        polsby_popper,
        "cut_edges_by_part":
        cut_edges_by_part
    }

    # Create an initial partition and a Pennsylvania-esque chain run.
    initial_partition = Partition(graph, assignment, updaters)
    validator = Validator(
        [refuse_new_splits, no_vanishing_districts, single_flip_contiguous])
    chain = MarkovChain(propose_random_flip,
                        validator,
                        always_accept,
                        initial_partition,
                        total_steps=steps)

    # Pick the scores we want to track.
    # NOTE(review): Mean-Median reads the R vote share but Mean-Thirdian
    # reads the D vote share -- confirm the asymmetry is intentional.
    scores = {
        "Mean-Median":
        functools.partial(mean_median, proportion_column_name=r_header + "%"),
        "Mean-Thirdian":
        functools.partial(mean_thirdian,
                          proportion_column_name=d_header + "%"),
        "Efficiency Gap":
        functools.partial(efficiency_gap, col1=r_header, col2=d_header),
        "L1 Reciprocal Polsby-Popper":
        L1_reciprocal_polsby_popper
    }

    # Score the starting plan, then pipe the whole chain into a table of
    # per-step score values.
    initial_scores = {
        key: score(initial_partition)
        for key, score in scores.items()
    }
    table = pipe_to_table(chain, scores)
    fig, axes = plt.subplots(2, 2)

    # Map each score to its subplot position in the 2x2 grid.
    quadrants = {
        "Mean-Median": (0, 0),
        "Mean-Thirdian": (0, 1),
        "Efficiency Gap": (1, 0),
        "L1 Reciprocal Polsby-Popper": (1, 1)
    }

    # Plotting things!
    for key in scores:
        quadrant = quadrants[key]
        axes[quadrant].hist(table[key], bins=50)
        axes[quadrant].set_title(key)
        axes[quadrant].axvline(x=initial_scores[key], color="r")

    # Save the histogram grid (assumes ./output/histograms/ exists).
    plt.savefig(f"./output/histograms/{steps}.png")
예제 #10
0
    zip(graph.nodes(), [graph.node[x][district_col] for x in graph.nodes()]))

# Input the shapefile with vote data here
vote_path = "./testData/wes_merged_data.shp"

# Load the shapefile whose columns you want to add to the graph.
df = gp.read_file(vote_path)

# Names of shapefile data columns go here
vote_col1 = "voteA"
vote_col2 = "voteB"

# Attach both vote columns to the graph, matching rows to nodes on
# unique_label.
# NOTE(review): `graph` and `unique_label` are defined earlier in the full
# script -- they are not visible in this fragment.
data_list = [vote_col1, vote_col2]

add_data_to_graph(df, graph, data_list, id_col=unique_label)

# Desired proposal method
proposal_method = propose_random_flip_no_loops

# Desired acceptance method
acceptance_method = always_accept

# Number of steps to run
steps = 1000

print("loaded data")

# Necessary updaters go here
updaters = {
    **votes_updaters([vote_col1, vote_col2]), 'population':
                        data_source_type="json")


tree_col = "tree_col"
district_col = tree_col

# Load the tree-based plan: JSON mapping of unit id -> district.
# NOTE(review): `tree_plan`, `df`, `graph`, and `unique_label` are defined
# earlier in the full script -- they are not visible in this fragment.
with open(tree_plan, 'r') as f:
        tree_dict = json.load(f)


tree_dict = dict(tree_dict)
# Record each unit's tree-plan district on the dataframe.
df[tree_col] = df[unique_label].map(tree_dict)
election_columns = [['G_DEM_17_y','G_REP_17_y'],
 ['LG_DEM_1_1', 'LG_REP_1_1'],['AG_DEM_1_1','AG_REP_1_1'],['P_DEM_16_y','P_REP_16_y']]

# Attach every election column, then the tree-plan column, to the graph.
add_data_to_graph(df, graph, [cols for pair in election_columns for cols in pair],id_col=unique_label)
add_data_to_graph(df, graph, [tree_col])

#df.plot(column=tree_col,cmap="tab20")
#plt.show()

# Use the tree plan itself as the assignment dictionary.
assignment = tree_dict#get_assignment_dict_from_graph(graph, tree_col)

# Demographic pairs plus the election pairs tracked downstream.
election_columns = [["VABVAP","VAnBVAP"],["VABlack","VAnBPOP"],['G_DEM_17_y','G_REP_17_y'],
 ['LG_DEM_1_1', 'LG_REP_1_1'],['AG_DEM_1_1','AG_REP_1_1'],['P_DEM_16_y','P_REP_16_y']]

# Necessary updaters go here
updaters = {'population': Tally('population'),
  #          'perimeters': perimeters,
            'exterior_boundaries': exterior_boundaries,
예제 #12
0
def main():
    """Run a long flip chain on the Alaska district graph and plot scores.

    Loads the cached adjacency graph from ./alaska_graph.json, refreshes
    its node attributes from the AK shapefile (rewriting the JSON cache),
    runs a 1,000,000-step BasicChain recording per-step flips, and draws a
    2x2 histogram grid of partisan/compactness scores.
    """

    #graph = construct_graph_from_file("/Users/caranix/Desktop/Alaska_Chain/AK_data.shp", geoid_col="DISTRICT")

    with open('./alaska_graph.json') as f:
        data = json.load(f)
    graph = networkx.readwrite.json_graph.adjacency_graph(data)

    df = gp.read_file(
        "/Users/caranix/Desktop/Alaska_Chain/AK_data.shp"
    )  #    assignment = dict(zip(graph.nodes(), [graph.node[x]['HOUSEDIST'] for x in graph.nodes()]))
    add_data_to_graph(df,
                      graph, [
                          'join_Distr', 'POPULATION', 'join_Dem', 'join_Rep',
                          'perc_Dem', 'perc_Rep', 'AREA'
                      ],
                      id_col='DISTRICT')
    # Persist the enriched graph back into the JSON cache.
    data = json.dumps(networkx.readwrite.json_graph.adjacency_data(graph))
    with open('./alaska_graph.json', 'w') as f:
        f.write(data)

    # Node -> district assignment from the 'join_Distr' attribute.
    # NOTE(review): graph.node is the NetworkX 1.x attribute API -- confirm
    # the pinned networkx version still supports it (removed in 2.4).
    assignment = dict(
        zip(graph.nodes(),
            [graph.node[x]['join_Distr'] for x in graph.nodes()]))

    # NOTE(review): 'cut_edges' appears twice in this literal; the second
    # entry wins, so the duplicate is redundant but harmless.
    updaters = {
        'population':
        Tally('POPULATION', alias='population'),
        'cut_edges':
        cut_edges,
        'cut_edges_by_part':
        cut_edges_by_part,
        **votes_updaters(['join_Dem', 'join_Rep'], election_name='12'), 'perimeters':
        perimeters,
        'exterior_boundaries':
        exterior_boundaries,
        'boundary_nodes':
        boundary_nodes,
        'cut_edges':
        cut_edges,
        'areas':
        Tally('AREA', alias='areas'),
        'polsby_popper':
        polsby_popper
    }

    p = Partition(graph, assignment, updaters)
    print("Starting Chain")

    # Record the initial full assignment at key 0, then only each step's
    # flips (the delta) at subsequent keys.
    chain = BasicChain(p, 1000000)
    allAssignments = {0: chain.state.assignment}
    for step in chain:
        allAssignments[chain.counter + 1] = step.flips
    # print(mean_median(step, 'join_Dem%'))

# with open("chain_outputnew.json", "w") as f:
#     f.write(json.dumps(allAssignments))

#efficiency_gap(p)

# mean_median(p, 'join_Dem%')

    # Scores tracked over the chain (partisan metrics on the Dem share).
    scores = {
        'Mean-Median':
        functools.partial(mean_median, proportion_column_name='join_Dem%'),
        'Mean-Thirdian':
        functools.partial(mean_thirdian, proportion_column_name='join_Dem%'),
        'Efficiency Gap':
        functools.partial(efficiency_gap, col1='join_Dem', col2='join_Rep'),
        'L1 Reciprocal Polsby-Popper':
        L1_reciprocal_polsby_popper
    }

    initial_scores = {key: score(p) for key, score in scores.items()}

    table = pipe_to_table(chain, scores)

    fig, axes = plt.subplots(2, 2)

    # Map each score to its subplot position in the 2x2 grid.
    quadrants = {
        'Mean-Median': (0, 0),
        'Mean-Thirdian': (0, 1),
        'Efficiency Gap': (1, 0),
        'L1 Reciprocal Polsby-Popper': (1, 1)
    }

    for key in scores:
        quadrant = quadrants[key]
        axes[quadrant].hist(table[key], bins=50)
        axes[quadrant].set_title(key)
        axes[quadrant].axvline(x=initial_scores[key], color='r')
    plt.show()
    # NOTE(review): the triple-quoted string below is opened but never
    # closed in this fragment -- the remainder of the original file sits
    # inside it.
    '''