Code example #1
0
File: defaults.py  Project: themaninorange/RunDMCMC
def PA_partition():
    """Build an initial Partition for the Pennsylvania test data.

    Loads a pre-built NetworkX adjacency-data JSON graph (carrying CD,
    area, population, and vote data), derives the district assignment
    from each node's 'CD' attribute, and attaches the standard updaters.
    """
    # NetworkX adjacency-data JSON file with CD, area, population, and vote data.
    graph = construct_graph("./testData/PA_graph_with_data.json")

    # Add frozen attributes to graph
    # data = gp.read_file("./testData/frozen.shp")
    # add_data_to_graph(data, graph, ['Frozen'], 'wes_id')

    # Map every node to its congressional district.
    assignment = {node: graph.node[node]['CD'] for node in graph.nodes()}

    # Vote-share updaters first, then the tallies and geometry updaters.
    updaters = {
        **votes_updaters(['VoteA', 'VoteB']),
        'population': Tally('POP100', alias='population'),
        'perimeters': perimeters,
        'exterior_boundaries': exterior_boundaries,
        'boundary_nodes': boundary_nodes,
        'cut_edges': cut_edges,
        'areas': Tally('ALAND10', alias='areas'),
        'polsby_popper': polsby_popper,
        'cut_edges_by_part': cut_edges_by_part,
    }

    return Partition(graph, assignment, updaters)
コード例 #2
0
def gsource_gdata(config, graphSource, graphData):
    """Create a graph from the config file GRAPH_SOURCE and GRAPH_DATA sections"""

    # The config must provide both a data section and a source section.
    if not (config.has_section(graphData) and config.has_section(graphSource)):
        raise Exception(
            "ERROR: config needs a GRAPH_DATA section and a GRAPH_SOURCE section"
        )

    # Every required field has to appear in the graph-data section.
    present = list(config[graphData].keys())
    if any(field not in present for field in required_graph_fields()):
        elements = " ".join(required_graph_fields())
        raise Exception(
            "ERROR: graph_data must contain all of the following fields: %s" %
            elements)

    configGraphData = config[graphData]
    configGraphSource = config[graphSource]

    # Column names for the required node attributes.
    ID = configGraphData['id']
    POP = configGraphData['pop']
    AREA = configGraphData['area']
    CD = configGraphData['cd']

    # Build the graph, loading the required columns onto the nodes.
    graph = mgs.construct_graph(configGraphSource['gSource'], ID,
                                [POP, AREA, CD])
    return graph, POP, AREA, CD
コード例 #3
0
def gsource_gdata(config, graphSource, graphData):
    """Create a graph from the config file GRAPH_SOURCE and GRAPH_DATA sections.

    :param config: a parsed ``configparser.ConfigParser`` for the run.
    :param graphSource: name of the section holding the graph source path.
    :param graphData: name of the section naming the required data columns.
    :returns: tuple ``(graph, POP, AREA, CD)`` — the built graph plus the
        population, area, and district column names.
    :raises configparser.NoSectionError: if either section is missing.
    :raises configparser.NoOptionError: if required options are missing.
    """
    graph_source_field = "gSource"
    save_graph_field = "save_json"
    required_graph_data_fields = ['id', 'pop', 'area', 'cd']

    # Make sure the config file has graph information in it.
    if not config.has_section(graphData):
        raise configparser.NoSectionError(graphData)
    if not config.has_section(graphSource):
        raise configparser.NoSectionError(graphSource)

    configGraphData = config[graphData]
    configGraphSource = config[graphSource]

    # Report every missing required column name in a single error.
    missing = [
        x for x in required_graph_data_fields if x not in configGraphData
    ]

    if missing:
        missing_str = " ".join(missing)
        raise configparser.NoOptionError(missing_str, graphData)

    if graph_source_field not in configGraphSource:
        raise configparser.NoOptionError(graph_source_field, graphSource)

    ID = configGraphData['id']
    POP = configGraphData['pop']
    AREA = configGraphData['area']
    CD = configGraphData['cd']

    path = configGraphSource[graph_source_field]
    save_graph = False
    load_graph = False

    # If a save location is configured and a cached JSON graph already
    # exists there, load it instead of rebuilding from the shapefile;
    # otherwise build from the source and save the result afterwards.
    if save_graph_field in configGraphSource:
        save_graph = True
        if os.path.isfile(configGraphSource[save_graph_field]):
            path = configGraphSource[save_graph_field]
            # FIX: print after reassigning so the message shows the file
            # actually being loaded (the original printed the source path).
            print("trying to load graph from", path)
            save_graph = False
            load_graph = True

    # FIX: renamed local from `type` to avoid shadowing the builtin.
    source_type = "json" if load_graph else "fiona"
    graph = mgs.construct_graph(path,
                                ID,
                                pop_col=POP,
                                area_col=AREA,
                                district_col=CD,
                                cols_to_add=[POP, AREA, CD],
                                data_source_type=source_type)

    if save_graph:
        print("saving graph to", configGraphSource[save_graph_field])
        with open(configGraphSource[save_graph_field], "w") as f:
            json.dump(json_graph.adjacency_data(graph), f)

    return graph, POP, AREA, CD
コード例 #4
0
def main():
    """Run a short Markov chain over the Wyoming test data, printing each step's assignment."""
    graph = construct_graph(*ingest("./testData/wyoming_test.shp", "GEOID"))

    # Attach the district plan and land-area columns to the graph nodes.
    cd_data = get_list_of_data('./testData/wyoming_test.shp', ['CD', 'ALAND'])
    add_data_to_graph(cd_data, graph, ['CD', 'ALAND'])

    assignment = pull_districts(graph, 'CD')
    validator = Validator([contiguous])

    initial_partition = Partition(graph,
                                  assignment,
                                  aggregate_fields=['ALAND'])

    # FIX: PEP 8 (E731) — use a def instead of assigning a lambda.
    def accept(partition):
        """Accept every proposed flip unconditionally."""
        return True

    chain = MarkovChain(propose_random_flip,
                        validator,
                        accept,
                        initial_partition,
                        total_steps=10)

    for step in chain:
        print(step.assignment)
コード例 #5
0
import json
import random
from rundmcmc.make_graph import construct_graph
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import geopandas as gp
# Paths to the pre-built Arkansas adjacency graph and the full shapefile.
graph_path = "./Data/Arkansas_graph_with_data.json"
df = gp.read_file("./Data/AR_Full.shp")
#df=df.set_index("ID")

# Turn off interactive plotting; figures are written to disk instead.
plt.ioff()

graph = construct_graph(graph_path,
                        id_col="ID",
                        pop_col="POP10",
                        district_col="CD",
                        data_source_type="json")

# Plot the initial congressional-district map.
df.plot(column="CD", cmap="tab20")
plt.savefig("./Outputs/Tree/AR0.png")
plt.close()

et = [0, 0]

#build a list then use that list to update

# BUG FIX: the original assigned the bound method (df["CD"].tolist) rather
# than calling it, so `assignment` held a function, not the district list.
assignment = df["CD"].tolist()

el = list(graph.edges())
df["newtree0"] = df["CD"]
コード例 #6
0
# Record that the output folder exists.
# NOTE(review): `newdir` is defined outside this excerpt — presumably the
# run's output directory created by earlier setup code; confirm upstream.
with open(newdir + "init.txt", "w") as f:
    f.write("Created Folder")

# Input the path to the graph (either JSON or shapefile) and the label column
# This file should have at least population, area, and district plan
state_name = "Pennsylvania"
graph_path = "../testData/PA_rook.json"
unique_label = "wes_id"

# Names of graph columns go here
pop_col = "population"
area_col = "area"
district_col = "Remedial"

# This builds a graph
graph = construct_graph(graph_path, data_source_type="json")

# Write graph to file (adjacency-data JSON, so it can be reloaded later)
with open(newdir + state_name + '_graph_with_data.json', 'w') as outfile1:
    outfile1.write(json.dumps(json_graph.adjacency_data(graph)))

# Get assignment dictionary (node -> district) from the graph
assignment = get_assignment_dict_from_graph(graph, district_col)

# Input the shapefile with vote data here
vote_path = "../testData/wes_with_districtings.shp"

# This inputs a shapefile with columns you want to add
df = gp.read_file(vote_path)
df = df.set_index(unique_label)
Code example #7
0
File: chain.py  Project: apizzimenti/utah
def main():
    """Build a Utah partition, run a Markov chain, and plot score histograms."""
    # Data paths, chain length (last CLI argument), and vote-column headers.
    datapath = "./Prorated/Prorated.shp"
    graphpath = "./graphs/utah.json"
    steps = int(sys.argv[-1])
    r_header = "R"
    d_header = "D"

    # Generate a dataframe, graph, and then combine the two.
    df = gpd.read_file(datapath)
    graph = construct_graph(graphpath)
    add_data_to_graph(df, graph, [r_header, d_header], id_col="GEOID10")

    # District assignment: each node mapped to its "CD" attribute.
    assignment = {node: graph.node[node]["CD"] for node in graph.nodes()}

    # Vote-share updaters first, then tallies and geometry updaters.
    updaters = {
        **votes_updaters([r_header, d_header]),
        "population": Tally("POP10", alias="population"),
        "perimeters": perimeters,
        "exterior_boundaries": exterior_boundaries,
        "interior_boundaries": interior_boundaries,
        "boundary_nodes": boundary_nodes,
        "cut_edges": cut_edges,
        "areas": Tally("ALAND10", alias="areas"),
        "polsby_popper": polsby_popper,
        "cut_edges_by_part": cut_edges_by_part,
    }

    # Create an initial partition and a Pennsylvania-esque chain run.
    initial_partition = Partition(graph, assignment, updaters)
    validator = Validator(
        [refuse_new_splits, no_vanishing_districts, single_flip_contiguous])
    chain = MarkovChain(propose_random_flip, validator, always_accept,
                        initial_partition, total_steps=steps)

    # Pick the scores we want to track.
    scores = {
        "Mean-Median":
        functools.partial(mean_median, proportion_column_name=r_header + "%"),
        "Mean-Thirdian":
        functools.partial(mean_thirdian,
                          proportion_column_name=d_header + "%"),
        "Efficiency Gap":
        functools.partial(efficiency_gap, col1=r_header, col2=d_header),
        "L1 Reciprocal Polsby-Popper":
        L1_reciprocal_polsby_popper
    }

    # Scores of the starting plan — used as reference lines on the plots.
    initial_scores = {
        name: fn(initial_partition)
        for name, fn in scores.items()
    }

    # Run the chain and collect a table of scores at every step.
    table = pipe_to_table(chain, scores)
    fig, axes = plt.subplots(2, 2)

    # Which subplot each score lands in.
    quadrants = {
        "Mean-Median": (0, 0),
        "Mean-Thirdian": (0, 1),
        "Efficiency Gap": (1, 0),
        "L1 Reciprocal Polsby-Popper": (1, 1)
    }

    # One histogram per score, with the initial value marked in red.
    for name in scores:
        cell = quadrants[name]
        axes[cell].hist(table[name], bins=50)
        axes[cell].set_title(name)
        axes[cell].axvline(x=initial_scores[name], color="r")

    # Show the histogram.
    plt.savefig(f"./output/histograms/{steps}.png")
Code example #8
0
File: template_main.py  Project: ljwolf/RunDMCMC
from rundmcmc.output import p_value_report, pipe_to_table

from vis_output import (hist_of_table_scores, trace_of_table_scores)

# Input the path to the graph (either JSON or shapefile) and the label column
graph_path = "./testData/PA_graph_with_data.json"
unique_label = "wes_id"

# Names of graph columns go here
pop_col = "POP100"
area_col = "ALAND10"
district_col = "CD"

# This builds a graph
# NOTE(review): elsewhere in this codebase the source type is passed as the
# keyword `data_source_type="json"`; here "json" is passed positionally and
# may bind to a different parameter — verify against construct_graph's
# signature.
graph = construct_graph(graph_path, "json")

# Write graph to file (node-link JSON format)
with open('graph_with_data.json', 'w') as outfile1:
    outfile1.write(json.dumps(json_graph.node_link_data(graph)))

# Put district on graph: map each node to its district-column attribute
assignment = dict(
    zip(graph.nodes(), [graph.node[x][district_col] for x in graph.nodes()]))

# Input the shapefile with vote data here
vote_path = "./testData/wes_merged_data.shp"

# This inputs a shapefile with columns you want to add
df = gp.read_file(vote_path)
コード例 #9
0
# This file should have at least population, area, and district plan
state_name = "Pennsylvania"
graph_path = "./testData/FinalPA_new.shp"
unique_label = "wes_id"

# Names of graph columns go here
# area_col = "ALAND10"
pop_col = "population"
county_col = "County"

# Columns loaded onto the graph and coerced to numeric below — the same
# list is used in both places so they cannot drift apart.
extra_cols = [
    "GOV", "TS", "2011Plan", "Remedial", "538dem", "538cpct", "8thgrade",
    "8thgrade2", "Persily", county_col, 'T16PRESD', 'T16PRESR', 'T16SEND',
    'T16SENR'
]

# This builds a graph
graph = construct_graph(graph_path,
                        id_col=unique_label,
                        pop_col=pop_col,
                        data_cols=extra_cols,
                        data_source_type="fiona")

df = gp.read_file(graph_path)

# Coerce every extra column to numeric; unparseable values become NaN.
for name in extra_cols:
    df[name] = pd.to_numeric(df[name], errors='coerce')

# This is the number of elections you want to analyze
コード例 #10
0
from rundmcmc.validity import (Validator, no_vanishing_districts,
                               refuse_new_splits, single_flip_contiguous)
from rundmcmc.proposals import propose_random_flip
from rundmcmc.make_graph import construct_graph
from rundmcmc.accept import always_accept
from rundmcmc.partition import Partition
from rundmcmc.updaters import cut_edges
from rundmcmc.chain import MarkovChain

# Some file that contains a graph with congressional district data.
path = "./45_rook.json"
steps = 1000

graph = construct_graph(path)

# District assignment read straight off the 'CD' node attribute.
assignment = {node: graph.node[node]['CD'] for node in graph.nodes()}

updaters = {'cut_edges': cut_edges}

initial_partition = Partition(graph, assignment, updaters)

# Only accept plans that stay contiguous, keep every district populated,
# and introduce no new county splits.
validator = Validator(
    [refuse_new_splits, no_vanishing_districts, single_flip_contiguous])
chain = MarkovChain(propose_random_flip, validator, always_accept,
                    initial_partition, total_steps=steps)

for i, partition in enumerate(chain):
コード例 #11
0

# Make a folder for the output, stamped with the current date and time.
current = datetime.datetime.now()
# FIX: replace fragile slicing of str(current) with an explicit strftime;
# the output ("YYYY-MM-DD-HH-MM-SS") is byte-identical to the old slices.
newdir = ("./Outputs/" + state_name + "run"
          + current.strftime("%Y-%m-%d-%H-%M-%S") + "/")

os.makedirs(os.path.dirname(newdir + "init.txt"), exist_ok=True)
with open(newdir + "init.txt", "w") as f:
    f.write("Created Folder")


# This builds a graph, loading the county column plus every election column.
graph = construct_graph(graph_path, id_col=unique_label, area_col=area_col,
                        pop_col=pop_col, district_col=district_col,
                        data_cols=[county_col] + [cols
                                                  for pair in election_columns for cols in pair],
                        data_source_type="json")


# Get assignment dictionary (node -> district)
assignment = get_assignment_dict_from_graph(graph, district_col)


# Necessary updaters go here
updaters = {'population': Tally('population'),
            'perimeters': perimeters,
            'exterior_boundaries': exterior_boundaries,
            'interior_boundaries': interior_boundaries,
            'boundary_nodes': boundary_nodes,
            'cut_edges': cut_edges,
コード例 #12
0

# For each districting plan column: build a graph, attach election and
# county data, and cache the result in a timestamped output folder.
for district_col in district_cols:

    # Per-plan output directory stamped YYYY-MM-DD-HH-MM-SS.
    current = datetime.datetime.now()
    newdir = "./Outputs/" + state_name + "_report-" + str(current)[:10] + "-" + str(current)[11:13]\
             + "-" + str(current)[14:16] + "-" + str(current)[17:19] + "/"

    os.makedirs(os.path.dirname(newdir + "init.txt"), exist_ok=True)
    with open(newdir + "init.txt", "w") as f:
        f.write("Created Folder")

    print("slowly building graph ...")
    # This builds a graph
    graph = construct_graph(graph_path, pop_col=pop_col, id_col=unique_label,
                            district_col=district_col,
                            data_source_type="fiona")

    # Get assignment dictionary (node -> district)
    assignment = get_assignment_dict_from_graph(graph, district_col)

    # This inputs a shapefile with columns you want to add
    df = gp.read_file(graph_path)
    df = df.set_index(unique_label)

    # This adds the data to the graph
    add_data_to_graph(df, graph, [cols for pair in election_columns for cols in pair])
    add_data_to_graph(df, graph, [county_col])

    # Write graph to file so it never has to be built again!
    with open(newdir + state_name + '_graph_with_data.json', 'w') as outfile1:
コード例 #13
0
from rundmcmc.output import p_value_report

from vis_output import (hist_of_table_scores, trace_of_table_scores)

# Input the path to the graph (either JSON or shapefile) and the label column
graph_path = "./testData/PA_graph_with_data.json"
unique_label = "wes_id"

# Names of graph columns go here
pop_col = "POP100"
area_col = "ALAND10"
district_col = "CD"

# Build the adjacency graph from the file above.
graph = construct_graph(graph_path)

# Persist the graph in node-link JSON form.
with open('graph_with_data.json', 'w') as outfile1:
    outfile1.write(json.dumps(json_graph.node_link_data(graph)))

# District assignment: each node mapped to its district-column attribute.
assignment = {node: graph.node[node][district_col] for node in graph.nodes()}

# Input the shapefile with vote data here
vote_path = "./testData/wes_merged_data.shp"

# This inputs a shapefile with columns you want to add
df = gp.read_file(vote_path)