Code example #1
File: create_graph.py  Project: maxhully/pennsylvania
def create_graph_from_shapefile(shapefile_path='./data/wes_with_districtings/wes_with_districtings.shp'):
    log.info(f"Creating the queen adjacency graph")
    pa_queen = Graph.from_shapefile(
        shapefile_path, adjacency_type='queen', data_columns=None, id_column='wes_id')
    log.info(f"Saving the queen adjacency graph")
    pa_queen.add_columns_from_shapefile(
        shapefile_path, columns, id_column='wes_id')

    # Fix nodes whose assignment is different from all their neighbors, and
    # whose neighbors all have the same assignment.
    # In practice, this affects one node's assignment in two plans. (Node '1')
    for plan in plans:
        correct_islands(pa_queen.graph, plan)

    pa_queen.save('./PA_queen.json')

    log.info(f"Creating the rook adjacency graph")
    pa_rook = Graph.from_shapefile(
        shapefile_path, adjacency_type='rook', data_columns=None, id_column='wes_id')
    pa_rook.add_columns_from_shapefile(
        shapefile_path, columns, id_column='wes_id')
    log.info(f"Saving the queen adjacency graph")

    for plan in plans:
        correct_islands(pa_rook.graph, plan)

    pa_rook.save('./PA_rook.json')
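
The project's correct_islands helper used above isn't shown in this listing. As a rough
sketch of the behavior the comment describes (reassigning a node whose plan assignment
differs from every neighbor while all of its neighbors agree), assuming a networkx-style
graph with the plan stored as a node attribute; this is illustrative, not the project's
actual implementation:

def correct_islands_sketch(graph, plan):
    # Illustrative only: give the node its neighbors' shared assignment when the node
    # disagrees with all of them and they all agree with each other.
    for node in graph.nodes:
        neighbor_parts = {graph.nodes[neighbor][plan] for neighbor in graph.neighbors(node)}
        if len(neighbor_parts) == 1 and graph.nodes[node][plan] not in neighbor_parts:
            graph.nodes[node][plan] = neighbor_parts.pop()
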
Code example #2
File: clean_up.py  Project: maxhully/pennsylvania
def main(shapefile_path='./wes_unitsPA/wes_units_PA.shp',
         graph_path='wes_graph.json'):
    pa = Graph.load(graph_path)

    shape = geopandas.read_file(shapefile_path)
    shape = shape.set_index('wes_id')

    for plan in plans:
        log.info(f"Plan: {plan}")
        try:
            plot_plan(shape, plan, f"./plots/{plan}_before.svg")
        except Exception:
            log.error("There was an error while trying to plot", exc_info=1)

        correct_islands(pa.graph, plan)
        assignment = {
            node: pa.graph.nodes[node][plan]
            for node in pa.graph.nodes
        }

        try:
            plot_plan(shape, assignment, f"./plots/{plan}_after.svg")
        except Exception:
            log.error("There was an error while trying to plot", exc_info=1)

    pa.save()
Code example #3
def set_up_plan(plan):
    graph = Graph.load('./PA_queen.json').graph

    assignment = {node: graph.nodes[node][plan] for node in graph.nodes}

    updaters = {
        'perimeters': perimeters,
        'exterior_boundaries': exterior_boundaries,
        'interior_boundaries': interior_boundaries,
        'boundary_nodes': boundary_nodes,
        'cut_edges': cut_edges,
        'areas': Tally('area', alias='areas'),
        'polsby_popper': polsby_popper,
        'cut_edges_by_part': cut_edges_by_part
    }

    return Partition(graph, assignment, updaters)
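
For context, a GerryChain-style Partition looks up each updater's result by key, so the
object returned by set_up_plan can be queried directly. A hypothetical usage sketch (the
plan name 'remedial' is only an example):

# Hypothetical usage; 'remedial' is just an example plan name.
partition = set_up_plan('remedial')
print(partition['areas'])          # total area per district, from the Tally updater
print(partition['polsby_popper'])  # Polsby-Popper compactness score per district
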
Code example #4
def match_wes_units_to_remedial_plan():
    pa = Graph.load('./wes_graph.json')

    blocks = pandas.read_csv('./data/block_to_wesid.csv', dtype=str)
    plan = pandas.read_csv('./data/remedial/blocks_to_remedial.csv',
                           names=['GEOID10', 'remedial'],
                           dtype=str)

    blocks = blocks.set_index('GEOID10')
    plan = plan.set_index('GEOID10')

    blocks['remedial'] = plan['remedial']

    log.info('Mapping units to parts')
    mapping = map_units_to_parts_via_blocks(blocks,
                                            pa.graph,
                                            unit='wes_id',
                                            part='remedial')

    for node in mapping:
        if node != '0':
            pa.graph.nodes[node]['remedial'] = mapping[node]

    pa.save('./wes_graph2.json')

    log.info('Getting block populations')

    block_pops = geopandas.read_file(
        '../graphmaker/graphmaker/blocks/42/tabblock2010_42_pophu.shp')
    block_pops['GEOID10'] = block_pops['BLOCKID10'].astype('object')
    block_pops = block_pops.set_index('GEOID10')

    blocks['population'] = block_pops['POP10'].astype(int)

    log.info('Creating report')
    report = splitting_report(blocks, 'wes_id', 'remedial')

    with open('./reports/splitting_energy_remedial_plan.json', 'w') as f:
        json.dump(report, f)

    return report
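
The map_units_to_parts_via_blocks call above is another project helper that isn't shown
here. One plausible reading of the name, sketched with pandas, is to assign each unit to
the part covering the plurality of its blocks; this is a guess at the idea (it even
ignores the graph argument), not the actual function:

def map_units_to_parts_sketch(blocks, unit='wes_id', part='remedial'):
    # Guesswork sketch: for each unit, pick the most common part among its blocks.
    return blocks.groupby(unit)[part].agg(lambda parts: parts.mode().iloc[0]).to_dict()
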
Code example #5
def set_up_chain(plan, total_steps, adjacency_type='queen'):
    graph = Graph.load(f"./PA_{adjacency_type}.json").graph

    assignment = {node: graph.nodes[node][plan] for node in graph.nodes}

    updaters = {
        **votes_updaters(elections["2016_Presidential"],
                         election_name="2016_Presidential"),
        **votes_updaters(elections["2016_Senate"],
                         election_name="2016_Senate"),
        'population': Tally('population', alias='population'),
        'perimeters': perimeters,
        'exterior_boundaries': exterior_boundaries,
        'interior_boundaries': interior_boundaries,
        'boundary_nodes': boundary_nodes,
        'cut_edges': cut_edges,
        'areas': Tally('area', alias='areas'),
        'polsby_popper': polsby_popper,
        'cut_edges_by_part': cut_edges_by_part
    }

    partition = Partition(graph, assignment, updaters)

    population_constraint = within_percent_of_ideal_population(partition, 0.01)
    compactness_constraint = SelfConfiguringLowerBound(L_minus_1_polsby_popper,
                                                       epsilon=0.1)

    is_valid = Validator(default_constraints +
                         [population_constraint, compactness_constraint])

    return partition, MarkovChain(propose_random_flip, is_valid, always_accept,
                                  partition, total_steps)
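
A hedged usage sketch of the returned pair: iterating the MarkovChain yields the current
partition at each step, and the updaters can be read off every one of them (the plan name
and step count here are illustrative):

# Illustrative only: run the chain and track the number of cut edges at each step.
initial_partition, chain = set_up_chain('remedial', total_steps=1000)
cut_edge_counts = [len(partition['cut_edges']) for partition in chain]
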
Code example #6
# As a replacement, we can use Census tracts.
# The same technique can be used to build a graph of counties (e.g. for Iowa)
# or a graph from any other shapefile you want.

# First we'll download the tracts:

# The Tiger class lets us access Census shapefile URLs as if they were
# just python objects.
kentucky_tracts = Tiger(2012).tract.ky
# or, equivalently (fips)
kentucky_tracts = Tiger(2012).tract['21']
# or, equivalently (full name)
kentucky_tracts = Tiger(2012).tract.kentucky

# To download from the url, call download() and pass the directory
# you want to save the shapefiles in. (Make sure the directory exists.)
kentucky_tracts.download(target='./kentucky/')

# The shapefile will be named 'tl_2012_21_tract.shp'.
kentucky_queen = Graph.from_shapefile('./kentucky/tl_2012_21_tract.shp',
                                      adjacency_type='queen')

# ...This might take a while...

# Once it's done, you can view some statistics about the graph like this:
print(graph_report(kentucky_queen.graph))

# And then save the graph wherever you want:
kentucky_queen.save('./kentucky/queen.json')
Code example #7
from graphmaker.graph import Graph
from graphmaker.match import match

# Load the state adjacency graph from wherever you downloaded it to
my_state = Graph.load(
    '../graphmaker/graphs/vtd-adjacency-graphs/vtd-adjacency-graphs/12/queen.json'
)

# Match the VTDs to 'State Legislature Upper Chamber' Districts
match(my_state, 'VTD', 'SLDU')

# Print all the nodes, to see that the 'SLDU' attribute has been added
print(my_state.graph.nodes(data=True))

# Save the graph back to the same file
my_state.save()
Code example #8
from graphmaker.resources import BlockAssignmentFile
from graphmaker.graph import Graph
from graphmaker.integrate import integrate

# You can also add columns from a Pandas dataframe, which lets you do whatever joining,
# transforming, or merging you need to do before adding the data to the graph.

# A good way to do this is with our `integrate` function to take Census block-level
# statistics (like population) and add them to the VTD graph.

# First we need to download Block Assignment Files. These match census blocks to bigger
# subdivisions (like VTDs) so that we know how to add up the block-level data.

# We'll use Florida:

blocks = BlockAssignmentFile('12').download(target='./florida/blocks/')

# This actually downloads a whole set of files, with matchings from blocks to multiple
# different types of subdivisions, including Upper and Lower State Legislatures and
# elementary school districts.

# Now let's load our graph:
florida = Graph.load('./florida/queen.json')

df = integrate('./florida/blocks/BlockAssign_ST12_FL_VTD', ['POP10'], 'VTD')

florida.add_columns_from_df(df, columns=['POP10'])
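
As the comments above note, any dataframe can be attached this way after you do your own
joins. A minimal sketch, assuming (as the integrate line suggests) that add_columns_from_df
matches dataframe rows to graph nodes by the dataframe's index; the CSV file and column
names are invented:

import pandas

# Invented example: a CSV of vote totals keyed by the VTD GEOIDs used as node IDs.
votes = pandas.read_csv('./florida/votes_by_vtd.csv', dtype=str)
votes = votes.set_index('VTD')
votes[['D_VOTES', 'R_VOTES']] = votes[['D_VOTES', 'R_VOTES']].astype(int)

florida.add_columns_from_df(votes, columns=['D_VOTES', 'R_VOTES'])
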
Code example #9
File: clean_up.py  Project: maxhully/pennsylvania
    # (This excerpt begins mid-call; the opening `ax = shape.plot(` line below is
    # reconstructed from context and may not match the original file exactly.)
    ax = shape.plot(linewidth=0.5,
                    edgecolor='0.5',
                    column='assignment',
                    categorical=True,
                    cmap='tab20')
    ax.set_axis_off()

    plt.axis('equal')
    plt.savefig(filepath)


if __name__ == '__main__':
    shapefile_path = './data/wes_unitsPA/wes_units_PA.shp'
    shape = geopandas.read_file(shapefile_path)

    pa = Graph.load('./wes_graph2.json')

    for node in pa.graph.nodes:
        if 'remedial' not in pa.graph.nodes[node]:
            print(node)

    print(
        len([
            node for node in pa.graph.nodes
            if 'remedial' not in pa.graph.nodes[node]
        ]))

    assignment = {
        node: pa.graph.nodes[node]["remedial"]
        for node in pa.graph.nodes
    }
Code example #10
from graphmaker.graph import Graph

# We'll continue from the Kentucky example:

# Load your graph from wherever you saved it:
kentucky_queen = Graph.load('./kentucky/queen.json')

# You can add columns from a shapefile:
kentucky_queen.add_columns_from_shapefile(
    './kentucky/tl_2012_21_tract.shp',
    columns=['ALAND', 'AWATER', 'COUNTYFP', 'STATEFP'])
# Or from a CSV (using a made up example):
kentucky_queen.add_columns_from_csv('./votes.csv',
                                    columns=['D_VOTES_2020', 'R_VOTES_2020'],
                                    id_column='TRACT')
# By writing id_column='TRACT', we tell the program that the column whose IDs match the
# nodes of the graph (the tract GEOIDs) is 'TRACT'. Usually this won't be necessary,
# because the program will automatically recognize common names like 'GEOID' or 'ID'.