Code example #1
def save_result(region):

    # (1) read small_buf_inconsist pickle

    small_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_small_buf_inconsist_{region}", "pickled_data", "junctions")

    small_buf_accepted = pd.read_pickle(small_buf_inconsist_path)

    # (2) read consistent clusters pickle

    consistent_clusters_path = utils.getSubDirPath(
        f"jcts_consistent_clusters_{region}", "pickled_data", "junctions")

    consistent_plus_accepted_large_solutions = pd.read_pickle(
        consistent_clusters_path)

    complete_df = pd.concat(
        [small_buf_accepted, consistent_plus_accepted_large_solutions],
        ignore_index=True,
        sort=False)

    file_name = f'manual_merging_res_{region}_{datetime.date.today()}.csv'

    path = utils.getSubDirPath(file_name, 'csv_data', 'junctions')

    complete_df.to_csv(path, index=False, sep="|")
Code example #2
def delete_clust(small_buf_clstr, region):

    # (1) read small_buf_inconsist pickle

    small_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_small_buf_inconsist_{region}", "pickled_data", "junctions")

    small_buf_inconsist = pd.read_pickle(small_buf_inconsist_path)

    # (2) read large_buf_inconsist pickle

    large_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_large_buf_inconsist_{region}", "pickled_data", "junctions")

    large_buf_inconsist = pd.read_pickle(large_buf_inconsist_path)

    # Delete the specified cluster

    small_buf_inconsist = small_buf_inconsist[
        small_buf_inconsist['neighbour_cluster'] != small_buf_clstr]

    small_buf_inconsist.to_pickle(small_buf_inconsist_path)

    # Draw a new map

    mapping.runAllMapTasks(region, small_buf_inconsist, large_buf_inconsist)
Code example #3
def main(region, junctionsdf):

    highwaydf, idCoords_dict = dfShizzle.metaFunc(
        config.paramDict[region]["bounding_box"])
    print("Created highwaydf and coordsdict")

    unfoldedEnrichedDf = segmentizeAndEnrich.metaFunc(highwaydf, junctionsdf,
                                                      idCoords_dict)
    print("Unfolded the enrichted df")

    bufferedDf = bufferSegs.bufferize(unfoldedEnrichedDf)
    print("Created bufferDf")

    oddballs, normies = clusterSegs.cluster(bufferedDf, junctionsdf)
    print("Created segment clusters")

    completeSegments = tidyData_Segs.tidyItUp(region, oddballs, normies)
    print("Cleaned segments")

    # Write to pickle for future use

    file_name = f"{region}_segments"

    path = utils.getSubDirPath(file_name, "pickled_data", "segments")

    completeSegments.to_pickle(path)

    return completeSegments
Code example #4
def runAllMapTasks(region, bbCentroid, nonIsolatedJunctions, isolatedJunctions,
                   bufferSize):

    # I.) Set up our maps

    myMap = folium.Map(location=bbCentroid,
                       zoom_start=15,
                       tiles='cartodbpositron')

    # II.) Plot polys onto their respective maps

    plotPolys_B(nonIsolatedJunctions, 'geometry', myMap, {
        'fillColor': '#ff1493',
        'lineColor': '#F5FFFA'
    })

    plotPolys_B(isolatedJunctions, 'poly_geometry', myMap, {
        'fillColor': '#7FFF00',
        'lineColor': '#F5FFFA'
    })

    # III.) Export map as html

    file_name = f'{region}-jcts-map_buf={bufferSize}_{datetime.date.today()}.html'

    path = utils.getSubDirPath(file_name, "html_maps", "junctions")

    myMap.save(path)
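The map snippets call plotPolys_B (and, later, plotPolys) without showing them. A minimal sketch of what such a helper might look like, assuming each row of the passed data frame carries a shapely geometry in the named column; only the call signature is taken from the snippets, while the body, weight and opacity values are assumptions:

import folium

def plotPolys_B(df, geom_column, target_map, style):

    # Style every polygon with the fill/line colours supplied by the caller.
    def style_function(feature):
        return {
            'fillColor': style['fillColor'],
            'color': style['lineColor'],
            'weight': 2,
            'fillOpacity': 0.5,
        }

    # folium.GeoJson accepts any object exposing __geo_interface__, which
    # shapely geometries do, so each polygon can be added to the map directly.
    for geom in df[geom_column]:
        folium.GeoJson(geom, style_function=style_function).add_to(target_map)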
Code example #5
def main(region, buffer_size):

    nodesdf = dfShizzle.metaFunc(config.paramDict[region]["bounding_box"])
    print("Created nodedf")

    junctionsdf, junctions_for_segs = findJunctions.getJunctionsDf(
        nodesdf, region)
    print("Got junctions for region {0!s}".format(region))

    bufferedJunctionsDf = bufferJcts.bufferize(junctionsdf, buffer_size)
    print("Created bufferDf")

    nonIsolatedJunctions, isolatedJunctions = clusterJcts.cluster(
        bufferedJunctionsDf)
    print("Created junction clusters")

    completeJunctions = tidyData_Jcts.tidyItUp(
        region, config.paramDict[region]["centroid"], nonIsolatedJunctions,
        isolatedJunctions, buffer_size)
    print("Cleaned junctions")

    # Write to pickle for future use

    file_name = f"{region}_junctions_buffer={buffer_size}"

    path = utils.getSubDirPath(file_name, "pickled_data", "junctions")

    # Write the data frame to the pickle folder; include buffer_size in the file name.
    # ==> This makes it possible to reuse the data in the manual merging tool: if a
    #     data set for a specific region and buffer size already exists, it can be
    #     used rather than computing everything from scratch again.

    completeJunctions.to_pickle(path)

    return completeJunctions, junctions_for_segs
Code example #6
def runAllMapTasks(region, bbCentroid, oddballs, normies):

    # I.) Set up our maps

    myMap = folium.Map(location=bbCentroid,
                       zoom_start=15,
                       tiles='cartodbpositron',
                       prefer_canvas=True)

    # II.) Plot polys onto their respective maps

    plotPolys(oddballs, 'geometry', myMap, {
        'fillColor': '#ff1493',
        'lineColor': '#F5FFFA'
    })

    plotPolys(normies, 'poly_geometry', myMap, {
        'fillColor': '#7FFF00',
        'lineColor': '#F5FFFA'
    })

    # III.) Export map as html

    # Find out if we're operating in 'segments'-subdirectory or its parent directory,
    # PyPipeline_ (background: we want to write all files related to segments to the
    # segments subdirectory)

    cwd = os.getcwd()

    file_name = f'{region}-segs-{datetime.date.today()}.html'

    path = utils.getSubDirPath(file_name, "html_maps", "segments")

    myMap.save(path)
Code example #7
def runAllMapTasks(region, small_buf_inconsist, large_buf_inconsist):

    # I.) Set up our maps

    bbCentroid = config.paramDict[region]['centroid']

    myMap = folium.Map(location=bbCentroid,
                       zoom_start=15,
                       tiles='cartodbpositron')

    # II.) Plot polys onto their respective maps

    marker_cluster = MarkerCluster().add_to(myMap)

    plotPolys(large_buf_inconsist, myMap, {
        'fillColor': '#87CEEB',
        'lineColor': '#4682B4'
    }, marker_cluster, 'blue')

    plotPolys(small_buf_inconsist, myMap, {
        'fillColor': '#3CB371',
        'color': '#2E8B57'
    }, marker_cluster, 'green')

    # III.) Export map as htmls

    # Find out if we're operating in 'segments'-subdirectory or its parent directory,
    # PyPipeline_ (background: we want to write all files related to segments to the
    # segments subdirectory)

    cwd = os.getcwd()

    in_target_dir = utils.inTargetDir(cwd)

    file_name = f'{region}-segs-manualClust_{datetime.date.today()}.html'

    path = utils.getSubDirPath(file_name, "html_maps", "segments")

    myMap.save(path)
Code example #8
    # Parse the input parameter

    args = parser.parse_args()

    # Read the junctions data from the csv produced by the junctions sub-project.
    # It used to be read directly from the csv_data directory in the junctions
    # subproject, but to enable use with docker and avoid mounting hell it now
    # has to be moved manually from junctions/csv_data to segments/csv_data
    # (unless the top-level script main.py in PyPipeline_ is used).
    # !!! Be sure to execute the junctions project before executing the
    #     segments project for the same region, otherwise there may be no
    #     file to read. !!!

    subdir_path = utils.getSubDirPath(f"{args.region}_junctions_for_segs.csv",
                                      "csv_data", "segments")

    # Notify the user if junctions_for_segs.csv is unavailable because the junctions
    # project hasn't been executed before the segments project
    try:
        junctionsdf = pd.read_csv(subdir_path)
    except FileNotFoundError:
        print(
            "Junctions file wasn't found! Please execute OSM_jcts.py for this region to generate it."
        )
        sys.exit()

    start_time = time.time()

    completeSegs = main(args.region, junctionsdf)
Code example #9
        dest='buf_size',
        type=float,
        help="By how much the one-dimensional junction points will be buffered."
    )

    # Parse the input parameters

    args = parser.parse_args()

    start_time = time.time()

    completeJunctions, junctions_for_segs = main(args.region, args.buf_size)

    print("--- %s seconds ---" % (time.time() - start_time))

    # Write entire data set to csv

    file_name = f"{args.region}_junctions_complete_{datetime.date.today()}.csv"

    path = utils.getSubDirPath(file_name, "csv_data", "junctions")

    completeJunctions.to_csv(path, index=False, sep="|")

    # Write data subset to be used by segments script to csv

    file_name_ = f"{args.region}_junctions_for_segs.csv"

    path_ = utils.getSubDirPath(file_name_, "csv_data", "junctions")

    junctions_for_segs.to_csv(path_)
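Code examples #8 and #9 both start in the middle of the command-line handling. Below is a hypothetical sketch of the argparse setup the junctions script (Code example #9) appears to be cut from; the flag names and the region help text are assumptions, while dest='buf_size', type=float and its help text are taken from the snippet above:

import argparse

parser = argparse.ArgumentParser(
    description="Compute junction clusters for a region.")

# Region to process (flag and help text are assumed, not taken from the original script).
parser.add_argument('-r',
                    '--region',
                    dest='region',
                    type=str,
                    help="The region to compute junction clusters for.")

# Buffer size around the one-dimensional junction points.
parser.add_argument('-b',
                    '--buffer',
                    dest='buf_size',
                    type=float,
                    help="By how much the one-dimensional junction points will be buffered."
                    )

args = parser.parse_args()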
Code example #10
print(
    f'Please navigate to directory PyPipeline_/junctions/html_maps and open the file \'{region}-jcts-manualClust_{datetime.date.today()}.html\' in your browser. \n'
)

print(
    'By default, the more conservative clustering solutions (green shapes on the map) will be accepted. \n'
)

# (4) Prompt the user to add the desired changes to {region}.toml.

run_modifications = input(
    f"Please add your desired modifications to {region}.toml (in the PyPipeline_/junctions/manual_merge_config directory). \n Enter ok once you're done. \n"
)

if (run_modifications == 'ok'):

    config_path = utils.getSubDirPath(f'{region}.toml', 'manual_merge_config',
                                      'junctions')

    config = toml.load(config_path)

    #print(config)

    for elem in config['replace']:

        manualMergeTool.update_clust(elem['old'], elem['new'], region)

    for elem in config['delete']:

        manualMergeTool.delete_clust(elem, region)

        print(elem)
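The loops above expect {region}.toml to provide a 'replace' array of tables and a 'delete' list. A hypothetical example of the parsed structure as returned by toml.load, with made-up cluster ids: each 'replace' entry rejects one or more small-buffer clusters ('old') in favour of a single large-buffer cluster ('new'), and 'delete' lists small-buffer clusters to drop entirely.

config = {
    'replace': [
        # Reject small-buffer clusters 17 and 23, accept large-buffer cluster 42 instead.
        {'old': [17, 23], 'new': 42},
    ],
    # Drop small-buffer cluster 56 entirely.
    'delete': [56],
}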
Code example #11
def meta_assist(region, small_buf, large_buf):

    # NEW as of 12/01/21: avoid re-computing data that already exists.
    #                     Whenever OSM_jcts.py is executed (and also when manual
    #                     editing is performed and 'manualMergeTool' > save_result
    #                     is called), the resultant data is not only written to
    #                     .csv but also saved as a pickle (a Python serialization
    #                     format that is easy to read back in: unlike with a .csv
    #                     file, numerical data, lists etc. don't have to be parsed
    #                     from strings).
    #                     => rather than calling OSM_jcts.py for small_buf and
    #                        large_buf by default, check whether we already have
    #                        the data and only compute it if we don't.

    # Get data frames for small_buf (more conservative buffer parameter) and large_buf (more liberal buffer parameter)
    # => from pickle (PyPipeline_/junctions/pickled_data) or computed

    # Do we already have a data set for the SMALL buffer size specified?
    # => if so, use it. Else, compute it.

    small_buf_file = f"{region}_junctions_buffer={small_buf}"

    if (utils.fileExists(small_buf_file)):

        small_buf_path = utils.getSubDirPath(small_buf_file, "pickled_data",
                                             "junctions")

        small_buf = pd.read_pickle(small_buf_path)

    else:
        small_buf = OSM_jcts.main(region, small_buf)

    # Do we already have a data set for the LARGE buffer size specified?
    # => if so, use it. Else, compute it.

    large_buf_file = f"{region}_junctions_buffer={large_buf}"

    if (utils.fileExists(large_buf_file)):

        large_buf_path = utils.getSubDirPath(large_buf_file, "pickled_data",
                                             "junctions")

        large_buf = pd.read_pickle(large_buf_path)

    else:
        large_buf = OSM_jcts.main(region, large_buf)

    # Determine where the two data frames (and thus the clustering solutions for smaller and larger buffer) differ

    small_buf_inconsist, small_buf_consist, large_buf_inconsist = determine_inconsistencies(
        small_buf, large_buf)

    mapping.runAllMapTasks(region, small_buf_inconsist, large_buf_inconsist)

    # PICKLE (SERIALIZE) THREE DATA SETS FOR USE BY MANUALMERGETOOL:
    # (1) 'small_buf_inconsist': subset of the small buffer df where clustering solutions differ from the
    #     larger buffer solution.
    # (2) 'large_buf_inconsist': subset of the large buffer df where clustering solutions differ from the
    #     smaller buffer solution.
    # (3) 'consistent_clusters': subset of the small buffer df where clustering solutions DO NOT differ from
    #     the larger buffer solution; i.e. if this subset was taken from the large buffer df it would be exactly
    #     the same.

    # INTENDED PROCESSING OF THESE DATA SETS IN MANUALMERGETOOL:
    # * The more conservative solutions contained in 'small_buf_inconsist' can be manually edited, i.e. replaced by the
    #   more liberal solutions contained in 'large_buf_inconsist'.
    # * That means, when a user compares the rivaling conservative vs. liberal solutions for inconsistent clusters,
    #   she might decide to pick the liberal solution over the conservative one.
    # * Hence, the respective rows belonging to the conservative solution are DELETED from the 'small_buf_inconsist'
    #   df and the respective row belonging to the liberal solution is taken from the 'large_buf_inconsist' data set
    #   and MOVED to 'small_buf_consist', our 'base' df which will be returned after all of the manual editing is
    #   finished. This means that the conflict has been resolved.
    # * When all editing is done, what remains of 'small_buf_inconsist' (i.e., the conservative solutions that
    #   were chosen over their liberal counterparts) is concatenated with 'consistent_clusters', which already
    #   contains all the more liberal solutions that were chosen over the conservative ones.

    # Write small_buf_inconsist pickle

    small_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_small_buf_inconsist_{region}", "pickled_data", "junctions")

    small_buf_inconsist.to_pickle(small_buf_inconsist_path)

    # Write large_buf_inconsist pickle

    large_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_large_buf_inconsist_{region}", "pickled_data", "junctions")

    large_buf_inconsist.to_pickle(large_buf_inconsist_path)

    # Write consistent clusters pickle

    consistent_clusters_path = utils.getSubDirPath(
        f"jcts_consistent_clusters_{region}", "pickled_data", "junctions")

    small_buf_consist.to_pickle(consistent_clusters_path)
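Pieced together from the snippets in this collection, a hypothetical end-to-end run of the manual merging workflow; the region name, buffer sizes and cluster ids are purely illustrative, and the region is assumed to exist in config.paramDict:

# Compute or load both solutions, pickle the (in)consistent subsets, draw the comparison map.
meta_assist("hamburg", 2.0, 2.5)

# Resolve one conflict: accept large-buffer cluster 42 over small-buffer clusters 17 and 23.
manualMergeTool.update_clust([17, 23], 42, "hamburg")

# Drop small-buffer cluster 56 entirely.
manualMergeTool.delete_clust(56, "hamburg")

# Merge what remains of the small-buffer solution with the consistent clusters and write the csv.
manualMergeTool.save_result("hamburg")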
Code example #12
def update_clust(small_buf_clstrs, large_buf_clstr, region):

    # (1) read small_buf_inconsist pickle

    small_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_small_buf_inconsist_{region}", "pickled_data", "junctions")

    small_buf_inconsist = pd.read_pickle(small_buf_inconsist_path)

    # (2) read large_buf_inconsist pickle

    large_buf_inconsist_path = utils.getSubDirPath(
        f"jcts_large_buf_inconsist_{region}", "pickled_data", "junctions")

    large_buf_inconsist = pd.read_pickle(large_buf_inconsist_path)

    # (3) read consistent clusters pickle

    consistent_clusters_path = utils.getSubDirPath(
        f"jcts_consistent_clusters_{region}", "pickled_data", "junctions")

    consistent_clusters = pd.read_pickle(consistent_clusters_path)

    # 'small_buf_clstrs' is a list of clusters that emerged in the small_buf solution;
    # remove the rows corresponding to these clusters from 'small_buf_inconsist', as the
    # large_buf solution for the same clustering problem is preferred per user input.
    # In other words, RESOLVE the conflict between the small_buf and large_buf solutions
    # by deleting the small_buf one and adding the large_buf one to our df containing
    # the 'consistent_clusters'.

    # Delete the rejected cluster solution from 'small_buf_inconsist'

    for clust in small_buf_clstrs:

        small_buf_inconsist = small_buf_inconsist[
            small_buf_inconsist['neighbour_cluster'] != clust]

    # Grab the accepted solution from 'large_buf_inconsist'

    accepted_solution = large_buf_inconsist.loc[
        large_buf_inconsist['neighbour_cluster'] == large_buf_clstr].copy()

    # Set 'neighbour_cluster' to 999999 to make manual editing obvious and facilitate highlighting on map

    accepted_solution['neighbour_cluster'] = 999999

    large_buf_inconsist.loc[large_buf_inconsist['neighbour_cluster'] ==
                            large_buf_clstr, ['neighbour_cluster']] = 999999

    # Append accepted solution to 'consistent_clusters'

    consistent_clusters = pd.concat([consistent_clusters, accepted_solution],
                                    ignore_index=True,
                                    sort=False)

    mapping.runAllMapTasks(region, small_buf_inconsist, large_buf_inconsist)

    # Pickle the three data sets for further editing.

    # Write small_buf_inconsist pickle

    small_buf_inconsist.to_pickle(small_buf_inconsist_path)

    # Write large_buf_inconsist pickle

    large_buf_inconsist.to_pickle(large_buf_inconsist_path)

    # Write consistent clusters pickle

    consistent_clusters.to_pickle(consistent_clusters_path)