Example #1
def process_feature_classes(input_ws, output_ws, foreach_layer=None):
    """
    Processes each feature class in input_ws with an optional function.
    input_ws - the database or dataset path containing the feature classes
    output_ws - the output workspace for the feature classes
    foreach_layer - an optional function called for each feature class
    """
    from arcpy import env, ListFeatureClasses, AddWarning, AddMessage, \
        GetCount_management, FeatureClassToFeatureClass_conversion
    from os.path import join
    env.workspace = input_ws
    feature_classes = ListFeatureClasses()
    for feature_class in feature_classes:
        
        AddMessage('Processing {}...'.format(feature_class))
        if env.skipEmpty:
            count = int(GetCount_management(feature_class)[0])
            if count == 0:
                AddWarning('Skipping because table is empty: {}'.format(feature_class))
                continue
        try:
            if foreach_layer:
                foreach_layer(input_ws, output_ws, feature_class)
            else:
                #copy each feature class over
                output_path = join(output_ws, get_name(feature_class))
                delete_existing(output_path)
                FeatureClassToFeatureClass_conversion(feature_class, output_ws, get_name(feature_class))
        except Exception as e:
            AddWarning('Error processing feature class {} - {}'.format(feature_class, e))
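A minimal usage sketch with hypothetical geodatabase paths; it assumes the get_name and delete_existing helpers are defined elsewhere in the module, and that the custom env.skipEmpty flag read above is set somewhere in the surrounding toolbox:

# Hypothetical paths; both geodatabases are assumed to already exist.
process_feature_classes(r'C:\data\input.gdb', r'C:\data\output.gdb')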
Example #2
def process_feature_classes(input_ws, output_ws, foreach_layer=None):
    """
    Processes each feature class in input_ws with an optional function.
    input_ws - the database or dataset path containing the feature classes
    output_ws - the output workspace for the feature classes
    foreach_layer - an optional function called for each feature class
    """
    from arcpy import env, ListFeatureClasses, FeatureClassToGeodatabase_conversion, \
        AddWarning, AddMessage
    from os.path import join
    env.workspace = input_ws
    feature_classes = ListFeatureClasses()
    for feature_class in feature_classes:

        AddMessage('Processing {}...'.format(feature_class))
        try:
            if foreach_layer:
                foreach_layer(input_ws, output_ws, feature_class)
            else:
                #copy each feature class over
                output_path = join(output_ws, get_name(feature_class))
                delete_existing(output_path)
                FeatureClassToGeodatabase_conversion(feature_class, output_ws)
        except Exception as e:
            AddWarning('Error processing feature class {} - {}'.format(
                feature_class, e))
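The foreach_layer hook receives (input_ws, output_ws, feature_class), so callers can substitute their own per-layer processing for the default copy; a sketch with a hypothetical callback:

def report_layer(input_ws, output_ws, feature_class):
    # Hypothetical callback: report the feature count instead of copying.
    from arcpy import AddMessage, GetCount_management
    count = int(GetCount_management(feature_class)[0])
    AddMessage('{} has {} features'.format(feature_class, count))

process_feature_classes(r'C:\data\input.gdb', r'C:\data\output.gdb',
                        foreach_layer=report_layer)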
Example #3
def network_features(network):
    """
  Returns the junction and edge feature names of |network|
  |network|: a network dataset
  """
    edge_feature = None
    junction_feature = None
    for source in Describe(network).sources:
        if source.sourceType == EDGE_FEATURE:
            edge_feature = source.name
        elif source.sourceType in JUNCTION_FEATURE:
            junction_feature = source.name
    if edge_feature is None:
        AddWarning(WARNING_NO_EDGE_FEATURE(network))
        raise Invalid_Input_Exception("Input Network")
    if junction_feature is None:
        AddWarning(WARNING_NO_JUNCTION_FEATURE(network))
        raise Invalid_Input_Exception("Input Network")
    return (junction_feature, edge_feature)
Example #4
def subset_image_for_texture(in_image, in_polygon, area, out_raster):
    """Clips in_image to in_polygon's extent, then subsets it if a square of `area` fits."""
    from os import path
    from arcpy import Describe, AddWarning
    from arcpy import Clip_management as ClipRaster  # assumption: ClipRaster is arcpy's raster Clip
    from arcpy.management import Delete
    from math import sqrt
    temp_rast = path.join("in_memory", "temp_rast")
    ClipRaster(in_image, image_extent_2(in_polygon), temp_rast, "#", "#", "NONE")
    desc = Describe(temp_rast).children[0]
    height = desc.height
    width = desc.width
    cell_height = desc.meanCellHeight
    cell_width = desc.meanCellWidth
    r_length = height*cell_height
    r_width = width*cell_width
    if r_length > sqrt(area) and r_width > sqrt(area):
        subset_image(temp_rast, area, out_raster)
    else:
        AddWarning("Geometry length and width do not fit area | Length = {0} | Width = {1}".format(r_length, r_width))
        AddWarning("Draw a larger area whose length and width can contain a square of the requested area")
    Delete(temp_rast)
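The length/width test checks whether a square of the requested area fits inside the clipped raster, since a square of area A has side sqrt(A); the check in isolation:

from math import sqrt

def square_fits(r_length, r_width, area):
    # Both raster dimensions must exceed the side of a square of `area`.
    side = sqrt(area)
    return r_length > side and r_width > side

print(square_fits(120.0, 80.0, 10000.0))  # False: side is 100, width is only 80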
Example #5
def copy_tables(input_ws, output_ws, foreach_table=None):
    """
    copies tables or sends each table to a function
        input_ws - the input database
        output_ws - the output database
        foreach_table - the optional function to process each table
    """
    from arcpy import env, ListTables, AddMessage, AddWarning, \
        GetCount_management, TableToTable_conversion
    from os.path import join

    env.workspace = input_ws
    for table in ListTables():
        AddMessage('Processing table: {}'.format(table))
        
        if env.skipAttach and '_attach' in table.lower():
            AddWarning('Skipping attachments table {}'.format(table))
            continue
        
        if env.skipEmpty:
            count = int(GetCount_management(table)[0])
            if count == 0:
                AddWarning('Skipping empty table: {}'.format(table))
                continue
        
        try:
            if foreach_table:
                foreach_table(input_ws, output_ws, table)
            else:
                output_path = join(output_ws, get_name(table))
                delete_existing(output_path)
                TableToTable_conversion(table, output_ws, get_name(table))
        except Exception as e:
            AddWarning('Error on table: {} - {}'.format(table, e))
Example #6
def msg(text, arc_status=None, set_progressor_label=False):
    """
    output messages through Click.echo (cross-platform shell printing) 
    and the ArcPy GP messaging interface and progress bars
    """
    click.echo(text)

    if arc_status == "warning":
        AddWarning(text)
    elif arc_status == "error":
        AddError(text)
    else:
        AddMessage(text)

    if set_progressor_label:
        SetProgressorLabel(text)
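Typical calls, assuming an ArcPy tool context where a progressor is active:

msg('Loading network ...')                      # shell + AddMessage
msg('Field not found', arc_status='warning')    # shell + AddWarning
msg('Step 2 of 6', set_progressor_label=True)   # also updates the progressor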
Example #7
def process_datasets(from_db,
                     to_db=None,
                     foreach_layer=None,
                     foreach_table=None,
                     foreach_dataset=None):
    """
    creates the projected datasets necessary and then calls the function
    to perform additional functions on each layer and table
    from_db - the input database to pull from
    to_db - the output database to place the processed data
    foreach_layer - the function to process each layer with
    foreach_table - the function to process each table with
    """
    #get the datasets in the input workspace
    from arcpy import AddMessage, AddWarning, CreateFeatureDataset_management, ListDatasets, env, ExecuteError
    AddMessage('Workspace: {}'.format(env.workspace))

    #handle feature classes at the top level. these are moved into _top dataset for
    #automatic projection handling
    copy_tables(from_db, to_db, foreach_table)

    process_feature_classes(from_db, to_db, foreach_layer)

    in_datasets = ListDatasets()
    if in_datasets:
        for dataset in in_datasets:
            to_dataset = get_name(dataset)
            from_dataset_path = '{}/{}'.format(from_db, dataset)
            to_dataset_path = '{}/{}'.format(to_db, to_dataset)
            AddMessage('Processing Dataset: {}'.format(from_dataset_path))
            try:
                if foreach_dataset:
                    foreach_dataset(from_db, to_db, dataset)
                else:
                    CreateFeatureDataset_management(to_db, to_dataset,
                                                    env.outputCoordinateSystem)
            except ExecuteError as e:
                AddWarning('Could not create dataset {}, {}'.format(
                    to_dataset, e))

            process_feature_classes(from_dataset_path, to_dataset_path,
                                    foreach_layer)
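A usage sketch with hypothetical paths; env.workspace and env.outputCoordinateSystem should be set first, since the function logs the former and passes the latter to CreateFeatureDataset_management:

from arcpy import env, SpatialReference

env.workspace = r'C:\data\input.gdb'                  # hypothetical path
env.outputCoordinateSystem = SpatialReference(26912)  # e.g. NAD83 / UTM zone 12N
process_datasets(r'C:\data\input.gdb', r'C:\data\output.gdb')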
Example #8
def copy_tables(input_ws, output_ws, foreach_table=None):
    """
    copies tables or sends each table to a function
        input_ws - the input database
        output_ws - the output database
        foreach_table - the optional function to process each table
    """
    from arcpy import env, ListTables, AddMessage, AddWarning, TableToGeodatabase_conversion
    from os.path import join

    env.workspace = input_ws
    for table in ListTables():
        AddMessage('Processing table: {}'.format(table))
        try:
            if foreach_table:
                foreach_table(input_ws, output_ws, table)
            else:
                output_path = join(output_ws, get_name(table))
                delete_existing(output_path)
                TableToGeodatabase_conversion(table, output_ws)
        except Exception as e:
            AddWarning('Error on table: {} - {}'.format(table, e))
Example #9

if __name__ == '__main__':
    debug = False
    if debug:
        ''' Seamless Texture Maps must be the same size as the source image'''
        in_image = r'C:\Users\geof7015\Documents\ArcGIS\Projects\ArcGIS_Image_Designer\TestData\imgFolder\Ortho.jpg'
        in_mask = r'C:\Users\geof7015\Documents\ArcGIS\Projects\ArcGIS_Image_Designer\TestData\maskFolder\dune8TestMask.tif'
        in_texture = r'C:\Users\geof7015\Documents\ArcGIS\Projects\ArcGIS_Image_Designer\Textures\Processed\coastal_steppe.jpg'
        out_image = r'C:\Users\geof7015\Documents\ArcGIS\Projects\ArcGIS_Image_Designer\TestData\test\Da_DuneOrtho.jpg'
        method = "None"  # "GaussianBlur", "BoxBlur", "None"
        blur_distance = 10  # Distance in Pixels
    else:
        from os.path import exists
        from arcpy import GetParameterAsText, GetParameter, AddMessage, AddWarning
        ''' Seamless Texture Maps must be the same size as the source image'''
        in_image = GetParameterAsText(0)
        in_mask = GetParameterAsText(1)
        in_texture = GetParameterAsText(2)
        out_image = GetParameterAsText(3)
        method = GetParameterAsText(4)  # "GaussianBlur", "BoxBlur", "None"
        blur_distance = GetParameter(5)  # Distance in Pixels

        for i in [in_image, in_mask, in_texture]:
            if not exists(i):
                AddWarning("{0} does not exist".format(i))
                exit()

        AddMessage(blur_distance)
    main()
Example #10
def main():
    # tool inputs
    INPUT_NETWORK = argv[1]
    INPUT_POINTS = argv[2]
    INPUT_ORIGINS_FIELD = argv[3]
    INPUT_DESTINATIONS_FIELD = argv[4]
    INPUT_BUILDING_WEIGHTS_FIELD = argv[5]
    INPUT_COEFF = float(argv[6])
    INPUT_SEARCH_RADIUS = float(argv[7]) if is_number(argv[7]) else float('inf')
    INPUT_OUTPUT_DIRECTORY = argv[8]
    INPUT_OUTPUT_FEATURE_CLASS_NAME = argv[9]

    # check that network has "Length" attribute
    if "Length" not in network_cost_attributes(INPUT_NETWORK):
      AddError("Network <%s> does not have Length attribute" % INPUT_NETWORK)
      return

    # check that coeff is at least 1
    if INPUT_COEFF < 1:
      AddError("Redundancy coefficient <%s> must be at least 1" % INPUT_COEFF)
      return

    # if we are given a building weights field, check that it is valid
    if INPUT_BUILDING_WEIGHTS_FIELD == "#":
      INPUT_BUILDING_WEIGHTS_FIELD = ""
    if INPUT_BUILDING_WEIGHTS_FIELD and (INPUT_BUILDING_WEIGHTS_FIELD not in
        fields(INPUT_POINTS)):
      AddError("Building weights field <%s> is not a valid attribute in the "
          "input points <%s>" % (INPUT_BUILDING_WEIGHTS_FIELD, INPUT_POINTS))
      return

    # setup
    env.overwriteOutput = True

    # copy the input points into an output feature class
    AddMessage("Copying input points to output feature class ...")
    input_points_layer = Layer(INPUT_POINTS)
    output_feature_class = "%s.shp" % join(INPUT_OUTPUT_DIRECTORY,
        INPUT_OUTPUT_FEATURE_CLASS_NAME)
    CopyFeatures_management(in_features=input_points_layer,
        out_feature_class=output_feature_class)
    AddMessage("\tDone.")

    # construct network and points
    network, points, edge_to_points = construct_network_and_load_buildings(
        INPUT_POINTS, INPUT_NETWORK, INPUT_BUILDING_WEIGHTS_FIELD)

    # extract origin and destination ids
    origin_ids = flagged_points(INPUT_POINTS, INPUT_ORIGINS_FIELD)
    destination_ids = flagged_points(INPUT_POINTS, INPUT_DESTINATIONS_FIELD)
    if len(origin_ids) == 0 or len(destination_ids) == 0 or (
        len(origin_ids) == 1 and origin_ids == destination_ids):
      AddWarning("No OD pair found, no computation will be done.")

    # compute redundancy index statistics for each origin point
    AddMessage("Computing redundancy indices ...")
    redundancy_indices = {}
    # memoize: computing index from O to D is same as computing it from D to O
    memo = {}
    for origin_id in origin_ids:
      progress_bar = Progress_Bar(len(destination_ids), 1,
          "Computing index for O=%s ..." % origin_id)
      # statistics variables
      tot_redundancy_index = 0
      tot_squared_redundancy_index = 0
      min_redundancy_index = None
      max_redundancy_index = None
      all_unique_segments = set()
      # track the number of destinations for which a numeric redundancy index is
      #     successfully computed
      n = 0
      for destination_id in destination_ids:
        if origin_id != destination_id:
          memo_key = (min(origin_id, destination_id), max(origin_id,
              destination_id))
          if memo_key not in memo:
            memo[memo_key] = find_redundancy_index(network, points,
                edge_to_points, INPUT_COEFF, origin_id, destination_id,
                INPUT_SEARCH_RADIUS, bool(INPUT_BUILDING_WEIGHTS_FIELD))
          if memo[memo_key] is not None:
            n += 1
            redundancy_pair, unique_segments_pair = memo[memo_key]
            min_redundancy_index = (min(min_redundancy_index, redundancy_pair)
                if min_redundancy_index is not None else redundancy_pair)
            max_redundancy_index = (max(max_redundancy_index, redundancy_pair)
                if max_redundancy_index is not None else redundancy_pair)
            tot_redundancy_index += redundancy_pair
            tot_squared_redundancy_index += redundancy_pair * redundancy_pair
            all_unique_segments |= unique_segments_pair
        progress_bar.step()
      if n > 0:
        avg_redundancy_index = tot_redundancy_index / n
        avg_squared_redundancy_index = tot_squared_redundancy_index / n
      else:
        avg_redundancy_index = avg_squared_redundancy_index = 0
      # TODO(mikemeko): work on std computation with better accuracy
      std = sqrt(max(avg_squared_redundancy_index - avg_redundancy_index *
          avg_redundancy_index, 0))
      if min_redundancy_index is None:
        min_redundancy_index = 0
      if max_redundancy_index is None:
        max_redundancy_index = 0
      redundancy_indices[origin_id] = (n, avg_redundancy_index, std,
          min_redundancy_index, max_redundancy_index, all_unique_segments)
    AddMessage("\tDone.")

    # write out redundancy statistics to output feature class
    # delete all points that are not origins from the output feature class
    AddMessage("Writing out results ...")
    int_fields = ["InputID", "Reach"]
    double_fields = ["AvgRedund", "StdRedund", "MinRedund", "MaxRedund"]
    for field in int_fields:
      AddField_management(in_table=output_feature_class, field_name=field,
          field_type="INTEGER")
    for field in double_fields:
      AddField_management(in_table=output_feature_class, field_name=field,
          field_type="DOUBLE")
    rows = UpdateCursor(output_feature_class,
        ["OID@"] + int_fields + double_fields)
    for row in rows:
      oid = row[0]
      if Describe(INPUT_POINTS).extension != "shp":
        # original ids start from 1, but shapefile ids start from 0, so add
        #     1 to shapefile id for correct matching
        oid += 1
      if oid in redundancy_indices:
        n, avg, std, m, M, all_unique_segments = redundancy_indices[oid]
        row[1:] = [oid, n, avg, std, m, M]
        rows.updateRow(row)
      else:
        rows.deleteRow()
    # create a layer of the output feature class, for symbology purposes
    output_layer = "%s.lyr" % join(INPUT_OUTPUT_DIRECTORY,
        INPUT_OUTPUT_FEATURE_CLASS_NAME)
    MakeFeatureLayer_management(in_features=output_feature_class,
        out_layer=INPUT_OUTPUT_FEATURE_CLASS_NAME)
    SaveToLayerFile_management(INPUT_OUTPUT_FEATURE_CLASS_NAME, output_layer,
        "ABSOLUTE")
    # add output feature layer to display after applying symbology
    ApplySymbologyFromLayer_management(output_layer, join(path[0],
        r"Symbology_Layers\sample_points_symbology.lyr"))
    add_layer_to_display(output_layer)
    # if there is only one origin, symbolize selected edges
    if _common_id(memo.keys()) and len(all_unique_segments) > 0:
      n, avg, std, m, M, all_unique_segments = redundancy_indices[origin_ids[0]]
      select_edges_from_network(INPUT_NETWORK, all_unique_segments,
          INPUT_OUTPUT_DIRECTORY, "%s_edges" % INPUT_OUTPUT_FEATURE_CLASS_NAME)
    AddMessage("\tDone.")
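The memo key above orders each origin-destination pair so (O, D) and (D, O) share a single cache entry; the pattern on its own, with a hypothetical compute function:

memo = {}

def cached_value(origin_id, destination_id, compute):
    # Sort the pair so both directions map to the same key.
    memo_key = (min(origin_id, destination_id), max(origin_id, destination_id))
    if memo_key not in memo:
        memo[memo_key] = compute(origin_id, destination_id)
    return memo[memo_key]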
Example #11
def main():
  """
  Runs the centrality tool.
  """
  env.overwriteOutput = True # Enable overwriting
  CheckOutExtension("Network")

  # Success of the program through the six steps
  success = True

  # Inputs to the tool
  if len(argv) != INPUT_COUNT + 1:
    raise Exception("Invalid number of inputs")
  input_number = index()
  input_number.next() # Skip over sys.argv[0]
  inputs = {}
  inputs[INPUT_BUILDINGS] = argv[input_number.next()]
  inputs[POINT_LOCATION] = ("INSIDE" if argv[input_number.next()] == "true" else
      "CENTROID")
  inputs[INPUT_NETWORK] = argv[input_number.next()]
  inputs[COMPUTE_REACH] = argv[input_number.next()] == "true"
  inputs[COMPUTE_GRAVITY] = argv[input_number.next()] == "true"
  inputs[COMPUTE_BETWEENNESS] = argv[input_number.next()] == "true"
  inputs[COMPUTE_CLOSENESS] = argv[input_number.next()] == "true"
  inputs[COMPUTE_STRAIGHTNESS] = argv[input_number.next()] == "true"
  inputs[ID_ATTRIBUTE] = argv[input_number.next()]
  inputs[NODE_WEIGHT_ATTRIBUTE] = argv[input_number.next()]
  inputs[IMPEDANCE_ATTRIBUTE] = argv[input_number.next()]
  try: inputs[SEARCH_RADIUS] = float(argv[input_number.next()])
  except: inputs[SEARCH_RADIUS] = INFINITE_RADIUS
  inputs[USE_NETWORK_RADIUS] = (argv[input_number.next()] ==
      ON_THE_NETWORK_OPTION)
  try: inputs[BETA] = float(argv[input_number.next()])
  except: raise Invalid_Input_Exception("Beta")
  inputs[NORMALIZE_RESULTS] = [measure for measure in
      argv[input_number.next()].split(";") if measure != "#"]
  inputs[OUTPUT_LOCATION] = argv[input_number.next()]
  inputs[OUTPUT_FILE_NAME] = argv[input_number.next()]
  inputs[ACCUMULATOR_ATTRIBUTES] = argv[input_number.next()]

  # Record the origin nodes for centrality measurements
  # This is important if the user selects a subset of the features to be origins
  selected_features = all_values_in_column(inputs[INPUT_BUILDINGS],
    inputs[ID_ATTRIBUTE])
  # Clear selection if we got a layer file
  try:
    SelectLayerByAttribute_management(inputs[INPUT_BUILDINGS],
      "CLEAR_SELECTION")
  except:
    pass

  # Adjacency List table name
  node_locations_needed = (inputs[COMPUTE_STRAIGHTNESS] or
      not inputs[USE_NETWORK_RADIUS])
  adj_dbf_name = ("%s_%s_%s_%s_%s_%s.dbf" % (ADJACENCY_LIST_NAME,
      basename(inputs[INPUT_BUILDINGS]), basename(inputs[INPUT_NETWORK]),
      inputs[ID_ATTRIBUTE], inputs[IMPEDANCE_ATTRIBUTE],
      inputs[ACCUMULATOR_ATTRIBUTES])).replace("#", "None")
  if len(adj_dbf_name) > MAX_FILE_NAME_LENGTH:
    AddWarning(WARNING_LARGE_ADJ_FILE_NAME)
  adj_dbf = join(inputs[OUTPUT_LOCATION], adj_dbf_name)

  # Output file names
  output_feature_class_name = feature_class_name(inputs[OUTPUT_FILE_NAME])
  output_feature_class = "%s.shp" % join(inputs[OUTPUT_LOCATION],
      output_feature_class_name)
  # Create a feature class that is a copy of the input buildings
  try:
    AddMessage(INPUT_BUILDINGS_COPY_STARTED)
    CreateFeatureclass_management(out_path=inputs[OUTPUT_LOCATION],
        out_name=output_feature_class_name)
    CopyFeatures_management(in_features=inputs[INPUT_BUILDINGS],
        out_feature_class=output_feature_class)
    AddMessage(INPUT_BUILDINGS_COPY_FINISHED)
  except:
    AddWarning(GetMessages(2))
    AddMessage(INPUT_BUILDINGS_COPY_FAILED)
    success = False
  output_layer_name = layer_name(inputs[OUTPUT_FILE_NAME])
  output_layer = "%s.lyr" % join(inputs[OUTPUT_LOCATION], output_layer_name)

  # If output has already been created, don't carry on
  if Exists(output_layer):
    AddWarning(WARNING_OUTPUT_ALREADY_EXISTS)
    success = False

  # We will convert polygon input buildings to point feature class
  buildings_description = Describe(output_feature_class)
  if buildings_description.shapeType == "Point":
    # Input buildings are already a point shape file
    inputs[INPUT_POINTS] = output_feature_class
  elif buildings_description.shapeType == "Polygon":
    # Input buildings need to be converted to point feature class
    point_feature_class_name = POINT_FEATURE_CLASS_NAME(
        basename(output_feature_class), inputs[POINT_LOCATION])
    inputs[INPUT_POINTS] = "%s.shp" % join(inputs[OUTPUT_LOCATION],
        point_feature_class_name)
    # If FID is used as ID attribute, we need to change it since a point
    #     shapefile will be in use
    if inputs[ID_ATTRIBUTE] == "FID":
      inputs[ID_ATTRIBUTE] = ORIGINAL_FID
  else:
    # Input buildings need to be either points or polygons
    raise Invalid_Input_Exception("Input Buildings")

  # Find the appropriate symbology layer
  for metric_index in range(len(METRICS)):
    if inputs[COMPUTE_REACH + metric_index]:
      first_metric = METRICS[metric_index]
      break
  symbology_layer_name = get_symbology_layer_name(
      buildings_description.shapeType, first_metric)
  symbology_layer = join(SYMBOLOGY_DIR, symbology_layer_name)

  def clean_up():
    """
    Removes all auxiliary files
    """
    auxiliary_dir = join(inputs[OUTPUT_LOCATION], AUXILIARY_DIR_NAME)
    od_cost_matrix_layer = join(auxiliary_dir, OD_COST_MATRIX_LAYER_NAME)
    od_cost_matrix_lines = join(auxiliary_dir, OD_COST_MATRIX_LINES)
    temp_adj_dbf_name = "%s~.dbf" % adj_dbf_name[:-4]  # strip the ".dbf" extension
    temp_adj_dbf = join(inputs[OUTPUT_LOCATION], temp_adj_dbf_name)
    partial_adj_dbf = join(auxiliary_dir, PARTIAL_ADJACENCY_LIST_NAME)
    polygons = join(auxiliary_dir, POLYGONS_SHAPEFILE_NAME)
    raster = join(auxiliary_dir, RASTER_NAME)
    polygons_layer = join(auxiliary_dir, POLYGONS_LAYER_NAME)
    input_points_layer = join(auxiliary_dir, INPUT_POINTS_LAYER_NAME)
    for delete_path in [input_points_layer, polygons_layer, raster, polygons,
        partial_adj_dbf, temp_adj_dbf, od_cost_matrix_lines,
        od_cost_matrix_layer, auxiliary_dir]:
      delete(delete_path)

  try:
    """
    Here we carry out the six steps of the tool
    """
    # Step 1
    if success:
      AddMessage(STEP_1_STARTED)
      # If necessary, convert input buildings to point feature class
      if buildings_description.shapeType == "Polygon":
        AddMessage(POINT_CONVERSION_STARTED)
        to_point_feature_class(output_feature_class, inputs[INPUT_POINTS],
            inputs[POINT_LOCATION])
        AddMessage(POINT_CONVERSION_FINISHED)
      if Exists(adj_dbf):
        AddMessage(ADJACENCY_LIST_COMPUTED)
        if node_locations_needed:
          calculate_network_locations(inputs[INPUT_POINTS],
              inputs[INPUT_NETWORK])
        AddMessage(STEP_1_FINISHED)
      else:
        try:
          compute_adjacency_list(inputs[INPUT_POINTS], inputs[INPUT_NETWORK],
              inputs[ID_ATTRIBUTE], inputs[IMPEDANCE_ATTRIBUTE],
              inputs[ACCUMULATOR_ATTRIBUTES], inputs[SEARCH_RADIUS],
              inputs[OUTPUT_LOCATION], adj_dbf_name)
          AddMessage(STEP_1_FINISHED)
        except:
          AddWarning(GetMessages(2))
          AddMessage(STEP_1_FAILED)
          success = False

    # Step 2
    if success:
      AddMessage(STEP_2_STARTED)
      try:
        distance_field = trim("Total_%s" % inputs[IMPEDANCE_ATTRIBUTE])
        accumulator_fields = set([trim("Total_%s" % accumulator_attribute)
            for accumulator_attribute in inputs[ACCUMULATOR_ATTRIBUTES].split(
            ";") if accumulator_attribute != "#"])
        # Graph representation: dictionary mapping node id's to Node objects
        nodes = {}
        # The number of rows in |adj_dbf|
        directed_edge_count = int(GetCount_management(adj_dbf).getOutput(0))
        graph_progress = Progress_Bar(directed_edge_count, 1, STEP_2)
        rows = UpdateCursor(adj_dbf)
        for row in rows:
          # Get neighboring nodes, and the distance between them
          origin_id = row.getValue(trim(ORIGIN_ID_FIELD_NAME))
          destination_id = row.getValue(trim(DESTINATION_ID_FIELD_NAME))
          distance = float(row.getValue(distance_field))
          # Make sure the nodes are recorded in the graph
          for id in [origin_id, destination_id]:
            if id not in nodes:
              nodes[id] = Node()
          # Make sure that the nodes are neighbors in the graph
          if origin_id != destination_id and distance >= 0:
            accumulations = {}
            for field in accumulator_fields:
              accumulations[field] = float(row.getValue(field))
            nodes[origin_id].add_neighbor(destination_id, distance,
              accumulations)
            nodes[destination_id].add_neighbor(origin_id, distance,
              accumulations)
          graph_progress.step()
        N = len(nodes) # The number of nodes in the graph
        if N == 0:
          AddWarning(WARNING_NO_NODES)
          success = False
        AddMessage(STEP_2_FINISHED)
      except:
        AddWarning(GetMessages(2))
        AddMessage(STEP_2_FAILED)
        success = False

    # Step 3
    if success:
      AddMessage(STEP_3_STARTED)
      try:
        get_weights = inputs[NODE_WEIGHT_ATTRIBUTE] != "#"
        get_locations = node_locations_needed
        # Keep track of number nodes in input points not present in the graph
        point_not_in_graph_count = 0
        input_point_count = int(
            GetCount_management(inputs[INPUT_POINTS]).getOutput(0))
        node_attribute_progress = Progress_Bar(input_point_count, 1, STEP_3)
        rows = UpdateCursor(inputs[INPUT_POINTS])
        for row in rows:
          id = row.getValue(inputs[ID_ATTRIBUTE])
          if id not in nodes:
            point_not_in_graph_count += 1
            continue
          if get_weights:
            setattr(nodes[id], WEIGHT,
                row.getValue(trim(inputs[NODE_WEIGHT_ATTRIBUTE])))
          if get_locations:
            snap_x = row.getValue(trim("SnapX"))
            snap_y = row.getValue(trim("SnapY"))
            setattr(nodes[id], LOCATION, (snap_x, snap_y))
          node_attribute_progress.step()
        if point_not_in_graph_count:
          AddWarning(WARNING_POINTS_NOT_IN_GRAPH(N,
              point_not_in_graph_count))
        AddMessage(STEP_3_FINISHED)
      except:
        AddWarning(GetMessages(2))
        AddMessage(STEP_3_FAILED)
        success = False

    # Step 4
    if success:
      AddMessage(STEP_4_STARTED)
      try:
        # Compute measures
        compute_centrality(nodes, selected_features, inputs[COMPUTE_REACH],
            inputs[COMPUTE_GRAVITY], inputs[COMPUTE_BETWEENNESS],
            inputs[COMPUTE_CLOSENESS], inputs[COMPUTE_STRAIGHTNESS],
            inputs[SEARCH_RADIUS], inputs[USE_NETWORK_RADIUS], inputs[BETA],
            inputs[NORMALIZE_RESULTS], accumulator_fields)
        AddMessage(STEP_4_FINISHED)
      except:
        AddWarning(GetMessages(2))
        AddMessage(STEP_4_FAILED)
        success = False

    # Step 5
    if success:
      AddMessage(STEP_5_STARTED)
      try:
        # Make output layer
        MakeFeatureLayer_management(in_features=output_feature_class,
            out_layer=output_layer_name)
        # Save output layer
        SaveToLayerFile_management(output_layer_name, output_layer,
            "ABSOLUTE")
        # Use a test node to figure out which metrics were computed
        test_node_id = selected_features.pop()
        # Make sure the test node is in the graph
        while test_node_id not in nodes:
          test_node_id = selected_features.pop()
        test_node = nodes[test_node_id]
        measures = set([measure for measure in dir(test_node) if (measure in
            FINAL_ATTRIBUTES or is_accumulator_field(measure))])
        # Add a field in the output layer for each computed metric
        for measure in measures:
          AddField_management(in_table=output_layer, field_name=trim(measure),
              field_type="DOUBLE", field_is_nullable="NON_NULLABLE")
        # Figure out the id field to use based on the type of input buildings
        if (buildings_description.shapeType == "Polygon" and
            inputs[ID_ATTRIBUTE] == ORIGINAL_FID):
          id_field = "FID"
        else:
          id_field = inputs[ID_ATTRIBUTE]
        # Fill the layer with the metric values
        write_progress = Progress_Bar(N, 1, STEP_5)
        layer_rows = UpdateCursor(output_layer)
        for row in layer_rows:
            id = row.getValue(id_field)
            for measure in measures:
              # If no value was computed for this node id, set value to 0
              value = 0
              if id in nodes and hasattr(nodes[id], measure):
                value = getattr(nodes[id], measure)
              row.setValue(trim(measure), value)
            layer_rows.updateRow(row)
            write_progress.step()
        # Save to toolbox output
        SetParameterAsText(OUTPUT_FEATURE_CLASS, output_feature_class)
        AddMessage(STEP_5_FINISHED)
      except:
        AddWarning(GetMessages(2))
        AddMessage(STEP_5_FAILED)
        success = False

    # Step 6
    if success:
      AddMessage(STEP_6_STARTED)
      # Apply symbology
      try:
        ApplySymbologyFromLayer_management(in_layer=output_layer,
            in_symbology_layer=symbology_layer)
      except:
        AddWarning(WARNING_APPLY_SYMBOLOGY_FAILED)
        AddWarning(GetMessages(2))
        AddMessage(STEP_6_FAILED)
      # Display
      try:
        current_map_document = mapping.MapDocument("CURRENT")
        data_frame = mapping.ListDataFrames(current_map_document,
            "Layers")[0]
        add_layer = mapping.Layer(output_layer)
        mapping.AddLayer(data_frame, add_layer, "AUTO_ARRANGE")
        AddMessage(STEP_6_FINISHED)
      except:
        AddWarning(WARNING_FAIL_TO_DISPLAY)
        AddWarning(GetMessages(2))
        AddMessage(STEP_6_FAILED)

    # Clean up
    clean_up()

    AddMessage(SUCCESS if success else FAILURE)

  except ExecuteAbort:
    clean_up()
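index() is defined elsewhere in this toolbox; given how it is consumed above (.next() calls yielding successive argv positions, Python 2 style), a minimal equivalent might be:

def index():
  # Yields 0, 1, 2, ...; the first value is discarded to skip sys.argv[0].
  i = 0
  while True:
    yield i
    i += 1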
Example #12
# Clear selection if we got a layer file
try:
  SelectLayerByAttribute_management(inputs[INPUT_BUILDINGS],
    "CLEAR_SELECTION")
except:
  pass

# Adjacency List table name
node_locations_needed = (inputs[COMPUTE_STRAIGHTNESS] or
    not inputs[USE_NETWORK_RADIUS])
adj_dbf_name = ("%s_%s_%s_%s_%s_%s.dbf" % (ADJACENCY_LIST_NAME,
    basename(inputs[INPUT_BUILDINGS]), basename(inputs[INPUT_NETWORK]),
    inputs[ID_ATTRIBUTE], inputs[IMPEDANCE_ATTRIBUTE],
    inputs[ACCUMULATOR_ATTRIBUTES])).replace("#", "None")
if len(adj_dbf_name) > MAX_FILE_NAME_LENGTH:
  AddWarning(WARNING_LARGE_ADJ_FILE_NAME)
adj_dbf = join(inputs[OUTPUT_LOCATION], adj_dbf_name)

# Output file names
output_feature_class_name = feature_class_name(inputs[OUTPUT_FILE_NAME])
output_feature_class = "%s.shp" % join(inputs[OUTPUT_LOCATION],
    output_feature_class_name)
# Create a feature class that is a copy of the input buildings
try:
  AddMessage(INPUT_BUILDINGS_COPY_STARTED)
  CreateFeatureclass_management(out_path=inputs[OUTPUT_LOCATION],
      out_name=output_feature_class_name)
  CopyFeatures_management(in_features=inputs[INPUT_BUILDINGS],
      out_feature_class=output_feature_class)
  AddMessage(INPUT_BUILDINGS_COPY_FINISHED)
except:
  AddWarning(GetMessages(2))
  AddMessage(INPUT_BUILDINGS_COPY_FAILED)
  success = False
Example #13
def main():
    fc = GetParameterAsText(0)
    out_table = GetParameterAsText(1)
    out_fc = GetParameterAsText(2)

    for thingy in [out_table, out_fc]:
        if Exists(thingy):
            Delete_management(thingy)

    # --------------set up reporting table for new field names-----------------
    field_name = "PRENAME"
    field_count = "NEWCOUNT"
    schema_count = "PRECOUNT"
    new_name = "NEWNAME"

    #see if the output table exists
    if not Exists(out_table):
        CreateTable_management(dirname(out_table), basename(out_table))
    else:
        DeleteRows_management(out_table)

    #see if fields already exist, if not, create them
    if not fieldExists(out_table, field_name):
        AddField_management(out_table, field_name, "TEXT", "", "", 30)

    if not fieldExists(out_table, schema_count):
        AddField_management(out_table, schema_count, "LONG")

    if not fieldExists(out_table, field_count):
        AddField_management(out_table, field_count, "SHORT")

    if not fieldExists(out_table, new_name):
        AddField_management(out_table, new_name, "TEXT", "", "", 10)

    # loop through all fields
    all_fields = ListFields(fc)

    # create name dictionary of shortened shapefile names
    name_dictionary = {}
    shortList = []  # necessary for flagging repeated field names

    for fn in all_fields:
        short_name = fn.name
        if len(fn.name) > 10:
            short_name = fn.name[0:10]

        # make sure the shortened field name doesn't already exists
        if short_name not in shortList:
            shortList.append(short_name)
            name_dictionary[fn.name] = short_name
        else:
            i = 0
            while short_name in shortList and i < 100:
                short_name = short_name[0:7] + "_" + str(i)
                i += 1
            name_dictionary[fn.name] = short_name
            shortList.append(short_name)

    # -----next step, create new feature class & add all fields----------------
    # -----for text fields, make the length the proper length------------------

    desc = Describe(fc)
    geom_type = desc.shapeType
    SR = desc.spatialReference

    # create new feature class
    CreateFeatureclass_management(dirname(out_fc), basename(out_fc), geom_type,
                                  "", "", "", SR)

    # create list to hold the names of number fields (used later)
    numFields = []
    dateFields = []

    # get the name of the OID field while looping
    oid = ""

    # loop through string fields
    for f in all_fields:
        short_name = name_dictionary[f.name]
        data_type = f.type.upper()

        # check to see if the data type is "normal"
        if data_type in [
                "TEXT", "FLOAT", "DOUBLE", "SHORT", "LONG", "DATE", "BLOB",
                "RASTER", "GUID", "STRING", "INTEGER", "SMALLINTEGER"
        ]:

            # special track for string fields
            if data_type in ["STRING", "TEXT"]:

                # set counter at 0
                i = 0

                # set up search cursor on feature class just on that field
                with SearchCursor(fc, [f.name]) as rows:
                    for row in rows:
                        if row[0] is not None:
                            # loop through values to get the longest length
                            if len(row[0]) > i:
                                i = len(row[0])

                # make sure i isn't bigger than 254
                if i > 254:
                    i = 254

                # at this point, i equals the length of the longest field entry

                # insert the field name and the length into the output table
                cursor = InsertCursor(
                    out_table,
                    (field_name, field_count, schema_count, new_name))
                new_row = (f.name, i, f.length, short_name)
                cursor.insertRow(new_row)

                del row, rows, cursor, new_row

                # add a row to the new feature class
                AddField_management(out_fc, short_name, "TEXT", "", "", i)

            # track for numbers, GUIDs & dates
            else:
                AddField_management(out_fc, short_name, data_type)

                # if it's a number, record the field name in the num field list
                if data_type in [
                        "SHORT", "LONG", "INTEGER", "FLOAT", "DOUBLE"
                ]:
                    numFields.append(f.name)
                elif data_type in ["DATE"]:
                    dateFields.append(f.name)

                #make sure all fields are in the translation table
                cursor = InsertCursor(out_table, (field_name, new_name))
                new_row = (f.name, short_name)
                cursor.insertRow(new_row)
                del cursor, new_row

        elif data_type == "OID":
            AddField_management(out_fc, "LinkOID", "INTEGER")
            name_dictionary[f.name] = "LinkOID"  # add for field mapping
            oid = f.name

            # add link field for object ID to the mapping table
            cursor = InsertCursor(out_table, (field_name, new_name))
            new_row = (f.name, "LinkOID")
            cursor.insertRow(new_row)
            del cursor, new_row
        elif data_type == "GEOMETRY":
            pass
        else:
            print("Field " + f.name + " is type " + f.type +
                  ". It will not be copied over.")
            AddWarning("Field " + f.name + " is type " + f.type +
                       ". It will not be copied over.")
            del name_dictionary[f.name]

    # -----copy data into the new FC-------------------------------------------

    # set up field lists for search & insert cursors
    oldFields, newFields = [], []

    for field in name_dictionary.keys():
        oldFields.append(field)
        newFields.append(name_dictionary[field])

    # set up a text only version of the fields
    oldFieldsTextOnly = tuple(oldFields)
    newFieldsTextOnly = tuple(newFields)

    # add SHAPE to the original set of fields
    oldFields.append("SHAPE@")
    newFields.append("SHAPE@")

    # convert the new field list to a tuple, safety first
    newFields = tuple(newFields)  # this is the one with the shape field

    # create a list of the indexes of number & date fields
    numFieldsIndexList, dateFieldsIndexList = [], []
    for numF in numFields:
        numFieldsIndexList.append(oldFields.index(numF))
    for dateF in dateFields:
        dateFieldsIndexList.append(oldFields.index(dateF))

    # ran into an issue with invalid geometry, so here's the workaround
    invalidDict = {"point": 1, "polyline": 2, "polygon": 3}

    # set up reporting for records that didn't copy
    didNotCopy = []

    # fill new rows with old rows
    with SearchCursor(fc, oldFields) as rows:
        for row in rows:
            geomIndex = oldFields.index("SHAPE@")
            geom = row[geomIndex]
            objectID = str(row[oldFields.index(oid)])
            try:

                try:
                    # find the minimum number of required points
                    minNum = invalidDict[geom_type.lower()]

                    # get the count of points in the geometry
                    count = geom.pointCount

                    # if the count is smaller than the minimum number, there's a problem
                    if count < minNum:
                        wc = oid + " = " + objectID
                        # here, we won't copy the geometry, only the fields
                        userMessage("point count below minimum; copying attributes only")
                        with SearchCursor(fc, oldFieldsTextOnly, wc) as rows2:
                            for row2 in rows2:
                                makeRow(out_fc, newFieldsTextOnly, row2,
                                        numFieldsIndexList,
                                        dateFieldsIndexList)
                        del row2, rows2, wc

                    else:
                        # excellent, the record is normal & will copy
                        makeRow(out_fc, newFields, row, numFieldsIndexList,
                                dateFieldsIndexList)

                except Exception as e:
                    userMessage(str(e))
                    # if we're in this area, it means the record has no geometry
                    wc = oid + " = " + objectID
                    with SearchCursor(fc, oldFieldsTextOnly, wc) as rows2:
                        for row2 in rows2:
                            makeRow(out_fc, newFieldsTextOnly, row2,
                                    numFieldsIndexList, dateFieldsIndexList)
                    del row2, rows2, wc

            except Exception as e:
                userMessage(str(e))
                # for whatever reason, the record did not copy
                userMessage("Error copying record ObjectID " + objectID)
                didNotCopy.append(objectID)

    if didNotCopy != []:
        userMessage("These records did not copy- %s %s" %
                    (oid, ", ".join(didNotCopy)))

    userMessage("Skinny shapefile complete.")
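The renaming loop above enforces the 10-character shapefile field-name limit and disambiguates collisions with a numeric suffix; the core logic in isolation:

def shorten(name, taken):
    # Truncate to 10 characters, then add a _0, _1, ... suffix on collision.
    short = name[:10]
    i = 0
    while short in taken and i < 100:
        short = short[:7] + "_" + str(i)
        i += 1
    taken.add(short)
    return short

taken = set()
print(shorten("PopulationDensity", taken))  # Population
print(shorten("PopulationCount", taken))    # Populat_0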
Example #14
def main():
    # tool inputs
    INPUT_NETWORK = argv[1]
    INPUT_POINTS = argv[2]
    INPUT_ORIGINS_FIELD = argv[3]
    INPUT_DESTINATIONS_FIELD = argv[4]
    INPUT_COEFF = float(argv[5])
    INPUT_SEARCH_RADIUS = float(argv[6]) if is_number(
        argv[6]) else float('inf')
    INPUT_OUTPUT_DIRECTORY = argv[7]
    INPUT_OUTPUT_FEATURE_CLASS_NAME = argv[8]
    INPUT_COMPUTE_WAYFINDING = argv[9] == "true"
    INPUT_VISUALIZATION = argv[10]

    # check that network has "Length" attribute
    if "Length" not in network_cost_attributes(INPUT_NETWORK):
        AddError("Network <%s> does not have Length attribute" % INPUT_NETWORK)
        return

    # check that coeff is at least 1
    if INPUT_COEFF < 1:
        AddError("Redundancy coefficient <%s> must be at least 1" %
                 INPUT_COEFF)
        return

    # extract origin and destination ids
    origin_ids = flagged_points(INPUT_POINTS, INPUT_ORIGINS_FIELD)
    if len(origin_ids) != 1:
        AddError("Number of origins <%s> must be 1" % len(origin_ids))
        return
    origin_id = origin_ids[0]
    destination_ids = flagged_points(INPUT_POINTS, INPUT_DESTINATIONS_FIELD)
    if len(destination_ids) == 0 or origin_ids == destination_ids:
        AddWarning("No OD pair found, no computation will be done")
        return

    # check that the output file does not already exist
    output_feature_class = "%s.shp" % join(INPUT_OUTPUT_DIRECTORY,
                                           INPUT_OUTPUT_FEATURE_CLASS_NAME)
    if Exists(output_feature_class):
        AddError("Output feature class <%s> already exists" %
                 output_feature_class)
        return

    # obtain visualization method
    visualize_segments = visualize_polylines = False
    if INPUT_VISUALIZATION == "Unique Segments":
        visualize_segments = True
    elif INPUT_VISUALIZATION == "Path Polylines":
        visualize_polylines = True
    elif INPUT_VISUALIZATION != "None":
        AddError("Visualization method <%s> must be one of 'Unique Segments', "
                 "'Path Polylines', or 'None'" % INPUT_VISUALIZATION)
        return

    # setup
    env.overwriteOutput = True

    # construct network and points
    network, points, edge_to_points = construct_network_and_load_buildings(
        INPUT_POINTS, INPUT_NETWORK)

    # find redundant paths for each origin-destination
    AddMessage("Computing redundant paths ...")
    progress_bar = Progress_Bar(len(destination_ids), 1, "Finding paths ...")
    # build output table one row at a time, starting from header row
    answers = [["OrigID", "DestID", "NumPaths", "Redundancy"]]
    if INPUT_COMPUTE_WAYFINDING:
        answers[0].append("Wayfinding")
    # visualization state
    if visualize_polylines:
        polylines = []
        polyline_data = []
    elif visualize_segments:
        all_unique_segment_counts = defaultdict(int)
    for destination_id in destination_ids:
        if origin_id != destination_id:
            all_paths = find_all_paths(network, points, INPUT_COEFF, origin_id,
                                       destination_id, INPUT_SEARCH_RADIUS,
                                       INPUT_COMPUTE_WAYFINDING)
            if all_paths is not None:
                if INPUT_COMPUTE_WAYFINDING:
                    (all_path_points, unique_segment_counts, num_paths,
                     redundancy, waypoint) = all_paths
                    answers.append([
                        origin_id, destination_id, num_paths, redundancy,
                        waypoint
                    ])
                else:
                    (all_path_points, unique_segment_counts, num_paths,
                     redundancy) = all_paths
                    answers.append(
                        [origin_id, destination_id, num_paths, redundancy])
                if visualize_polylines:
                    for i, path_points in enumerate(all_path_points):
                        polylines.append(
                            Polyline(
                                Array([
                                    Point(*coords) for coords in path_points
                                ])))
                        polyline_data.append((origin_id, destination_id, i))
                elif visualize_segments:
                    for edge_id in unique_segment_counts:
                        all_unique_segment_counts[
                            edge_id] += unique_segment_counts[edge_id]
        progress_bar.step()
    AddMessage("\tDone.")

    # write out results
    if len(answers) > 1:
        AddMessage("Writing out results ...")
        # write out to a table
        write_rows_to_csv(answers, INPUT_OUTPUT_DIRECTORY,
                          INPUT_OUTPUT_FEATURE_CLASS_NAME)
        # visualize
        if visualize_polylines:
            CopyFeatures_management(polylines, output_feature_class)
            data_fields = ["OrigID", "DestID", "PathID"]
            for field in data_fields:
                AddField_management(in_table=output_feature_class,
                                    field_name=field,
                                    field_type="INTEGER")
            rows = UpdateCursor(output_feature_class, data_fields)
            for j, row in enumerate(rows):
                row[0], row[1], row[2] = polyline_data[j]
                rows.updateRow(row)
            # create a layer of the polylines shapefile and symbolize
            polylines_layer_name = "%s_layer" % INPUT_OUTPUT_FEATURE_CLASS_NAME
            polylines_layer = "%s.lyr" % join(INPUT_OUTPUT_DIRECTORY,
                                              INPUT_OUTPUT_FEATURE_CLASS_NAME)
            MakeFeatureLayer_management(output_feature_class,
                                        polylines_layer_name)
            SaveToLayerFile_management(polylines_layer_name, polylines_layer,
                                       "ABSOLUTE")
            ApplySymbologyFromLayer_management(
                polylines_layer,
                join(path[0],
                     r"Symbology_Layers\sample_polylines_symbology.lyr"))
            add_layer_to_display(polylines_layer)
        elif visualize_segments:
            id_mapping, edges_file = select_edges_from_network(
                INPUT_NETWORK, all_unique_segment_counts.keys(),
                INPUT_OUTPUT_DIRECTORY,
                "%s_edges" % INPUT_OUTPUT_FEATURE_CLASS_NAME)
            AddField_management(in_table=edges_file,
                                field_name="PathCount",
                                field_type="INTEGER")
            rows = UpdateCursor(edges_file, ["OID@", "PathCount"])
            for row in rows:
                row[1] = all_unique_segment_counts[id_mapping[row[0]]]
                rows.updateRow(row)
        AddMessage("\tDone.")
    else:
        AddMessage("No results to write out.")
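For the "Unique Segments" visualization, per-destination segment counts are merged into one defaultdict; the aggregation pattern by itself:

from collections import defaultdict

all_unique_segment_counts = defaultdict(int)
for counts in [{"e1": 2, "e2": 1}, {"e2": 3}]:  # hypothetical per-destination counts
    for edge_id, n in counts.items():
        all_unique_segment_counts[edge_id] += n
print(dict(all_unique_segment_counts))  # {'e1': 2, 'e2': 4}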
Example #15
# Convert values from text to float
floLatitude = float(strLatitude)
floLongitude = float(strLongitude)

# Make a Point Geometry object.
try:
    ptPointOfInterest = Point(X=floLongitude,
                              Y=floLatitude,
                              Z=None,
                              M=None,
                              ID=0)
    spatial_ref = SpatialReference(4269)
    ptGeometry = PointGeometry(ptPointOfInterest, spatial_ref)
except:
    strErrorMsg = "Error creating Point or PointGeometry objects."
    AddWarning(strErrorMsg)
    SetParameterAsText(6, strErrorMsg)
    sys.exit()

# Open Output File for use
try:
    fhand = open(strFilePath, 'w')
except:
    strErrorMsg = "File did not open"
    AddWarning(strErrorMsg)
    SetParameterAsText(6, strErrorMsg)
    sys.exit()

# Make a dictionary of layer name and layer variable for iteration purposes
dictLayers = {"zip": flZCTA, "lepc": flLEPC, "county": flCounty}
dictNames = {}
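EPSG 4269 above is NAD83, not WGS84; if WGS84 coordinates are needed downstream, the geometry can be reprojected with projectAs (a sketch reusing the ptGeometry object created above):

wgs84_geom = ptGeometry.projectAs(SpatialReference(4326))
AddMessage("WGS84 point: {}, {}".format(wgs84_geom.firstPoint.X,
                                        wgs84_geom.firstPoint.Y))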
Example #16
def compute_centrality(nodes, origins, compute_r, compute_g, compute_b,
    compute_c, compute_s, radius, network_radius, beta, measures_to_normalize,
    accumulator_fields):
  """
  Computes reach, gravity, betweenness, closeness, and straightness on a graph.
  |nodes|: graph representation; dictionary mapping node id's to |Node| objects
  |origins|: subset of nodes that will be used as sources of shortest path trees
  |compute_r|: compute reach?
  |compute_g|: compute gravity type index?
  |compute_b|: compute betweenness?
  |compute_c|: compute closeness?
  |compute_s|: compute straightness?
  |radius|: for each node, only consider other nodes that can be reached within
      this distance
  |network_radius|: use network radius or birds-eye radius?
  |beta|: parameter for gravity type index
  |measures_to_normalize|: a list of measures to normalize
  |accumulator_fields|: a list of cost attributes to accumulate
  """

  # Number of nodes in the graph
  N = len(nodes)
  O = len(origins)
  if O > N:
    raise Invalid_Parameters_Exception("size of origins exceeds size of nodes")
  elif O == 0:
    return

  # Preprocessing
  have_accumulations = len(accumulator_fields) > 0
  if have_accumulations:
    empty_accumulations = lambda: dict((field, 0.0) for field in
        accumulator_fields)
  have_locations = hasattr(nodes.values()[0], LOCATION)
  if compute_s and not have_locations:
    # We cannot compute straightness without node locations
    compute_s = False
  if compute_b:
    # Initialize betweenness values
    for id in nodes:
      setattr(nodes[id], BETWEENNESS, 0.0)

  # Initialize the sum of all node weights (normalization)
  sum_weights = 0.0

  # Computation
  progress = Progress_Bar(O, 1, STEP_4)
  for s in origins:
    if s not in nodes:
      continue
    weight_s = getattr(nodes[s], WEIGHT)
    if have_locations: location_s = getattr(nodes[s], LOCATION)

    sum_weights += weight_s

    # Initialize reach (weighted and unweighted) computation for |s|
    #     (normalization)
    reach_s = -1
    weighted_reach_s = -weight_s

    # Initialize measures
    if compute_g: gravity_s = 0.0
    if compute_b:
      P = {s: []} # Predecessors
      S = [] # Stack containing nodes in the order they are extended
      sigma = {s: 1.0} # Number of shortest paths from |s| to other nodes
      delta = {} # Dependency of |s| on other nodes
    if compute_c: d_sum_s = 0.0
    if compute_s: straightness_s = 0.0
    if have_accumulations:
      accumulations_s = {s: empty_accumulations()}

    d = {s: 0.0} # Shortest distance from |s| to other nodes
    # Queue for Dijkstra
    Q = [(0.0, s)] if network_radius else [(0.0, s, 0.0)]

    # If we use euclidean radius, make a list of all reachable nodes
    if not network_radius:
      reachable_s = set()
      for t in nodes:
        location_t = getattr(nodes[t], LOCATION)
        if dist(location_s, location_t) <= radius:
          reachable_s.add(t)

    # Dijkstra
    while Q and (True if network_radius else reachable_s):
      # Pop the closest node to |s| from |Q|
      if network_radius:
        d_sv, v = heappop(Q)
      else:
        d_sv, v, dist_sv = heappop(Q)
        if v in reachable_s:
          reachable_s.remove(v)
      weight_v = getattr(nodes[v], WEIGHT)
      if have_locations: location_v = getattr(nodes[v], LOCATION)

      compute = network_radius or dist_sv <= radius
      if compute:
        reach_s += 1
        weighted_reach_s += weight_v
        if d_sv > 0:
          if compute_g: gravity_s += weight_v * exp(-d_sv * beta)
          if compute_c: d_sum_s += weight_v * d_sv
          if compute_s: straightness_s += (weight_v *
              dist(location_s, location_v) / d_sv)
        if compute_b: S.append(v)

      for w, d_vw, accumulations_vw in getattr(nodes[v], NEIGHBORS):
        # s ~ ... ~ v ~ w
        d_sw = d_sv + d_vw
        if not network_radius:
          # Use Euclidean distance
          location_w = getattr(nodes[w], LOCATION)
          dist_sw = dist(location_s, location_w)

        if compute_b: b_refresh = False

        add_w_to_Q = False

        if w not in d: # Found a path from |s| to |w| for the first time
          if d_sw <= radius or not network_radius:
            add_w_to_Q = True
          d[w] = d_sw
          if compute_b: b_refresh = True

        elif lt_tol(d_sw, d[w]): # Found a better path from |s| to |w|
          if d_sw <= radius or not network_radius:
            if d[w] <= radius or not network_radius:
              longer_path_node = (d[w], w) if network_radius else (d[w], w,
                  dist_sw)
              Q.remove(longer_path_node)
              heapify(Q)
            add_w_to_Q = True
          d[w] = d_sw
          if compute_b: b_refresh = True

        if add_w_to_Q:
          new_node = (d_sw, w) if network_radius else (d_sw, w, dist_sw)
          heappush(Q, new_node)
          if have_accumulations:
            accumulations_s[w] = merge_maps(accumulations_s[v],
                dict(accumulations_vw), add)

        if compute_b:
          if b_refresh:
            sigma[w] = 0.0
            P[w] = []
          if eq_tol(d_sw, d[w]): # Count all shortest paths from |s| to |w|
            sigma[w] += sigma[v] # Update the number of shortest paths
            P[w].append(v) # |v| is a predecessor of |w|
            delta[v] = 0.0 # Recognize |v| as a predecessor

    if compute_r: setattr(nodes[s], REACH, weighted_reach_s)
    if compute_g: setattr(nodes[s], GRAVITY, gravity_s)
    if compute_b:
      while S: # Revisit nodes in reverse order of distance from |s|
        w = S.pop()
        delta_w = delta[w] if w in delta else 0.0 # Dependency of |s| on |w|
        for v in P[w]:
          weight_w = getattr(nodes[w], WEIGHT)
          delta[v] += sigma[v] / sigma[w] * (weight_w + delta_w)
        if w != s:
          between_w = getattr(nodes[w], BETWEENNESS)
          setattr(nodes[w], BETWEENNESS, between_w + delta_w)
    if compute_c: setattr(nodes[s], CLOSENESS, (1.0 / d_sum_s if d_sum_s > 0
        else 0.0))
    if compute_s: setattr(nodes[s], STRAIGHTNESS, straightness_s)

    nodes[s].reach = reach_s
    nodes[s].weighted_reach = weighted_reach_s

    if have_accumulations:
      total_accumulations_s = empty_accumulations()
      for v in accumulations_s:
        total_accumulations_s = merge_maps(total_accumulations_s,
            accumulations_s[v], add)
      for field in accumulator_fields:
        setattr(nodes[s], field, total_accumulations_s[field])

    progress.step()

  # Normalization
  if BETWEENNESS in measures_to_normalize and O < N:
    measures_to_normalize.remove(BETWEENNESS)
    AddWarning(WARNING_NO_BETWEENNESS_NORMALIZATION)
  if measures_to_normalize:
    norm_progress = Progress_Bar(O, 1, PROGRESS_NORMALIZATION)
    for s in origins:
      if s not in nodes:
        continue
      reach_s = nodes[s].reach
      weighted_reach_s = nodes[s].weighted_reach

      # Normalize reach
      if compute_r and REACH in measures_to_normalize:
        weight_s = getattr(nodes[s], WEIGHT)
        try: setattr(nodes[s], NORM_REACH, reach_s / (sum_weights - weight_s))
        except: setattr(nodes[s], NORM_REACH, 0.0)

      # Normalize gravity
      if compute_g and GRAVITY in measures_to_normalize:
        gravity_s = getattr(nodes[s], GRAVITY)
        try: setattr(nodes[s], NORM_GRAVITY, (exp(beta) * gravity_s /
            weighted_reach_s))
        except: setattr(nodes[s], NORM_GRAVITY, 0.0)

      # Normalize betweenness
      if compute_b and BETWEENNESS in measures_to_normalize:
        betweenness_s = getattr(nodes[s], BETWEENNESS)
        try: setattr(nodes[s], NORM_BETWEENNESS, (betweenness_s /
            (weighted_reach_s * (reach_s - 1))))
        except: setattr(nodes[s], NORM_BETWEENNESS, 0.0)

      # Normalize closeness
      if compute_c and CLOSENESS in measures_to_normalize:
        closeness_s = getattr(nodes[s], CLOSENESS)
        try: setattr(nodes[s], NORM_CLOSENESS, closeness_s * weighted_reach_s)
        except: setattr(nodes[s], NORM_CLOSENESS, 0.0)

      # Normalize straightness
      if compute_s and STRAIGHTNESS in measures_to_normalize:
        straightness_s = getattr(nodes[s], STRAIGHTNESS)
        try: setattr(nodes[s], NORM_STRAIGHTNESS, (straightness_s /
            weighted_reach_s))
        except: setattr(nodes[s], NORM_STRAIGHTNESS, 0.0)

      norm_progress.step()
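The traversal above is Dijkstra's algorithm over a heap of (distance, node) tuples; a stripped-down sketch of that core, assuming a simple adjacency mapping of node id to (neighbor, edge_length) pairs:

from heapq import heappush, heappop

def shortest_distances(neighbors, s, radius=float("inf")):
  # neighbors: dict mapping node id -> list of (neighbor_id, edge_length)
  d = {s: 0.0}
  Q = [(0.0, s)]
  while Q:
    d_sv, v = heappop(Q)
    if d_sv > d[v]:
      continue  # stale entry (the code above removes it and re-heapifies instead)
    for w, d_vw in neighbors[v]:
      d_sw = d_sv + d_vw
      if d_sw <= radius and (w not in d or d_sw < d[w]):
        d[w] = d_sw
        heappush(Q, (d_sw, w))
  return d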
Example #17
    def execute(self, parameters, messages):
        """Runs the script"""

        # Get the user's input
        fc = parameters[0].valueAsText
        field_mappings = parameters[1].valueAsText
        fields = parameters[1].valueAsText.split(';')
        fields.append('SHAPE@XY')
        output_dir = parameters[2].valueAsText
        output_name = parameters[3].valueAsText
        convert_to_wgs84 = self.toBool(parameters[4].valueAsText)
        convert_to_geojson = self.toBool(parameters[5].valueAsText)
        convert_to_kmz = self.toBool(parameters[6].valueAsText)
        convert_to_csv = self.toBool(parameters[7].valueAsText)
        convert_metadata = self.toBool(parameters[8].valueAsText)
        debug = self.toBool(parameters[9].valueAsText)

        # Setup vars
        output_path = output_dir + '\\' + output_name
        shp_output_path = output_dir + '\\shapefile'
        shp_temp_output_path = output_dir + '\\shapefile\\temp\\'
        shapefile = shp_output_path + '\\' + output_name + '.shp'
        temp_shapefile = shp_output_path + '\\temp\\' + output_name + '.shp'

        if debug:
            AddMessage('Field infos:')
            AddMessage(field_mappings)

        try:
            arcpy.Delete_management('temp_layer')
        except:
            if debug:
                AddMessage('Did not have a temp_layer feature ' +
                           'class to delete')

        if not os.path.exists(shp_output_path):
            os.makedirs(shp_output_path)
            if debug:
                AddMessage('Created directory ' + shp_output_path)

        if not os.path.exists(shp_temp_output_path):
            os.makedirs(shp_temp_output_path)
        else:
            for file in os.listdir(shp_temp_output_path):
                file_path = os.path.join(shp_temp_output_path, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except:
                    AddWarning('Unable to delete ' + file +
                               ' from the temp folder. This ' +
                               'may become a problem later')

        arcpy.MakeFeatureLayer_management(fc, 'temp_layer', '', '',
                                          field_mappings)
        arcpy.CopyFeatures_management('temp_layer', temp_shapefile)

        if convert_to_wgs84:
            AddMessage('Converting spatial reference to WGS84...')
            arcpy.Project_management(
                temp_shapefile, shapefile,
                "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433],METADATA['World',-180.0,-90.0,180.0,90.0,0.0,0.0174532925199433,0.0,1262]]",
                "WGS_1984_(ITRF00)_To_NAD_1983",
                "PROJCS['NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',1968500.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-77.75],PARAMETER['Standard_Parallel_1',39.93333333333333],PARAMETER['Standard_Parallel_2',40.96666666666667],PARAMETER['Latitude_Of_Origin',39.33333333333334],UNIT['Foot_US',0.3048006096012192]]"
            )
            AddMessage('Projection conversion completed.')
        else:
            AddMessage('Exporting shapefile already in WGS84...')
            arcpy.FeatureClassToShapefile_conversion(temp_shapefile,
                                                     shp_output_path)

        try:
            arcpy.Delete_management('temp_layer')
        except:
            AddError('Unable to delete in_memory feature class')

        AddMessage('Compressing the shapefile to a .zip file...')

        export = Export(output_dir, output_name, debug)

        zip = export.zip()
        if zip:
            AddMessage('Finished creating ZIP archive')

        if convert_to_geojson:
            AddMessage('Converting to GeoJSON...')
            output = output_path + '.geojson'
            geojson = esri2open.toOpen(shapefile,
                                       output,
                                       includeGeometry='geojson')
            if geojson:
                AddMessage('Finished converting to GeoJSON')

        if convert_to_kmz:
            AddMessage('Converting to KML...')
            kmz = export.kmz()
            if kmz:
                AddMessage('Finished converting to KMZ')

        if convert_to_csv:
            AddMessage('Converting to CSV...')
            csv = export.csv()
            if csv:
                AddMessage('Finished converting to CSV')

        if convert_metadata:
            AddMessage('Converting metadata to Markdown ' +
                       'README.md file...')
            md = export.md()
            if md:
                AddMessage('Finished converting metadata to ' +
                           'Markdown README.md file')

        # Delete the /temp directory because we're done with it
        shutil.rmtree(shp_output_path + '\\temp')
        if debug:
            AddMessage('Deleted the /temp folder because we don\'t' +
                       ' need it anymore')

        return
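self.toBool is not shown in this example; since valueAsText yields strings like 'true'/'false', a plausible (hypothetical) implementation on the tool class would be:

def toBool(self, value):
    # Hypothetical helper: tool parameters arrive as text, so treat
    # 'true' (any casing) as True and everything else as False.
    return str(value).lower() == 'true'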