def find_mean_center_of_movement(gdb, in_fc, team_dict):
    print('Creating mean center of movement for entire game')
    layer = 'in_memory\\mean_center_team_lyr'
    #query = '"TEAM_ID" = ' + str(team)
    arcpy.MakeFeatureLayer_management(in_fc, layer)
    arcpy.MeanCenter_stats(layer, os.path.join(gdb, 'MeanCenterOfAction'))
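A minimal call sketch for the function above (not from the original source): the geodatabase path and feature class name are hypothetical, and team_dict is accepted but unused by the function body.

import os
import arcpy

gdb = r'C:\data\game.gdb'                      # hypothetical geodatabase
in_fc = os.path.join(gdb, 'PlayerPositions')   # hypothetical point feature class
find_mean_center_of_movement(gdb, in_fc, team_dict={})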
Example #2
import arcpy
from arcpy.sa import *


def f1():

    ##########
    arcpy.env.workspace = ""
    rasters = arcpy.ListRasters()
    mask = ""
    for raster in rasters:
        out = ExtractByMask(raster, mask)   # extract by mask
        out.save("xxx/_34.tif")

        arcpy.Resample_management(raster, out, "xres yres", "BILINEAR")  # resample; method is one of "NEAREST", "BILINEAR", "CUBIC", "MAJORITY"

        arcpy.ExtractSubDataset_management("xxx.hdf", "outfile.tif", "2")   # extract a subdataset; the third parameter picks which subdataset (band) to extract

        layer = ""
        arcpy.MakeNetCDFRasterLayer_md(raster, "precipitation", "lon", "lat", layer)      # make a raster layer from a netCDF file
        arcpy.CopyRaster_management(layer, out, format="TIFF")  # save the layer as a raster

        ExtractValuesToPoints(mask, raster, out, "INTERPOLATE", "VALUE_ONLY")  # extract raster values to points; interpolation "NONE"/"INTERPOLATE", attributes "VALUE_ONLY"/"ALL"

        out = SetNull(raster, raster, "VALUE = -3000")  # set cells that satisfy the condition to NoData

        out = CellStatistics(rasters, "SUM", "NODATA")   # cell statistics: MEAN/MAJORITY/MAXIMUM/MEDIAN/MINIMUM/MINORITY/RANGE/STD/SUM/VARIETY; "DATA" ignores NoData cells, "NODATA" does not
        out.save("xxx.img")

        arcpy.Delete_management(raster)  # delete a dataset

        rasters = arcpy.ListRasters()   # rename datasets (ListRasters returns names, not Raster objects)
        for raster in rasters:
            arcpy.Rename_management(raster, "xxx.tif")

        arcpy.TableToExcel_conversion(mask, "xxx.xls")  # table to Excel

        arcpy.DirectionalDistribution_stats(raster, out, "1_STANDARD_DEVIATION", "xxx", "#")  # standard deviational ellipse
        arcpy.MeanCenter_stats(raster, out, "xxx", "#", "#")    # mean center
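As a self-contained, runnable variant of the CellStatistics pattern in the cheat sheet above (workspace and file names are hypothetical; a Spatial Analyst license is assumed):

# Sum a stack of rasters into one output raster.
import arcpy
from arcpy.sa import CellStatistics

arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = r"C:\data\rasters"           # hypothetical folder
monthly = arcpy.ListRasters("prcp_*")              # hypothetical name pattern
annual = CellStatistics(monthly, "SUM", "DATA")    # "DATA": skip NoData cells
annual.save(r"C:\data\rasters\prcp_annual.tif")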
Example #3
def calcVS(unsnfltpts, bldg_veg_mask, ct, datapath):
    """
    Calculates the cumulative viewshed from remaining unseen flight points. Generates Euclidean distance
    to mean center table.
    :param unsnfltpts: List of remaining unseen flight points
    :param bldg_veg_mask: Binary mask to remove invalid observer surfaces from cumulative VS raster
    :param ct: Pass number
    :param datapath:  Path to input/output directory
    """

    print('Calculating cumulative viewshed for {} unseen flight points...'.
          format(len(unsnfltpts)))
    # Make chunks of flight points
    usfp_chunks = makechunks(unsnfltpts, 500)
    print('Flight point chunks generated')
    print([len(chunk) for chunk in usfp_chunks])
    chunk_sums = []
    chunkpass = 1
    # Sum each chunk of single viewshed rasters
    for chunk in usfp_chunks:
        print('Chunksum operation {} on {} flight points...'.format(
            chunkpass, len(chunk)))
        # Convert each single-viewshed raster to 1/0 (NoData -> 0) to avoid NoData holes in the sum
        chunkgen = [Con(IsNull(arcpy.Raster(datapath + "vs_" + str(usfp))), 0,
                        1) for usfp in chunk]
        chunkstats = arcpy.sa.CellStatistics(chunkgen, 'SUM', 'NODATA')
        chunk_sums.append(chunkstats)
        print('...Done.')
        chunkpass += 1
    # Sum chunks
    sumrast = arcpy.sa.CellStatistics(chunk_sums, 'SUM', 'NODATA')
    sumrast.save(datapath + "vs_pass_" + str(ct) + "_unmasked")
    print('Unmasked cumulative viewshed saved.')

    # mask out buildings and vegetation
    # set Bldg_Veg_Mask cells to 0
    unmasked = arcpy.Raster(datapath + "vs_pass_" + str(ct) + "_unmasked")
    cumulative_masked = unmasked * bldg_veg_mask
    print('Invalid observer surfaces masked.')
    # set 0 value cells to Null
    cumulative_masked = SetNull(cumulative_masked == 0, cumulative_masked)
    print('Setting null values.')
    # save to .GDB as cumulative raster
    cumulative_masked.save(datapath + "vs_pass_" + str(ct))
    print('Masked cumulative viewshed saved.')

    # Convert raster to points with number views for VS pass and X Y location
    vs_total_pts_ = datapath + "vs_pass_" + str(ct) + "_pts"
    arcpy.RasterToPoint_conversion(cumulative_masked, vs_total_pts_)
    arcpy.AddGeometryAttributes_management(vs_total_pts_, ['POINT_X_Y_Z_M'])
    print('Viewshed points for pass {} generated'.format(ct))
    # Find mean center of cumulative viewshed for pass, save as feature class
    vs_center_ = datapath + "vs_pass_" + str(ct) + "_cntr"
    arcpy.MeanCenter_stats(vs_total_pts_, vs_center_)
    print('Mean center calculated.')
    # Calculate distance of each observation from centroid of observer masspoints
    vs_dist_ = datapath + "vs_pass_" + str(ct) + "_dist"
    arcpy.PointDistance_analysis(vs_total_pts_, vs_center_, vs_dist_)
    print('Observer distances table calculated.')
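makechunks is referenced above but not shown; a plausible implementation, assuming it simply slices the point list into fixed-size pieces:

def makechunks(items, size):
    """Hypothetical helper: split a list into successive chunks of at most `size` items."""
    return [items[i:i + size] for i in range(0, len(items), size)]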
def find_mean_center_of_movement_per_quarter(gdb, in_fc, team_dict, quarters):
    for quarter in quarters:
        print('Creating mean center of movement for quarter ' + str(quarter))
        layer = 'in_memory\\mean_center_team_lyr_' + str(quarter)
        query = '"QUARTER" = ' + str(quarter)
        arcpy.MakeFeatureLayer_management(in_fc, layer, query)
        arcpy.MeanCenter_stats(
            layer,
            os.path.join(gdb, 'MeanCenterOfAction_Quarter_' + str(quarter)))
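A matching hypothetical call for the per-quarter variant (path and quarter values are assumptions):

find_mean_center_of_movement_per_quarter(r'C:\data\game.gdb',
                                         r'C:\data\game.gdb\PlayerPositions',
                                         team_dict={}, quarters=[1, 2, 3, 4])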
Example #5
def aa():
    # standard deviational ellipse
    in_shp = "F:/MSA/RAIN/IRG.shp"
    for i in range(14, 15):
        for j in range(4, 13):
            output = "F:/MSA/OUT/" + "IME" + str((2000 + i) * 100 + j) + ".shp"
            arcpy.DirectionalDistribution_stats(
                in_shp, output, "1_STANDARD_DEVIATION",
                "M" + str((2000 + i) * 100 + j), "#")
    # mean center (center of gravity)
    in_shp = "F:/MSA/RAIN/IRG.shp"
    for i in range(15, 15):  # NOTE: range(15, 15) is empty, so this block never runs as written
        for j in range(4, 5):
            output = "F:/MSA/RAIN/CENTERS/" + "RSE" + str(2000 +
                                                          i) + str(j) + ".shp"
            arcpy.MeanCenter_stats(in_shp, output, "S" + str(2000 + i) + str(j),
                                   "#", "#")
Example #6
def f6():
    arcpy.env.workspace = r"F:/SA/RAIN/RAIN3/ELLIPSE/"
    I = 'I'
    R = 'R'
    RGS = 'RGS'
    arcpy.MakeTableView_management("F:/SA/RAIN/RAIN3/ELLIPSE/IMERG.dbf", I)
    arcpy.MakeTableView_management("F:/SA/RAIN/RAIN3/ELLIPSE/RGS.dbf", R)
    arcpy.MakeFeatureLayer_management("F:/SA/RAIN/RAIN3/ELLIPSE/RGSnew.shp",
                                      RGS)
    arcpy.AddJoin_management(RGS, "FID", I, "OID")
    arcpy.AddJoin_management(RGS, "FID", R, "OID")
    # for i in range(1,13):
    # out="F:/SA/RAIN/RAIN3/ELLIPSE/"+'I'+'CES'+str(i)+'.shp'
    # arcpy.DirectionalDistribution_stats(RGS, out, "1_STANDARD_DEVIATION", "IMERG.S"+str(i), "#")
    # out = "F:/SA/RAIN/RAIN3/ELLIPSE/" + 'I' + 'CES' + str(i) + '.shp'
    # arcpy.MeanCenter_stats(RGS, out, "IMERG.S"+str(i), "#", "#")

    for i in range(1, 13):
        # out = "F:/SA/RAIN/RAIN3/ELLIPSE/" + 'T' + 'ES' + str(i) + '.shp'
        # arcpy.DirectionalDistribution_stats(RGS, out, "1_STANDARD_DEVIATION","RGS.S" + str(i), "#")
        out = "F:/SA/RAIN/RAIN3/ELLIPSE/" + 'T' + 'CES' + str(i) + '.shp'
        arcpy.MeanCenter_stats(RGS, out, "RGS.S" + str(i), "#", "#")
def temporal_mean_center(inFeatureClass, outFeatureClass, start_time, end_time,
                         time_interval, bin_start, weight_field, case_field,
                         dimension_field):
    """ This tool will split a feature class into multiple kernel densities based on a datetime field and a
    a set time interval. The result will be a time enabled moasic with Footprint. """
    try:
        outWorkSpace = os.path.dirname(outFeatureClass)
        if arcpy.Exists(outWorkSpace):
            arcpy.env.workspace = outWorkSpace
            arcpy.env.overwriteOutput = True
            san.arc_print(
                "The current work space is: {0}.".format(outWorkSpace), True)
            # Set up Work Space Environments
            out_workspace_path_split = os.path.split(outWorkSpace)
            workSpaceTail = out_workspace_path_split[1]
            inFeatureClassTail = os.path.split(inFeatureClass)[1]
            san.arc_print(
                "Gathering describe object information from workspace and input feature class."
            )
            ws_desc = arcpy.Describe(outWorkSpace)
            workspace_is_geodatabase = ws_desc.dataType == "Workspace" or ws_desc.dataType == "FeatureDataset"
            fin_output_workspace = outWorkSpace

            # Set up Time Deltas and Parse Time String
            san.arc_print(
                "Constructing Time Delta from input time period string.", True)
            time_magnitude, time_unit = san.alphanumeric_split(
                str(time_interval))
            time_delta = san.parse_time_units_to_dt(time_magnitude, time_unit)
            san.arc_print(
                "Using datetime fields to generate new feature classes in {0}."
                .format(str(workSpaceTail)))
            san.arc_print(
                "Getting start and final times in start time field {0}.".
                format(start_time))
            start_time_min, start_time_max = san.get_min_max_from_field(
                inFeatureClass, start_time)
            # Establish whether to use end time field or only a start time (Single Date Field)
            if san.field_exist(inFeatureClass, end_time) and end_time:
                san.arc_print(
                    "Using start and end time to grab feature classes whose bins occur within an events "
                    "start or end time.")
                end_time_min, end_time_max = san.get_min_max_from_field(
                    inFeatureClass, end_time)
                start_time_field = start_time
                end_time_field = end_time
                start_time_range = start_time_min
                end_time_range = end_time_max
            else:
                san.arc_print(
                    "Using only first datetime start field to construct time bin ranges."
                )
                start_time_field = start_time
                end_time_field = start_time
                start_time_range = start_time_min
                end_time_range = start_time_max
            if isinstance(bin_start, datetime.datetime) or isinstance(
                    bin_start, datetime.date):
                start_time_range = bin_start
                san.arc_print(
                    "Bin Start Time was selected, using {0} as bin starting time period."
                    .format(str(bin_start)))
            time_bins = san.construct_time_bin_ranges(start_time_range,
                                                      end_time_range,
                                                      time_delta)
            san.arc_print("Constructing queries based on datetime ranges.")
            temporal_queries = san.construct_sql_queries_from_time_bin(
                time_bins, inFeatureClass, start_time_field, end_time_field)
            # Declare New Temporal Field Names
            join_id_field = arcpy.ValidateFieldName("TemporalJoinID",
                                                    fin_output_workspace)
            bin_number = arcpy.ValidateFieldName("Bin_Number",
                                                 fin_output_workspace)
            dt_starttime = arcpy.ValidateFieldName("DT_Start_Bin",
                                                   fin_output_workspace)
            dt_endtime = arcpy.ValidateFieldName("DT_End_Bin",
                                                 fin_output_workspace)
            txt_starttime = arcpy.ValidateFieldName("TXT_Start_Bin",
                                                    fin_output_workspace)
            txt_endtime = arcpy.ValidateFieldName("TXT_End_Bin",
                                                  fin_output_workspace)
            extract_query_field = arcpy.ValidateFieldName(
                "Extract_Query", fin_output_workspace)
            # Transition to temporal iteration
            time_counter = 0
            temporal_record_dict = {}
            san.arc_print(
                "Generating Mean Centers based on {0} queries.".format(
                    len(temporal_queries)), True)
            for query in temporal_queries:
                try:
                    time_counter += 1
                    newFCName = "TempFCBin_{0}".format(str(time_counter))
                    if not workspace_is_geodatabase:
                        newFCName = newFCName[
                            0:13]  # Truncate Name if not workspace.
                    san.arc_print(
                        "Creating Mean Center with query '{0}'.".format(
                            str(query)), True)
                    temporary_layer = arcpy.MakeFeatureLayer_management(
                        inFeatureClass, newFCName, query)
                    # Compute the mean center for this time bin in memory.
                    temporal_mean_center = "in_memory/MCTemporalTemp"
                    arcpy.MeanCenter_stats(temporary_layer,
                                           temporal_mean_center, weight_field,
                                           case_field, dimension_field)
                    start_date_time = time_bins[time_counter - 1][0]
                    end_date_time = time_bins[time_counter - 1][1]
                    start_bin_time_string = str(start_date_time)
                    end_bin_time_string = str(end_date_time)
                    if not workspace_is_geodatabase:
                        arcpy.AddWarning(
                            "DBF tables can only accept date fields, not datetimes."
                            " Please check string field.")
                        start_date_time = start_date_time.date()
                        end_date_time = end_date_time.date()
                    # Create Unique ID
                    san.add_new_field(temporal_mean_center,
                                      join_id_field,
                                      "TEXT",
                                      field_alias="TEMPORAL_JOIN_ID")
                    join_fields = [join_id_field]
                    case_present = False
                    if san.field_exist(temporal_mean_center, case_field):
                        join_fields = [join_id_field, case_field]
                        case_present = True
                    with arcpy.da.UpdateCursor(temporal_mean_center,
                                               join_fields) as join_cursor:
                        for row in join_cursor:
                            unique_id = san.constructUniqueStringID(
                                [str("1"), str(time_counter)])
                            if case_present:
                                unique_id = san.constructUniqueStringID(
                                    [str(row[1]),
                                     str(time_counter)])
                            row[0] = unique_id
                            join_cursor.updateRow(row)
                            temporal_record_dict[unique_id] = [
                                time_counter, start_date_time, end_date_time,
                                start_bin_time_string, end_bin_time_string,
                                query
                            ]

                    if time_counter == 1:
                        san.arc_print(
                            "Copying First Mean Center to Output Feature Class."
                        )
                        arcpy.CopyFeatures_management(temporal_mean_center,
                                                      outFeatureClass)
                    else:
                        san.arc_print(
                            "Appending Mean Center to Output Feature Class.")
                        arcpy.Append_management(temporal_mean_center,
                                                outFeatureClass)
                    arcpy.Delete_management(
                        temporal_mean_center)  # memory management
                except Exception:
                    arcpy.AddWarning("Could not process query {0}.".format(
                        str(query)))
            san.arc_print(
                "Adding time fields to output temporal feature class.", True)
            san.add_new_field(outFeatureClass, bin_number, "LONG")
            san.add_new_field(outFeatureClass,
                              dt_starttime,
                              "DATE",
                              field_alias="Start Bin Datetime")
            san.add_new_field(outFeatureClass,
                              dt_endtime,
                              "DATE",
                              field_alias="End Bin Datetime")
            san.add_new_field(outFeatureClass,
                              txt_starttime,
                              "TEXT",
                              field_alias="Start Bin String")
            san.add_new_field(outFeatureClass,
                              txt_endtime,
                              "TEXT",
                              field_alias="End Bin String")
            san.add_new_field(outFeatureClass, extract_query_field, "TEXT")

            san.arc_print(
                "Joining Temporal values by joining a dictionary to the unique ID.",
                True)
            table_fields = [
                bin_number, dt_starttime, dt_endtime, txt_starttime,
                txt_endtime, extract_query_field
            ]
            san.join_record_dictionary(outFeatureClass, temporal_record_dict,
                                       join_id_field, table_fields)
            san.arc_print("Tool execution complete.", True)
        else:
            san.arc_print(
                "The desired workspace does not exist. Tool execution terminated.",
                True)
            arcpy.AddWarning("The desired workspace does not exist.")

    except arcpy.ExecuteError:
        print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(str(e.args[0]))
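The san.* helpers are not shown; a plausible sketch of the bin construction they perform, assuming half-open [start, end) bins of width time_delta:

import datetime

def construct_time_bin_ranges(start, end, delta):
    """Hypothetical stand-in for san.construct_time_bin_ranges: list of (bin_start, bin_end) pairs."""
    bins = []
    current = start
    while current < end:
        bins.append((current, current + delta))
        current += delta
    return bins

# construct_time_bin_ranges(datetime.datetime(2017, 1, 1),
#                           datetime.datetime(2017, 1, 4),
#                           datetime.timedelta(days=1)) -> three one-day bins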
t2 = getTime()
msg = 'Time for FeatureToPoint to create {}'.format(os.path.basename(outfile1))
timeDifference(t1, t2, msg)

#calculates time taken to find central feature of centroids
t1 = getTime()
outfile2 = outdir + '/' + os.path.splitext(
    os.path.basename(infile))[0] + 'Central.shp'
arcpy.CentralFeature_stats(outfile1, outfile2, 'EUCLIDEAN_DISTANCE')
t2 = getTime()
msg = 'Time for CentralFeature to create {}'.format(os.path.basename(outfile2))
timeDifference(t1, t2, msg)

#calculates time taken to find the mean centroid
t1 = getTime()
outfile3 = outdir + '/' + os.path.splitext(
    os.path.basename(infile))[0] + 'Mean.shp'
arcpy.MeanCenter_stats(outfile1, outfile3)
t2 = getTime()
msg = 'Time for MeanCenter to create {}'.format(os.path.basename(outfile3))
timeDifference(t1, t2, msg)

#calculates time taken to estimate distance between central centroid and mean centroid
t1 = getTime()
outfile4 = outdir + '/' + os.path.splitext(
    os.path.basename(infile))[0] + 'Mean2Central.dbf'
arcpy.PointDistance_analysis(outfile2, outfile3, outfile4)
t2 = getTime()
msg = 'Time for PointDistance to create {}'.format(os.path.basename(outfile4))
timeDifference(t1, t2, msg)
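getTime and timeDifference are not shown; minimal helpers consistent with how the timing harness above uses them (an assumption, not the original code):

import time

def getTime():
    return time.time()

def timeDifference(t1, t2, msg):
    print('{}: {:.2f} seconds'.format(msg, t2 - t1))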
Example #9
def main(nd, nodes, sr, intersections, output_dir_fc, pro, name_clst):

    n_nodes = int(arcpy.GetCount_management(nodes).getOutput(0))
    n_clusters = int(math.ceil(float(n_nodes) / float(sr)))

    ###########################################################################################################
    # Get the cost matrix: OD
    ###########################################################################################################

    # Set local variables
    layer_name = "ODcostMatrix"
    impedance = "Length"

    layer_path = os.path.join('in_memory', 'od_layer')
    check_exists(layer_path)

    # Create and get the layer object from the result object. The OD cost matrix layer can
    # now be referenced using the layer object.
    layer_object = arcpy.na.MakeODCostMatrixLayer(nd, layer_path, impedance).getOutput(0)

    # Get the names of all the sublayers within the OD cost matrix layer.
    sublayer_names = arcpy.na.GetNAClassNames(layer_object)

    # Stores the layer names that we will use later
    origins_layer_name = sublayer_names["Origins"]
    destinations_layer_name = sublayer_names["Destinations"]
    lines_layer_name = sublayer_names["ODLines"]

    # Load the nodes as both origins and destinations.
    arcpy.na.AddLocations(layer_object, origins_layer_name, nodes)
    arcpy.na.AddLocations(layer_object, destinations_layer_name, nodes)

    # Solve the OD cost matrix layer
    arcpy.na.Solve(layer_object)

    # Get the Lines Sublayer (all the distances)
    if not pro:
        lines_sublayer = arcpy.mapping.ListLayers(layer_object, lines_layer_name)[0]

    else:
        lines_sublayer = layer_object.listLayers(lines_layer_name)[0]

    lines = os.path.join('in_memory', lines_layer_name)

    arcpy.management.CopyFeatures(lines_sublayer, lines)

    ################################################################################################################
    # Gathering the data for the penalty matrix
    ################################################################################################################
    leng = int(arcpy.GetCount_management(lines_sublayer).getOutput(0))  # Number of paths from every BS to every intersection

    if sr < n_nodes:
        thr = int(sr)
    else:
        thr = int(n_nodes)

    # Use more clusters than strictly necessary to avoid splitting clusters
    if thr != int(n_nodes):
        n_clusters_tmp = int(math.ceil(float(n_nodes) / float(sr)))
        n_clusters = n_clusters_tmp + int(math.ceil(float(n_clusters_tmp)/2))
        arcpy.AddMessage(n_clusters)
        thr = int(math.ceil(float(n_nodes)/float(n_clusters)))

    # Convert the attribute table of the optimization lines into a nested Python dict
    cost = make_attribute_dict(lines, "ObjectID", ["OriginID", 'DestinationID', 'DestinationRank', 'Total_Length'])
    cost_keys = ["ObjectID", "OriginID", 'DestinationID', 'DestinationRank', 'Total_Length']

    node_id_field = get_ids(nodes)
    nodes_dict = make_attribute_dict(nodes, node_id_field)

    ################################################################################################################
    # Clustering
    ################################################################################################################

    # Define a list of non-available nodes for clustering
    flags = set()
    # Define clustering dictionary
    clustering = {}

    count = n_nodes
    index = 0

    j = 1  # iterator through the cost matrix
    cl = 1  # counter for the cluster

    sort = penalty_update(n_nodes, thr, cost, cost_keys, index)  # calculate penalty matrix
    cost_len = len(nodes_dict)

    for i in range(0, count):  # for all the nodes
        k = 0  # counter for number of cluster members
        if j <= leng:
            if len(flags) != n_nodes:
                node = sort[i][0]
                if node not in flags:
                    clustering[cl] = {}
                    clustering[cl]['members'] = []
                    # We need to traverse the cost matrix, but not from the first element. Because the
                    # matrix structure is regular, we can compute the starting index that assigns (X, Y)
                    # to the seed master. E.g., with three nodes [1, 2, 3] the OriginID column reads
                    # 111222333: origin 3 starts at position 3*(3-1)+1 = 7, origin 1 at 3*(1-1)+1 = 1,
                    # and origin 2 at 3*(2-1)+1 = 4.
                    j = cost_len * (node - 1) + 1

                    while k < thr:
                        # now we need to populate the cluster dict with non-clustered yet members
                        if cost[j][cost_keys[2]] not in flags:
                            member_id = cost[j][cost_keys[2]]
                            clustering[cl]['members'].append((member_id,  j))
                            flags.add(member_id)
                            k += 1
                            if j < leng:
                                j += 1
                            else:
                                break
                        else:
                            if j < leng:
                                j += 1
                                if len(flags) == n_nodes:
                                    break
                    cl += 1

        else:
            break

    # print(clustering)

    # By select by attribute select all the cluster members
    nodes_layer = os.path.join('in_memory', 'nodes')
    check_exists(nodes_layer)
    arcpy.MakeFeatureLayer_management(nodes, nodes_layer)

    int_layer = os.path.join('in_memory', 'int_layer')
    check_exists(int_layer)
    arcpy.MakeFeatureLayer_management(intersections, int_layer)

    int_id_field = get_ids(intersections)

    cluster_heads = []

    for i in range(0, n_clusters):
        n_members = len(clustering[i+1]['members'])
        # print('***********************************************************')
        # print('Cluster # {0}'.format(i))
        # print('# Members {0}'.format(n_members))

        for j in range(0, n_members):
            # print('Iterating through member # {0}'.format(j))
            if j == 0:
                clause_nodes = '{0} = {1}'.format(node_id_field, clustering[i+1]['members'][j][0])

            elif j > 0:
                clause_nodes += ' OR {0} = {1}'.format(node_id_field, clustering[i+1]['members'][j][0])

        # print(clause_nodes)
        arcpy.SelectLayerByAttribute_management(nodes_layer, selection_type='NEW_SELECTION', where_clause=clause_nodes)

        name_cluster = 'Cluster_{0}_{1}'.format(i, name_clst)
        out_cluster = os.path.join(output_dir_fc, name_cluster)
        check_exists(out_cluster)
        arcpy.CopyFeatures_management(nodes_layer, out_cluster)

        # Find the centroid of the cluster
        out_cluster_head_tmp = os.path.join('in_memory', 'cluster_head_tmp')
        check_exists(out_cluster_head_tmp)
        arcpy.MeanCenter_stats(out_cluster, out_cluster_head_tmp)

        # Find the closest intersection to the centroid
        arcpy.Near_analysis(out_cluster_head_tmp, intersections, method='GEODESIC')

        with arcpy.da.SearchCursor(out_cluster_head_tmp, 'NEAR_FID') as cursor:
            for row in cursor:
                intersection_id = row[0]

        clause_int = '{0} = {1}'.format(int_id_field, intersection_id)
        arcpy.SelectLayerByAttribute_management(int_layer, selection_type='NEW_SELECTION', where_clause=clause_int)

        name_cluster_head = 'Cluster_head_{0}_{1}'.format(i, name_clst)
        # print(name_cluster_head)
        out_cluster_head = os.path.join(output_dir_fc, name_cluster_head)
        check_exists(out_cluster_head)
        arcpy.CopyFeatures_management(int_layer, out_cluster_head)
        cluster_heads.append(out_cluster_head)

    # Merge clusterheads
    merge_name = os.path.join('in_memory', 'Merged_cluster_heads_sr{0}'.format(sr))
    check_exists(merge_name)
    arcpy.Merge_management(cluster_heads, merge_name)

    # Save them to a file
    name_cluster_heads = os.path.join(output_dir_fc,  'Cluster_heads_{0}'.format(name_clst))
    check_exists(name_cluster_heads)
    arcpy.CopyFeatures_management(merge_name, name_cluster_heads)

    return n_clusters
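make_attribute_dict, get_ids, check_exists, and penalty_update are external helpers. Based on how the cost dictionary is indexed above (cost[j]['DestinationID'] and so on), make_attribute_dict plausibly looks like this sketch:

import arcpy

def make_attribute_dict(fc, key_field, attr_list=None):
    """Hypothetical helper: map each key_field value to a {field: value} dict."""
    attr_list = attr_list or []
    attdict = {}
    with arcpy.da.SearchCursor(fc, [key_field] + attr_list) as cursor:
        for row in cursor:
            attdict[row[0]] = dict(zip(attr_list, row[1:]))
    return attdict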
Example #10
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# calculate_mean_center.py
# Created on: 2017-02-22 14:18:52.00000
#   (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------

# Import arcpy module
import arcpy

# Overwrite things.  We're feeling confident.
arcpy.env.overwriteOutput = True

# Local variables:
in_shp = "D:\\projects\\ak_fire\\gis\\data\\AICC_fire_perimeters\\FireAreaHistory.shp"
out_shp1 = "D:\\projects\\ak_fire\\gis\\data\\firePerimeters_1940_2016_mean_center_weightedByAcres3.shp"
out_shp2 = "D:\\projects\\ak_fire\\gis\\data\\firePerimeters_1940_2016_mean_center3.shp"

# Process: Mean Center weighted by acres
arcpy.MeanCenter_stats(in_shp, out_shp1, "CalcAcres", "FireYear", "")
arcpy.MeanCenter_stats(in_shp, out_shp2, "", "FireYear", "")
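For reference, the weighted call above computes, per FireYear case, x_bar = sum(w_i * x_i) / sum(w_i) (and likewise for y), with w_i = CalcAcres. A pure-Python sketch of the same quantity for a single case subset (illustrative; for polygons, SHAPE@XY yields the feature centroid):

import arcpy

def weighted_mean_center(fc, weight_field):
    # Hypothetical cross-check of what MeanCenter_stats computes for one subset.
    sum_w = sum_wx = sum_wy = 0.0
    with arcpy.da.SearchCursor(fc, ['SHAPE@XY', weight_field]) as cursor:
        for (x, y), w in cursor:
            sum_w += w
            sum_wx += w * x
            sum_wy += w * y
    return sum_wx / sum_w, sum_wy / sum_w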
Example #11
import arcpy
from arcpy import env

env.workspace = r'E:\CBS Project\MyGdb.mdb'  # raw string guards against backslash escape sequences

arcpy.Intersect_analysis(["KrgNok", "Export_Output"], "Turkiye_Out", "ALL", "", "INPUT")
#env.workspace = r'E:\CBS Project'
arcpy.MeanCenter_stats("Turkiye_Out", "KargoEnIyi", "Turkiye_NUFUS_2000", "", "")

#arcpy.MakeFeatureLayer_management(r"E:\CBS Project\MyGdb.mdb\KargoEnIyi", "KrgSlct")
#arcpy.SelectLayerByAttribute_management("KrgSlct", "NEW_SELECTION", "")
#arcpy.SelectLayerByAttribute_management("KargoEnIyi", "NEW_SELECTION")
arcpy.SelectLayerByLocation_management("Export_Output", "CONTAINS", "KargoEnIyi", "", "NEW_SELECTION")

Example #12
def optimal(path, pointlayer, weight, facility, number):
    #if no seed file is added, generate random facility
    facillist = []
    facillist.append(facility)
    for A in facillist:
        if facillist[0] == "":
            arcpy.AddMessage("Generating random seed location")
            global seedx
            seedx = arcpy.CreateRandomPoints_management(
                workspace,
                "Random",
                points_copy,
                "",
                number_of_points_or_field=int(number))
            deletefeatures.append(seedx)
        else:
            arcpy.AddMessage("Using input seed")
            seedx = seed_copy
    global iterlist
    iterlist = []
    for iter in range(iterations):
        ###near and mean center#########################################################
        if iter == 0:
            arcpy.AddMessage("Performing iteration #" + str(iter + 1))
            near = arcpy.Near_analysis(pointlayer, seedx, "", "", "",
                                       near_method)
            mean = arcpy.MeanCenter_stats(
                pointlayer, path + "\\" + "meancenter" + str(iter + 1),
                weight, 'NEAR_FID', '')
            arcpy.AddField_management(mean, "Iteration", 'TEXT')
            arcpy.CalculateField_management(mean, 'ITERATION',
                                            str(iter + 1))
            iterlist.append(mean)
            table0 = arcpy.Statistics_analysis(pointlayer,
                                               "mean_" + str(iter),
                                               [['NEAR_DIST', 'Mean']])
            with arcpy.da.SearchCursor(table0,
                                       ["MEAN_NEAR_DIST"]) as cursor:
                for row in cursor:
                    value0 = row[0]
            meandiff.update({iter + 1: value0})
            arcpy.Delete_management(table0)
            deletefeatures.append(mean)
            #arcpy.AddMessage("Average Difference for iteration " + str(iter + 1) + " is " + str(value0))
        elif iter >= 1:
            arcpy.AddMessage("Performing iteration #" + str(iter + 1))
            nearx = arcpy.Near_analysis(pointlayer, iterlist[-1], "",
                                        "", "", near_method)
            meanx = arcpy.MeanCenter_stats(
                pointlayer, path + "\\" + "meancenter" + str(iter + 1),
                weight, 'NEAR_FID', '')
            arcpy.AddField_management(meanx, "Iteration", 'TEXT')
            arcpy.CalculateField_management(meanx, 'ITERATION',
                                            str(iter + 1))
            iterlist.append(meanx)
            table1 = arcpy.Statistics_analysis(pointlayer,
                                               "mean_" + str(iter),
                                               [['NEAR_DIST', 'Mean']])
            with arcpy.da.SearchCursor(table1,
                                       ["MEAN_NEAR_DIST"]) as cursor:
                for row in cursor:
                    value1 = row[0]
            meandiff.update({iter + 1: value1})
            arcpy.Delete_management(table1)
            deletefeatures.append(meanx)
        else:
            arcpy.AddMessage("Error in first iteration")
    return iterlist
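The loop above is a Lloyd-style (k-means-like) iteration: Near_analysis assigns each point to its nearest center (NEAR_FID), MeanCenter_stats recomputes one center per assignment group, and meandiff records the mean point-to-center distance for each iteration. A hypothetical convergence check on that dict (not in the original, which always runs a fixed number of iterations):

def has_converged(meandiff, tol=0.01):
    """Stop when the mean distance improves by less than tol (relative)."""
    if len(meandiff) < 2:
        return False
    last = meandiff[len(meandiff)]
    prev = meandiff[len(meandiff) - 1]
    return abs(prev - last) <= tol * prev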
def do_analysis(inFeatureClass, outFeatureClass, lengthNum, lengthField, blockWidthValue, referenceFeatureClass):
    """This function will create blocks in one location based on the incoming reference centroid for the
    purpose of being used for data driven design applications in CityEngine."""
    # try:
    # Delete Existing Output
    arcpy.env.overwriteOutput = True
    if arcpy.Exists(outFeatureClass):
        arc_print("Deleting existing output feature.", True)
        arcpy.Delete_management(outFeatureClass)
    workspace = os.path.dirname(outFeatureClass)
    tempOutName = arcpy.ValidateTableName("TempBlockFC_1", workspace)
    tempOutFeature = os.path.join(workspace, tempOutName)
    # Add New Fields
    arc_print("Adding new fields for old object IDs and geometry name.", True)
    OldObjectIDName = "UniqueFeatID"
    GeometryName = "CEStreetName"
    AddNewField(inFeatureClass, OldObjectIDName, "LONG")
    AddNewField(inFeatureClass, GeometryName, "TEXT")
    # Create feature class to get outputFC
    arc_print("Making a new output feature class using the input as a template", True)
    OutPut = arcpy.CreateFeatureclass_management(workspace, tempOutName, "POLYLINE", template=inFeatureClass,
                                                 spatial_reference=inFeatureClass)

    arc_print("Gathering feature information.", True)
    # Get feature description and spatial reference information for tool use
    desc = arcpy.Describe(inFeatureClass)
    SpatialRef = desc.spatialReference
    shpType = desc.shapeType
    srName = SpatialRef.name
    arc_print(
            "The shape type is {0}, and the current spatial reference is: {1}".format(str(shpType), str(srName)),
            True)
    # Get mean center of feature class (for pointGeo)
    if arcpy.Exists(referenceFeatureClass) and referenceFeatureClass != "#":
        arc_print("Calculating the mean center of the reference feature class.", True)
        meanCenter = arcpy.MeanCenter_stats(referenceFeatureClass)
    else:
        arc_print("Calculating the mean center of the copied feature.", True)
        meanCenter = arcpy.MeanCenter_stats(inFeatureClass)

    fieldNames = getFields(inFeatureClass)
    arc_print("Getting point geometry from copied center.", True)
    pointGeo = copy.deepcopy(arcpy.da.SearchCursor(meanCenter, ["SHAPE@"]).next()[0])  # Only one center, so one record

    # Check if the optional Street Length/ Lot Area field is used.
    idsAndFieldSearchNames = ["SHAPE@"] + fieldNames
    arc_print("The search cursor's fields and tags are:{0}".format(str(idsAndFieldSearchNames)), True)
    records = []
    with arcpy.da.SearchCursor(inFeatureClass, idsAndFieldSearchNames) as cursorSearch:
        arc_print("Loading input feature classes rows into a new record list.", True)
        for search_row in cursorSearch:
            records.append(search_row)
    arc_print("Inserting new rows and geometries to new feature class.", True)
    count = 0
    with arcpy.da.InsertCursor(tempOutFeature, idsAndFieldSearchNames) as cursorInsert:
        if desc.shapeType == "Polyline":
            for row in records:
                # Two try blocks: the outer catches geometry-creation errors, the inner catches row-insert errors
                count += 1
                try:
                    arc_print("A creating geometry dictionary for feature iteration: {0}.".format(str(count)))
                    geoDict = CreateMainStreetBlockCEGeometry(pointGeo, lineLength(row, lengthField, lengthNum,
                                                                                   idsAndFieldSearchNames),
                                                              blockWidthValue)
                    # print geoDict
                    for key in geoDict.keys():
                        try:
                            rowList = copyAlteredRow(row, idsAndFieldSearchNames,
                                                     {"SHAPE@": geoDict[key], OldObjectIDName: count,
                                                      GeometryName: str(key)})
                            cursorInsert.insertRow(rowList)
                        except:
                            arcpy.AddWarning("Passed line at iteration {0}.".format(str(count)))
                            pass
                except:
                    arcpy.GetMessage(2)
                    arc_print("Failed on iteration {0}.".format(str(count)), True)
                    pass
        else:
            arc_print("Input geometry is not a polyline. Check arguments.", True)
            arcpy.AddError("Input geometry is not a polyline. Check arguments.")

    arc_print("Projecting data into Web Mercator Auxiliary Sphere (a CityEngine compatible projection).", True)
    webMercatorAux = arcpy.SpatialReference(3857)
    arcpy.Project_management(OutPut, outFeatureClass, webMercatorAux)
    arc_print("Cleaning up intermediates.", True)
    arcpy.Delete_management(meanCenter)
    arcpy.Delete_management(OutPut)
    arcpy.DeleteField_management(inFeatureClass, OldObjectIDName)
    arcpy.DeleteField_management(inFeatureClass, GeometryName)
    del SpatialRef, desc, cursorSearch, webMercatorAux, cursorInsert
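AddNewField, arc_print, getFields, copyAlteredRow, lineLength, and CreateMainStreetBlockCEGeometry are project helpers not shown here. A plausible sketch of AddNewField, consistent with its calls above:

import arcpy

def AddNewField(in_table, field_name, field_type, field_alias=None):
    """Hypothetical helper: add a field only if it does not already exist."""
    if field_name not in [f.name for f in arcpy.ListFields(in_table)]:
        arcpy.AddField_management(in_table, field_name, field_type,
                                  field_alias=field_alias)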
def do_analysis(inFeatureClass, outFeatureClass, Length, Field,
                referenceFeatureClass):
    """This function will create streets in one location based on the incoming reference centroid for the
    purpose of being used for data driven design applications in CityEngine."""
    try:
        # Delete Existing Output
        arcpy.env.overwriteOutput = True
        if arcpy.Exists(outFeatureClass):
            arc_print("Deleting existing output feature.", True)
            arcpy.Delete_management(outFeatureClass)
        # Copy/Project feature class to get outputFC
        arc_print("Making a copy of input feature class for output.", True)
        OutPut = arcpy.CopyFeatures_management(inFeatureClass)
        arc_print("Gathering feature information.", True)
        # Get feature description and spatial reference information for tool use
        desc = arcpy.Describe(OutPut)
        SpatialRef = desc.spatialReference
        shpType = desc.shapeType
        srName = SpatialRef.name
        arc_print(
            "The shape type is {0}, and the current spatial reference is: {1}".
            format(str(shpType), str(srName)), True)
        # Get mean center of feature class (for pointGeo)
        if arcpy.Exists(
                referenceFeatureClass) and referenceFeatureClass != "#":
            arc_print(
                "Calculating the mean center of the reference feature class.",
                True)
            meanCenter = arcpy.MeanCenter_stats(referenceFeatureClass)
        else:
            arc_print("Calculating the mean center of the copied feature.",
                      True)
            meanCenter = arcpy.MeanCenter_stats(inFeatureClass)

        arc_print("Getting point geometry from copied center.", True)
        pointGeo = copy.deepcopy(
            arcpy.da.SearchCursor(
                meanCenter,
                ["SHAPE@"]).next()[0])  # Only one center, so one record
        # Check if the optional Street Length/ Lot Area field is used.
        if Field and FieldExist(OutPut, Field):
            arc_print("Using size field to create output geometries.", True)
            cursorFields = ["SHAPE@", "OID@", Field]
        else:
            arc_print(
                "Using size input value to create same sized output geometries.",
                True)
            cursorFields = ["SHAPE@", "OID@"]

        with arcpy.da.UpdateCursor(OutPut, cursorFields) as cursor:
            arc_print("Replacing existing input geometry.", True)
            count = 1
            if desc.shapeType == "Polyline":
                for row in cursor:
                    # Try the update; on failure, fall back to handleFailedStreetUpdate
                    count += 1
                    try:
                        print("A Line at OID: {0}.".format(str(row[1])))
                        row[0] = CreateMainStreetCEGeometry(
                            pointGeo,
                            lineLength(row, Field, Length, cursorFields))
                        cursor.updateRow(row)
                    except:
                        handleFailedStreetUpdate(
                            cursor, row, pointGeo,
                            lineLength(row, Field, Length, cursorFields))
            else:
                arc_print("Input geometry is not a polyline. Check arguments.",
                          True)
                arcpy.AddError(
                    "Input geometry is not a polyline. Check arguments.")

            arc_print(
                "Projecting data into Web Mercator Auxiliary Sphere (a CityEngine compatible projection).",
                True)
            webMercatorAux = arcpy.SpatialReference(3857)
            arcpy.Project_management(
                OutPut, outFeatureClass,
                webMercatorAux)  # No preserve shape, keeps 2 vertices
            arc_print("Cleaning up intermediates.", True)
            arcpy.Delete_management(meanCenter)
            arcpy.Delete_management(OutPut)
            del SpatialRef, desc, cursor, webMercatorAux

    except arcpy.ExecuteError:
        print(arcpy.GetMessages(2))
    except Exception as e:
        print(e.args[0])
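FieldExist (used above and again in the next example) is likewise a small helper; a plausible sketch:

import arcpy

def FieldExist(featureclass, fieldname):
    """Hypothetical helper: True if the named field exists on the feature class."""
    return bool(fieldname) and bool(arcpy.ListFields(featureclass, fieldname))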
Example #15
def temporal_aggregate_field(inFeatureClass,
                             outFeatureClass,
                             start_time,
                             end_time,
                             time_interval,
                             weight_field="#",
                             case_field="#",
                             summary_field="#",
                             bin_start=None):
    """ This tool will split a feature class into multiple kernel densities based on a datetime field and a
    a set time interval. The result will be a time enabled moasic with Footprint. """
    try:
        splitOutPath = os.path.split(outFeatureClass)
        outWorkSpace = splitOutPath[0]
        outFCTail = splitOutPath[1]
        fin_output_workspace = outWorkSpace
        if arcpy.Exists(fin_output_workspace):
            arcpy.env.workspace = fin_output_workspace
            arcpy.env.overwriteOutput = True
            arcPrint(
                "The current work space is: {0}.".format(fin_output_workspace),
                True)
            # Set up Work Space Environments
            out_workspace_path_split = os.path.split(fin_output_workspace)
            workSpaceTail = out_workspace_path_split[1]
            inFeatureClassTail = os.path.split(inFeatureClass)[1]
            ws_desc = arcpy.Describe(fin_output_workspace)
            workspace_is_geodatabase = ws_desc.dataType == "Workspace"
            arcPrint(
                "Gathering describe object information from fields and input feature class."
            )
            fc_desc = arcpy.Describe(inFeatureClass)
            # Describe() on a bare field-name string fails; look the field up on the input instead.
            weight_fields = arcpy.ListFields(inFeatureClass, weight_field)
            summary_field_type = weight_fields[0].type if weight_fields else None

            try:
                arcPrint(
                    "Attempting to create Temporal Table in output workspace.")
                arcpy.CreateFeatureclass_management(outWorkSpace, outFCTail,
                                                    'POINT')
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Unique_ID", fin_output_workspace),
                    "TEXT")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Number",
                                            fin_output_workspace), "LONG")
                AddNewField(outFeatureClass,
                            arcpy.ValidateFieldName("DT_Start_Bin",
                                                    fin_output_workspace),
                            "DATE",
                            field_alias="Start Bin Datetime")
                AddNewField(outFeatureClass,
                            arcpy.ValidateFieldName("DT_End_Bin",
                                                    fin_output_workspace),
                            "DATE",
                            field_alias="End Bin Datetime")
                AddNewField(outFeatureClass,
                            arcpy.ValidateFieldName("TXT_Start_Bin",
                                                    fin_output_workspace),
                            "TEXT",
                            field_alias="Start Bin String")
                AddNewField(outFeatureClass,
                            arcpy.ValidateFieldName("TXT_End_Bin",
                                                    fin_output_workspace),
                            "TEXT",
                            field_alias="End Bin String")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Extract_Query",
                                            fin_output_workspace), "TEXT")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Count", fin_output_workspace),
                    "DOUBLE")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Mean", fin_output_workspace),
                    "DOUBLE")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Median",
                                            fin_output_workspace), "DOUBLE")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Sum", fin_output_workspace),
                    "DOUBLE")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_StdDev",
                                            fin_output_workspace), "DOUBLE")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Min", fin_output_workspace),
                    "DOUBLE")
                AddNewField(
                    outFeatureClass,
                    arcpy.ValidateFieldName("Bin_Max", fin_output_workspace),
                    "DOUBLE")
            except Exception:
                arcpy.AddWarning(
                    "Could not create the output temporal feature class. Time enablement is not possible."
                )
                pass
            try:
                arcpy.RefreshCatalog(outWorkSpace)
            except Exception:
                arcPrint("Could not refresh catalog.")
                pass
            # Set up Time Deltas and Parse Time String
            arcPrint("Constructing Time Delta from input time period string.",
                     True)
            arcPrint(str(time_interval))
            time_magnitude, time_unit = alphanumeric_split(str(time_interval))
            time_delta = parse_time_units_to_dt(time_magnitude, time_unit)
            arcPrint(
                "Using datetime fields to generate new feature classes in {0}."
                .format(str(workSpaceTail)))
            arcPrint("Getting start and final times in start time field {0}.".
                     format(start_time))
            start_time_min, start_time_max = get_min_max_from_field(
                inFeatureClass, start_time)
            # Establish whether to use end time field or only a start time (Single Date Field)
            if FieldExist(inFeatureClass, end_time) and end_time:
                arcPrint(
                    "Using start and end time to grab feature classes whose bins occur within an events "
                    "start or end time.")
                end_time_min, end_time_max = get_min_max_from_field(
                    inFeatureClass, end_time)
                start_time_field = start_time
                end_time_field = end_time
                start_time_range = start_time_min
                end_time_range = end_time_max
            else:
                arcPrint(
                    "Using only first datetime start field to construct time bin ranges."
                )
                start_time_field = start_time
                end_time_field = start_time
                start_time_range = start_time_min
                end_time_range = start_time_max
            if isinstance(bin_start, datetime.datetime) or isinstance(
                    bin_start, datetime.date):
                start_time_range = bin_start
                arcPrint(
                    "Bin Start Time was selected, using {0} as bin starting time period."
                    .format(str(bin_start)))
            time_bins = construct_time_bin_ranges(start_time_range,
                                                  end_time_range, time_delta)
            arcPrint("Constructing queries based on datetime ranges.")
            temporal_queries = construct_sql_queries_from_time_bin(
                time_bins, inFeatureClass, start_time_field, end_time_field)
            temporary_fc_name = "Temp_{1}".format(
                arcpy.ValidateTableName(inFeatureClassTail,
                                        fin_output_workspace)[0:13])
            temporary_fc_path = os.path.join(fin_output_workspace,
                                             temporary_fc_name)
            # Transition to kernel density creation
            time_counter = 0
            temporal_record_table = []
            arcPrint(
                "Generating mean centers based on {0} queries.".format(
                    len(temporal_queries)), True)
            for query in temporal_queries:
                try:
                    time_counter += 1
                    arcPrint(
                        "Determining name and constructing query for new feature class.",
                        True)
                    # Compute the mean center for this time bin.

                    temporary_layer = arcpy.MakeFeatureLayer_management(
                        inFeatureClass, temporary_fc_name, query)
                    temporary_dataframe = ArcGISTabletoDataFrame()  # NOTE: leftover call; arguments appear to be missing and the result is unused
                    arcPrint(
                        "Created Mean Center {0} with query [{1}], appending to master feature class."
                        .format(temporary_fc_name, str(query)), True)
                    arcpy.MeanCenter_stats(temporary_layer, temporary_fc_path,
                                           weight_field, case_field)
                    start_date_time = time_bins[time_counter - 1][0]
                    end_date_time = time_bins[time_counter - 1][1]
                    start_bin_time_string = str(start_date_time)
                    end_bin_time_string = str(end_date_time)
                    if not workspace_is_geodatabase:
                        arcpy.AddWarning(
                            "DBF tables can only accept date fields, not datetimes."
                            " Please check string field.")
                        start_date_time = start_date_time.date()
                        end_date_time = end_date_time.date()
                    temporal_record_table.append([
                        time_counter, start_date_time, end_date_time,
                        start_bin_time_string, end_bin_time_string, query
                    ])

                except Exception as e:
                    arcPrint(
                        "The feature bin with query {0} could not be processed. Check arguments."
                        .format(str(query)))
                    arcpy.AddWarning(str(e.args[0]))
                    pass

            # arc_print("Adding record values to Temporal Table with an insert cursor.")
            # table_fields= get_fields(outFeatureClass)
            # with arcpy.da.InsertCursor(outFeatureClass,table_fields) as cursor:
            #     for records in temporal_record_table:
            #         cursor.insertRow(records)
            #     arc_print("Finished inserting records for database.")
            #     del cursor
            # arc_print("Tool execution complete.", True)
        else:
            arcPrint(
                "The desired workspace does not exist. Tool execution terminated.",
                True)
            arcpy.AddWarning("The desired workspace does not exist.")

    except arcpy.ExecuteError:
        print(arcpy.GetMessages(2))
    except Exception as e:
        arcPrint(str(e.args[0]))
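alphanumeric_split and parse_time_units_to_dt (and their san.* counterparts used earlier) are not shown; plausible sketches consistent with interval strings like '1 day' or '2 hours':

import datetime
import re

def alphanumeric_split(time_string):
    """Hypothetical helper: '15 minutes' -> (15.0, 'minutes')."""
    match = re.match(r'\s*([\d.]+)\s*([A-Za-z]+)', str(time_string))
    return float(match.group(1)), match.group(2).lower()

def parse_time_units_to_dt(magnitude, unit):
    """Hypothetical helper: (magnitude, unit) -> datetime.timedelta."""
    plural = {'second': 'seconds', 'minute': 'minutes', 'hour': 'hours',
              'day': 'days', 'week': 'weeks'}
    return datetime.timedelta(**{plural[unit.rstrip('s')]: magnitude})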
Example #16
def process_dates(er, date_info, hate_fc, windows, length):

    mean_fcs = []
    date_fcs = []

    for date, articles in date_info:

        try:
            fl = arcpy.MakeFeatureLayer_management(
                hate_fc,
                os.path.join(arcpy.env.scratchGDB, 'GDELT_{}'.format(date)),
                where_clause="dateadded = '{}'".format(date))
            fl_count = arcpy.GetCount_management(fl)[0]

            # Ignore Dates That Return Less Than 3 GDELT Records
            if int(fl_count) < 3:
                continue

            dd = arcpy.DirectionalDistribution_stats(
                fl, os.path.join(windows, 'D_{0}_{1}_{2}'.format(*er, date)),
                '1_STANDARD_DEVIATION', 'numarticles')

            mc = arcpy.MeanCenter_stats(
                fl, os.path.join(windows, 'MC_{0}_{1}_{2}'.format(*er, date)),
                'numarticles')

            arcpy.AddField_management(dd, 'EVENT_DATE', 'DATE')
            arcpy.AddField_management(mc, 'EVENT_DATE', 'DATE')

            arcpy.AddField_management(mc, 'ARTICLES', 'LONG')
            arcpy.AddField_management(dd, 'ARTICLES', 'LONG')

            for target in [dd, mc]:
                with arcpy.da.UpdateCursor(
                        target, ['EVENT_DATE', 'ARTICLES']) as cursor:
                    for _ in cursor:
                        cursor.updateRow([
                            datetime.datetime.strptime(date, '%Y%m%d'),
                            articles
                        ])

            date_fcs.append(dd)
            mean_fcs.append(mc)

        except arcpy.ExecuteError:
            pass

    print('{} - {}: {} of {} Processed'.format(*er, len(date_fcs),
                                               len(date_info)))

    arcpy.Merge_management(
        date_fcs, os.path.join(windows, 'GDELT_{0}_{1}_Windows'.format(*er)))

    mc_merge = arcpy.Merge_management(
        mean_fcs, os.path.join(windows, 'GDELT_{0}_{1}_MC'.format(*er)))

    pl = arcpy.PointsToLine_management(mc_merge,
                                       os.path.join(windows,
                                                    'MC_{0}_{1}'.format(*er)),
                                       Sort_Field='EVENT_DATE')

    arcpy.AddField_management(pl, 'EVENT_LEN', 'LONG')
    with arcpy.da.UpdateCursor(pl, ['EVENT_LEN']) as cursor:
        for _ in cursor:
            cursor.updateRow([length])

    for fc in date_fcs:
        arcpy.Delete_management(fc)

    for fc in mean_fcs:
        arcpy.Delete_management(fc)

    movement = [row[0].length for row in arcpy.da.SearchCursor(pl, ['SHAPE@'])]
    movement = round(movement[0]) if movement else 0

    return movement
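A hypothetical invocation of process_dates; the event-range pair, dates, article counts, and workspace path are all assumptions:

er = ('20170101', '20170131')                    # event window identifier pair
date_info = [('20170105', 12), ('20170106', 8)]  # (dateadded, numarticles)
moved = process_dates(er, date_info, 'GDELT_Hate', r'C:\data\windows.gdb',
                      length=len(date_info))
print('Mean-center track length: {}'.format(moved))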
Example #17
import math
import sys

import arcpy


def main(
    in_data,
    population_field,
    out_dir,
    unit="Kilometer",
    beta=0,
    norm_by_reference="None",  # one of: None, without_Geographic_Constraints, with_Geographic_Constraints, Both
    reference_density=300,
    unbuildable='',  # with Geographic_Constraints, bounds the difference between the reference area minus unbuildable area and a perfect circular reference
    percision=0.5):
    e = 2.718281828459045
    pi = 3.14159265359
    conversionDic={'Mile':{'Mile':1,'Meter':1609.34,'Foot':5280,'Kilometer':1.60934},\
                   'Meter':{'Mile':0.000621371,'Meter':1,'Foot':3.28084,'Kilometer':0.001},\
                   'Foot':{'Mile':0.000189394,'Meter':0.3048,'Foot':1,'Kilometer':0.0003048},\
                   'Kilometer':{'Mile':0.621371,'Meter':1000,'Foot':3280.84,'Kilometer':1}}

    sp = arcpy.Describe(in_data).spatialReference
    linear_unit = sp.linearUnitName
    #print linear_unit
    if linear_unit == 'Foot_US':
        linear_unit = 'Foot'  # normalize before the lookup; 'Foot_US' is not a key in conversionDic
    conversionRatio = conversionDic[linear_unit][unit]
    #print conversionRatio
    case_rawCompactness = rawCompactness(in_data, population_field,
                                         conversionRatio, beta)
    total_population = case_rawCompactness[1]
    raw_Compactness = case_rawCompactness[0]
    normalizedCompactness = raw_Compactness / total_population
    if norm_by_reference == "None":
        return ([raw_Compactness, normalizedCompactness])
    '''
    print "raw compactness:", case_rawCompactness[0]
    arcpy.AddMessage("raw compactness: "+str(case_rawCompactness[0]))
    print "compactness normalized by population:", case_rawCompactness[0]/total_population
    arcpy.AddMessage("compactness normalized by population:"+str(case_rawCompactness[0]/total_population))
    '''
    # Shapefile outputs need a .shp extension; geodatabase outputs do not
    dsc = arcpy.Describe(out_dir)
    suffix = "" if dsc.dataType == "Workspace" else ".shp"
    fishnet = out_dir + "/fishnet" + suffix
    fishnet_label = out_dir + "/fishnet_label" + suffix
    center = out_dir + "/center" + suffix
    reference = out_dir + "/reference" + suffix
    reference_point_c = out_dir + "/reference_point_C" + suffix
    unbuildable_merged = out_dir + "/unbuildable_merged" + suffix
    reference_w_gConstraints_step1 = out_dir + "/reference_w_gConstraints_step1" + suffix
    reference_w_gConstraints_step2 = out_dir + "/reference_w_gConstraints_step2" + suffix
    reference_w_gConstraints = out_dir + "/reference_w_gConstraints" + suffix
    reference_point_CG = out_dir + "/reference_point_CG" + suffix

    if norm_by_reference != "None":
        resolution = float(
            sys.argv[8])  # for converting the reference polygon to point
        referenceArea = total_population / float(reference_density)
        referenceRadius = ((referenceArea / math.pi)**0.5) / conversionRatio
        arcpy.MeanCenter_stats(in_data, center, population_field)
        arcpy.Buffer_analysis(center, reference, referenceRadius)
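        # Worked example with assumed numbers: total_population = 30000 and
        # reference_density = 300 people per square kilometer give
        # referenceArea = 100 km**2, so the ideal circular reference has
        # radius sqrt(100 / pi) ~= 5.64 km, converted back into the
        # dataset's own linear unit by dividing by conversionRatio.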

        if norm_by_reference == "without_Geographic_Constraints" or norm_by_reference == "Both":
            pointGrid(reference, out_dir, conversionRatio, resolution,
                      total_population, "C")
            reference_rawCompactness = rawCompactness(reference_point_c,
                                                      "population",
                                                      conversionRatio)
            print "raw compactness of the reference (without geographic constraints): ", reference_rawCompactness[
                0]
            arcpy.AddMessage(
                "raw compactness of the reference (without geographic constraints): "
                + str(reference_rawCompactness[0]))
            #print reference_rawCompactness[1]
            print "compactness normalized by a circular reference:", (
                case_rawCompactness[0] / reference_rawCompactness[0])
            arcpy.AddMessage(
                "compactness normalized by the reference(without geographic constraints): "
                + str(case_rawCompactness[0] / reference_rawCompactness[0]))
            arcpy.SetParameterAsText(10, reference)

        if norm_by_reference == "with_Geographic_Constraints" or norm_by_reference == "Both":
            arcpy.Merge_management(unbuildable, unbuildable_merged)
            arcpy.Erase_analysis(reference, unbuildable_merged,
                                 reference_w_gConstraints_step1)
            fields = arcpy.ListFields(reference_w_gConstraints_step1)
            if "AREA" not in fields:
                arcpy.AddField_management(reference_w_gConstraints_step1,
                                          "AREA", "DOUBLE")
            arcpy.CalculateField_management(
                reference_w_gConstraints_step1, "AREA",
                "!shape.area@square{0}!".format(unit), 'PYTHON')
            cursor = arcpy.da.SearchCursor(reference_w_gConstraints_step1,
                                           "AREA")
            for row in cursor:
                referenceArea_w_gConstraints = row[0]
            del cursor

            buffer_dist = referenceRadius * ((
                (referenceArea / referenceArea_w_gConstraints)**0.5) - 1)
            arcpy.Buffer_analysis(reference_w_gConstraints_step1,
                                  reference_w_gConstraints_step2, buffer_dist)

            # Iterate until the constrained reference area matches the ideal
            # circular area to within the requested precision
            expansionRate = 0
            while expansionRate < (1 - precision) or expansionRate > (1 + precision):
                arcpy.Erase_analysis(reference_w_gConstraints_step2,
                                     unbuildable_merged,
                                     reference_w_gConstraints)
                field_names = [f.name for f in
                               arcpy.ListFields(reference_w_gConstraints)]
                if "AREA" not in field_names:
                    arcpy.AddField_management(reference_w_gConstraints, "AREA",
                                              "DOUBLE")
                arcpy.CalculateField_management(
                    reference_w_gConstraints, "AREA",
                    "!shape.area@{0}!".format(area_token), 'PYTHON')
                with arcpy.da.SearchCursor(reference_w_gConstraints,
                                           "AREA") as cursor:
                    for row in cursor:
                        referenceArea_w_gConstraints = row[0]
                expansionRate = referenceArea / referenceArea_w_gConstraints
                buffer_dist = buffer_dist * expansionRate
                arcpy.Buffer_analysis(reference_w_gConstraints_step1,
                                      reference_w_gConstraints_step2,
                                      buffer_dist)
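            # Convergence sketch with hypothetical numbers: if the ideal area
            # is 100 km**2 but erasing unbuildable land leaves 80 km**2, then
            # expansionRate = 100 / 80 = 1.25, the buffer distance grows by
            # 25%, and the erase repeats until the ratio falls within
            # [1 - precision, 1 + precision].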

            pointGrid(reference_w_gConstraints, out_dir, conversionRatio,
                      resolution, total_population, "CG")
            reference_rawCompactness_w_gConstraints = rawCompactness(
                reference_point_CG, "population", conversionRatio)
            print("raw compactness of the reference (with geographic constraints):",
                  reference_rawCompactness_w_gConstraints[0])
            arcpy.AddMessage(
                "raw compactness of the reference (with geographic constraints): "
                + str(reference_rawCompactness_w_gConstraints[0]))
            print("compactness normalized by the reference (with geographic constraints):",
                  case_rawCompactness[0] /
                  reference_rawCompactness_w_gConstraints[0])
            arcpy.AddMessage(
                "compactness normalized by the reference (with geographic constraints): "
                + str(case_rawCompactness[0] /
                      reference_rawCompactness_w_gConstraints[0]))
            arcpy.SetParameterAsText(11, reference_w_gConstraints)
            arcpy.Delete_management(fishnet_label)
            arcpy.Delete_management(fishnet)
            arcpy.Delete_management(center)
            arcpy.Delete_management(reference_w_gConstraints_step1)
            arcpy.Delete_management(reference_w_gConstraints_step2)
            arcpy.Delete_management(unbuildable_merged)
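
# A minimal usage sketch, assuming hypothetical inputs: the paths and the
# population field below are placeholders, and "None" skips building the
# circular reference, so only the raw and population-normalized scores
# come back.
raw, normalized = main(r"C:\data\blocks.shp", "POP2010", r"C:\data\output.gdb",
                       unit="Kilometer", beta=0, norm_by_reference="None")
print(raw, normalized)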
Example #18

# Note: optimalmeta is a nested helper lifted from a larger script; it reads
# several variables from its enclosing scope (workspace, points_copy,
# seed_copy, metaan, iterations, near_method, dictlist, deletefeatures,
# metamasterlist).
            def optimalmeta(pathmeta, pointlayermeta, weightmeta, facilitymeta,
                            numbermeta):
                # If no seed file is supplied, generate random starting facilities
                global seedxmeta
                if facilitymeta == "":
                    arcpy.AddMessage(
                        "Generating random seed location for Meta-iteration "
                        + str(metaan + 1))
                    seedxmeta = arcpy.CreateRandomPoints_management(
                        workspace, "Random_meta_" + str(metaan + 1),
                        points_copy, "", int(numbermeta))
                    deletefeatures.append(seedxmeta)
                else:
                    arcpy.AddMessage("Using input seed")
                    seedxmeta = seed_copy
                global iterlistmeta
                iterlistmeta = []
                for iter in range(iterations):
                    # Near + mean-center update: assign each point to its
                    # nearest facility, then relocate every facility to the
                    # weighted mean center of its assignees
                    if iter == 0:
                        arcpy.AddMessage("Performing iteration #" +
                                         str(iter + 1) +
                                         " for Meta-Iteration " +
                                         str(metaan + 1))
                        nearmeta = arcpy.Near_analysis(pointlayermeta,
                                                       seedxmeta, "", "", "",
                                                       near_method)
                        meanmeta = arcpy.MeanCenter_stats(
                            pointlayermeta,
                            pathmeta + "\\" + "meancenter_meta" +
                            str(metaan + 1) + "_iteration_" + str(iter + 1),
                            weightmeta, 'NEAR_FID', '')
                        arcpy.AddField_management(meanmeta, "Iteration",
                                                  'TEXT')
                        arcpy.AddField_management(meanmeta, "MetaIT", 'TEXT')
                        arcpy.CalculateField_management(
                            meanmeta, 'ITERATION', str(iter + 1))
                        arcpy.CalculateField_management(
                            meanmeta, 'METAIT', str(metaan + 1))
                        iterlistmeta.append(meanmeta)
                        metamasterlist.append(meanmeta)
                        table0meta = arcpy.Statistics_analysis(
                            pointlayermeta, "mean_meta_" + str(iter),
                            [['NEAR_DIST', 'Mean']])
                        with arcpy.da.SearchCursor(
                                table0meta, ["MEAN_NEAR_DIST"]) as cursor:
                            for row in cursor:
                                value0meta = row[0]
                        # Index by the current meta-iteration, matching the
                        # iter >= 1 branch below
                        metalist = dictlist[metaan]
                        metalist.update({iter + 1: value0meta})
                        arcpy.Delete_management(table0meta)
                        deletefeatures.append(meanmeta)

                    elif iter >= 1:
                        arcpy.AddMessage("Performing iteration #" +
                                         str(iter + 1) +
                                         " for Meta-Iteration " +
                                         str(metaan + 1))
                        nearxmeta = arcpy.Near_analysis(
                            pointlayermeta, iterlistmeta[-1], "", "", "",
                            near_method)
                        meanxmeta = arcpy.MeanCenter_stats(
                            pointlayermeta,
                            pathmeta + "\\" + "meancenter_meta" +
                            str(metaan + 1) + "_iteration_" + str(iter + 1),
                            weightmeta, 'NEAR_FID', '')
                        arcpy.AddField_management(meanxmeta, "Iteration",
                                                  'TEXT')
                        arcpy.AddField_management(meanxmeta, "MetaIT", 'TEXT')
                        arcpy.CalculateField_management(
                            meanxmeta, 'ITERATION', str(iter + 1))
                        arcpy.CalculateField_management(
                            meanxmeta, 'METAIT', str(metaan + 1))
                        iterlistmeta.append(meanxmeta)
                        metamasterlist.append(meanxmeta)
                        table1meta = arcpy.Statistics_analysis(
                            pointlayermeta,
                            "mean_" + str(iter) + "_" + str(metaan + 1),
                            [['NEAR_DIST', 'Mean']])
                        with arcpy.da.SearchCursor(
                                table1meta, ["MEAN_NEAR_DIST"]) as cursor:
                            for row in cursor:
                                value1meta = row[0]
                        metalist = dictlist[metaan]
                        metalist.update({iter + 1: value1meta})
                        arcpy.Delete_management(table1meta)
                        deletefeatures.append(meanxmeta)

                    else:
                        # Unreachable: range() never yields a negative index
                        arcpy.AddMessage("Error in first iteration")
Example #19

# Fragment from a least-cost path workflow; elevMAX and the rasters named
# "euclidean", "polyline", and "raster_polyline" come from earlier steps that
# are not shown here.
    # --- Invert the raster: out = elevMAX - "euclidean"
    arcpy.gp.RasterCalculator_sa('(("euclidean" - %s) * -1) + 0' % elevMAX, "euc_inv_input")

    print("Raster inverted")

    # --- Set NoData values to 60000 so they act as high-cost barriers
    arcpy.gp.RasterCalculator_sa('Con(IsNull("euc_inv_input"),60000,"euc_inv_input")', "euc_inv")

    print("Nan values changed to 60000")

    # --- Get high point: keep only the cells on the line at its maximum
    #     elevation, then collapse any ties into one point via the mean center
    arcpy.gp.ZonalStatistics_sa("polyline", "FID", "raster_polyline", "max_elevation", "MAXIMUM", "DATA")
    arcpy.gp.RasterCalculator_sa('Con("raster_polyline" == "max_elevation","raster_polyline")', "max_value")
    arcpy.RasterToPoint_conversion(in_raster="max_value", out_point_features="max_p", raster_field="Value")
    arcpy.MeanCenter_stats(Input_Feature_Class="max_p", Output_Feature_Class="max_point")

    print("Get high point")

    # --- Get low point: the same procedure with the minimum elevation
    arcpy.gp.ZonalStatistics_sa("polyline", "FID", "raster_polyline", "min_elevation", "MINIMUM", "DATA")
    arcpy.gp.RasterCalculator_sa('Con("raster_polyline" == "min_elevation","raster_polyline")', "min_value")
    arcpy.RasterToPoint_conversion(in_raster="min_value", out_point_features="min_p", raster_field="Value")
    arcpy.MeanCenter_stats(Input_Feature_Class="min_p", Output_Feature_Class="min_point")

    print("Get low point")

    # --- Cost Distance
    arcpy.gp.CostDistance_sa("max_point", "euc_inv", "cost_distance", "", "cost_direction", "", "", "", "", "")

    print("Cost distance")