    def execute(self, parameters, messages):
        """The source code of the tool."""
        arcpy.env.overwriteOutput = True

        in_nc = parameters[0].valueAsText
        start_datetime = parameters[1].valueAsText
        time_interval = parameters[2].valueAsText
        out_flat_table = parameters[3].valueAsText
        out_uniqueID_table = parameters[4].valueAsText

        # validate the netCDF dataset
        self.validateNC(in_nc, messages)

        # create flat table based on the netcdf data file
        self.createFlatTable(in_nc, out_flat_table)

        # add and calculate TimeValue field
        self.calculateTimeField(out_flat_table, start_datetime, time_interval)

        # add attribute indices for COMID and TimeValue
        arcpy.AddIndex_management(out_flat_table, self.fields_oi[1],
                                  self.fields_oi[1])
        arcpy.AddIndex_management(out_flat_table, self.fields_oi[3],
                                  self.fields_oi[3])

        # create unique ID table if user defined
        arcpy.AddMessage("unique ID table: {0}".format(out_uniqueID_table))
        if out_uniqueID_table is not None:
            self.createUniqueIDTable(in_nc, out_uniqueID_table)

        return
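Quick reference: every example on this page centers on arcpy.AddIndex_management, whose signature is AddIndex_management(in_table, fields, {index_name}, {unique}, {ascending}). A minimal sketch with hypothetical table and field names:

import arcpy

arcpy.AddIndex_management(r"C:\data\demo.gdb\flowlines",  # table or feature class
                          "COMID",                        # field(s) to index
                          "COMID_idx",                    # index name
                          "NON_UNIQUE",                   # duplicate values allowed
                          "ASCENDING")                    # index sort order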
Example #2
    def execute(self, parameters, messages):
        """The source code of the tool."""
        arcpy.env.overwriteOutput = True

        in_data = parameters[0].value
        in_workspace_server = parameters[1].valueAsText

        for row in in_data:
            data = row[0]
            name = os.path.basename(str(data))
            if "Discharge_Table" in name:
                outTable = os.path.join(in_workspace_server, name)
                # Copy discharge table
                arcpy.CopyRows_management(data, outTable, '#')
                # Add attribute index to the discharge table
                arcpy.AddIndex_management(outTable, self.fields_oi[1],
                                          self.fields_oi[1])
                arcpy.AddIndex_management(outTable, self.fields_oi[3],
                                          self.fields_oi[3])
            elif "Flowline_" in name:
                outFlowline = os.path.join(in_workspace_server, name)
                # Copy flowline feature class
                arcpy.CopyFeatures_management(data, outFlowline)
                # Add attribute index to the flowline feature class
                arcpy.AddIndex_management(outFlowline, self.name_ID,
                                          self.name_ID, "UNIQUE", "ASCENDING")
            else:
                arcpy.AddMessage(
                    "{0} is not copied due to incorrect name".format(name))

        return
Example #3
def MakeUPNetWeightsTable(UPConfig, ts, lu):
    '''
    Create an empty table to hold weights data in the database. 
    Only call this if you want to create a new table. This
    function is not intended to overwrite existing versions.
    
    Called By:
    WriteNetWeightsByLu
    
    Calls:
  
    Arguments:
    UPConfig
    
    '''

    if not arcpy.Exists(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights')):
        Logger("Creating New up_net_weights table")
        arcpy.env.overwriteOutput = False
        arcpy.CreateTable_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname']), 'up_net_weights')
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'),
            'timestep', 'TEXT', "", "", 8)
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'), 'lu',
            'TEXT', "", "", 8)
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'),
            UPConfig['BaseGeom_id'], 'LONG')
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'),
            'weight', 'DOUBLE')
        arcpy.AddIndex_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'),
            'timestep', 'timestep_nwt_idx')
        arcpy.AddIndex_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'), 'lu',
            'lu_nwt_idx')
        arcpy.AddIndex_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_net_weights'),
            UPConfig['BaseGeom_id'],
            "_".join([UPConfig['BaseGeom_id'], 'nwt', 'idx']))
        Logger("Created New up_net_weights table")
    else:
        Logger("up_net_weights table already exists, skipping")
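The repeated os.path.join calls above can be hoisted into local variables. A minimal refactor sketch of the same table-creation logic, assuming the UPConfig dict from the example (behavior intended to be identical):

import os
import arcpy

db = os.path.join(UPConfig['paths']['dbpath'], UPConfig['paths']['dbname'])
nwt = os.path.join(db, 'up_net_weights')
if not arcpy.Exists(nwt):
    arcpy.CreateTable_management(db, 'up_net_weights')
    arcpy.AddField_management(nwt, 'timestep', 'TEXT', "", "", 8)
    arcpy.AddField_management(nwt, 'lu', 'TEXT', "", "", 8)
    arcpy.AddField_management(nwt, UPConfig['BaseGeom_id'], 'LONG')
    arcpy.AddField_management(nwt, 'weight', 'DOUBLE')
    arcpy.AddIndex_management(nwt, 'timestep', 'timestep_nwt_idx')
    arcpy.AddIndex_management(nwt, 'lu', 'lu_nwt_idx')
    arcpy.AddIndex_management(nwt, UPConfig['BaseGeom_id'],
                              "_".join([UPConfig['BaseGeom_id'], 'nwt', 'idx']))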
Example #4
def main():

    #### Set Up Arguments
    parser = argparse.ArgumentParser(description="slice mfp into sub-regions")

    parser.add_argument("mfp", help="master footprint gdb layer")
    parser.add_argument("--dryrun",
                        action="store_true",
                        default=False,
                        help="print actions without executing")

    #### Parse Arguments
    args = parser.parse_args()
    mfp = os.path.abspath(args.mfp)
    gdb, lyr = os.path.split(mfp)

    #### Validate Required Arguments
    if not arcpy.Exists(mfp):
        parser.error("mfp path is not a valid dataset")

    # Set up logging.
    lsh = logging.StreamHandler()
    lsh.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(levelname)s- %(message)s',
                                  '%m-%d-%Y %H:%M:%S')
    lsh.setFormatter(formatter)
    logger.addHandler(lsh)

    arcpy.env.workspace = gdb

    #### Derive subdatasets and P1BS versions
    # Datasets: source, dst, expression
    dss = [
        (lyr, lyr + "_arctic", """CENT_LAT >= 49"""),
        (lyr, lyr + "_antarctic", """CENT_LAT <= -50"""),
        (lyr, lyr + "_hma",
         """CENT_LAT >= 25 AND CENT_LAT <= 53 AND CENT_LONG >= 65 AND CENT_LONG <= 110"""
         ),
        (lyr, lyr + "_p1bs", """PROD_CODE = 'P1BS'"""),
        (lyr + "_arctic", lyr + "_arctic_p1bs", """PROD_CODE = 'P1BS'"""),
        (lyr + "_antarctic", lyr + "_antarctic_p1bs",
         """PROD_CODE = 'P1BS'"""),
        (lyr + "_hma", lyr + "_hma_p1bs", """PROD_CODE = 'P1BS'"""),
    ]

    for ds in dss:
        src, dst, exp = ds
        logger.info("Calculating {}".format(dst))
        arcpy.FeatureClassToFeatureClass_conversion(src, gdb, dst, exp)
        logger.info("Adding index to {}".format(dst))
        arcpy.AddIndex_management(dst, "CATALOG_ID", "catid")

    ## Add index to base table
    logger.info("Adding index to {}".format(lyr))
    arcpy.AddIndex_management(lyr, "CATALOG_ID", "catid")
Example #5
    def process(self):
        start_seconds = clock()

        workspace = arcpy.env.workspace
        arcpy.env.workspace = self.cadastre

        self.log.debug('removing index')
        try:
            arcpy.RemoveIndex_management(in_table='PLSSPoint_AGRC',
                                         index_name='webquery')
        except Exception as e:
            self.log.warn('error removing PLSS index: %s', e)

        self.log.debug('adding index')
        try:
            arcpy.AddIndex_management(in_table='PLSSPoint_AGRC',
                                      fields='POINTID',
                                      index_name='webquery')
        except Exception as e:
            self.log.warn('error adding parcel index: %s', e)

        arcpy.env.workspace = workspace

        self.log.debug('finished PLSS processing %s',
                       seat.format_time(clock() - start_seconds))
Example #6
def add_indexes(dataset, indexes_schema):
    for index_name, index_properties in indexes_schema:
        fields = index_properties['fields']
        unique = index_properties['unique']
        arcpy.AddIndex_management(in_table=dataset, fields=fields, index_name=index_name, unique=unique)

    return dataset
Example #7
def create_index(fn, fields, index_name):
    if isOptimizable(fn):
        if index_name not in [index.name for index in arcpy.ListIndexes(fn)]:
            log('Creating index ' + index_name + ' in ' + fn)
            arcpy.AddIndex_management(fn, fields, index_name, 'UNIQUE',
                                      'ASCENDING')
    return
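Since add_indexes above unpacks (index_name, index_properties) pairs, a plain dict must be passed via .items(). A hypothetical schema and call (path and field names invented):

indexes_schema = {
    'COMID_idx': {'fields': ['COMID'], 'unique': 'NON_UNIQUE'},
    'NAME_idx': {'fields': ['GNIS_NAME'], 'unique': 'NON_UNIQUE'},
}
add_indexes(r"C:\data\demo.gdb\flowlines", indexes_schema.items())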
Example #8
def _add_indices(output_target, out_gdb, x):

    if output_target == OUTPUT_GDB:
        for i in x['indexes']:
            if i['fields'][0]['type'] not in ['OID', 'Geometry', 'GlobalID']:
                arcpy.AddIndex_management(
                    in_table=out_gdb + '/' + x['name'],
                    index_name=i['name'],
                    fields=[f['name'] for f in i['fields']],
                    unique='UNIQUE' if i['unique'] else 'NON_UNIQUE',
                    ascending='ASCENDING'
                    if i['ascending'] else 'NON_ASCENDING')

    elif output_target == OUTPUT_XML:

        return list(
            map(
                lambda i: """<Index xsi:type="esri:Index">
                    <Name>%(name)s</Name>
                    <IsUnique>%(isUnique)s</IsUnique>
                    <IsAscending>%(isAscending)s</IsAscending>
                    <Fields xsi:type="esri:Fields">
                        <FieldArray xsi:type="esri:ArrayOfField">
                            %(fields)s
                        </FieldArray>
                    </Fields>
                </Index>""" % {
                    'name': i['name'],
                    'isUnique': 'true' if i['unique'] else 'false',
                    'isAscending': 'true' if i['ascending'] else 'false',
                    'fields': "\n".join(_add_fields(output_target, None, i))
                }, x['indexes']))
Example #9
    def copyFlowlines(self, in_drainage_line, path_database, list_uniqueID):
        """Create copies of flowlines based on the layer query definitions"""
        # make a feature layer for query selection
        name_lyr = "flowlines"
        arcpy.MakeFeatureLayer_management(in_drainage_line, name_lyr)
        # Create the query expression for line features with matching records in the flat table
        expression_base = self.name_ID + " IN ("
        count = len(list_uniqueID)
        counter = 1
        for each_ID in list_uniqueID:
            if counter == count:
                expression_base = expression_base + str(each_ID) + ")"
            else:
                expression_base = expression_base + str(each_ID) + ", "
            counter += 1

        for each_key in self.layer_minScale_maxScale_query.keys():
            out_copy = os.path.join(path_database, "Flowline_" + each_key)
            pars = self.layer_minScale_maxScale_query[each_key]
            query = pars[2]
            expression = expression_base
            if query is not None:
                expression = expression_base + " AND " + query

            arcpy.SelectLayerByAttribute_management(name_lyr, "NEW_SELECTION",
                                                    expression)
            arcpy.CopyFeatures_management(name_lyr, out_copy)
            arcpy.AddIndex_management(out_copy, self.name_ID, self.name_ID,
                                      "UNIQUE", "ASCENDING")

        return
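The counter loop that assembles the IN clause above can be collapsed with str.join; an equivalent sketch (drop-in for the loop inside copyFlowlines, using the same self.name_ID):

expression_base = "{0} IN ({1})".format(
    self.name_ID, ", ".join(str(each_ID) for each_ID in list_uniqueID))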
Example #10
def main():
    # Prepare ArcGIS to work
    print datetime.datetime.now(), "Loading..."
    env.workspace = WUP_WORKSPACE

    # Make sure we have an index
    # NOTE Shapefiles don't support multiple indices; they are composites instead
    arcpy.AddIndex_management(WUP_SHAPEFILE, "PIN")

    # Open search cursors on the secondary files
    for key in WUP_COUNTIES:
        print datetime.datetime.now(), "Processing County: ", key
        values = WUP_COUNTIES.get(key)
        path = values[0]
        values = values[1:]
        if '' in values: values.remove('')

        # Iterate over the county plats, attempt to update the owners
        print datetime.datetime.now(), "Feature Count: ", arcpy.GetCount_management(path)
        count = 0
        with arcpy.da.SearchCursor(path, values) as cursor:
            for row in cursor:
                updateOwner(row, key)
                count = count + 1
                if count % 250 == 0:
                    print datetime.datetime.now(), "Processed: ", count
        print datetime.datetime.now(), "Processed: ", count
Example #11
def makeTextID(field, table):
    ''' This function creates a copy of an existing field with the String format.
        
    ** Description: **
        
        Certain types of fields cause problems when performing joins, and Strings are generally the most reliable.
        This function creates a new field with string format of length 30 and copies all data from the problem field.
    
    **Arguments:**
    
        * *field* - input arcpy field object
        * *table* - name with full path of input table to be modified
    
    **Returns:**
    
        * *textFieldName* - validated field name of added field.
        
    '''
    # Obtain valid fieldname
    textFieldName = arcpy.ValidateFieldName("txt" + field.name, table)
    # Test for Schema Lock
    if arcpy.TestSchemaLock(table):
        # Add the output text field
        arcpy.AddField_management(table, textFieldName, "TEXT", "#", "#", "30")
    else:
        arcpy.AddMessage(
            "Unable to acquire the necessary schema lock to add the new field")
    # Calculate the field values
    arcpy.CalculateField_management(table, textFieldName,
                                    '!' + field.name + '!', "PYTHON")
    # Since this field will be used in joins, index the field.
    arcpy.AddIndex_management(table, textFieldName, "idIDX", "UNIQUE")
    return textFieldName
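A hypothetical call, assuming the table has a problem field named COMID. Note that the index name idIDX is hard-coded, so makeTextID can only be used once per table as written:

import arcpy

table = r"C:\data\demo.gdb\flowlines"
field = arcpy.ListFields(table, "COMID")[0]
txt_field = makeTextID(field, table)  # e.g. "txtCOMID"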
Example #12
    def addindex(self,
                 ifields,
                 iname,
                 unique="NON_UNIQUE",
                 ascending="NON_ASCENDING"):
        arcpy.AddIndex_management(self.fname, ifields, iname, unique,
                                  ascending)
Example #13
def MakeUPDistTable(UPConfig):
    '''
    Create an empty table to hold distance data in the database. 
    Only call this if you want to create a new table. This
    function is not intended to overwrite existing versions.
    
    Called By:
    
    Calls:
    
    Arguments:
    UPConfig
    
    '''

    if not arcpy.Exists(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_distances')):
        Logger("Creating New up_distances table")
        arcpy.env.overwriteOutput = False
        arcpy.CreateTable_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname']), 'up_distances')
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_distances'),
            'attracter', 'TEXT', "", "", 50)
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_distances'),
            UPConfig['BaseGeom_id'], 'LONG')
        arcpy.AddField_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_distances'),
            'distance', 'DOUBLE')
        arcpy.AddIndex_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_distances'),
            'attracter', 'attracter_idx')
        arcpy.AddIndex_management(
            os.path.join(UPConfig['paths']['dbpath'],
                         UPConfig['paths']['dbname'], 'up_distances'),
            UPConfig['BaseGeom_id'], "_".join([UPConfig['BaseGeom_id'],
                                               'idx']))
        Logger("Created New up_distances table")
    else:
        Logger("up_distances table already exists, skipping")
Example #14
def add_index(lyr, field_name):
    try:
        if not index_exists(lyr, field_name):
            arcpy.AddIndex_management(lyr, field_name, field_name, "UNIQUE",
                                      "ASCENDING")
    except Exception as error:
        print("Adding index {} not successful. {}".format(
            field_name, str(error)))
Example #15
def convertAltStreets(Project_Folder):
    arcpy.env.overwriteOutput = True

    Model_Inputs_gdb = os.path.join(Project_Folder, 'Model_Inputs.gdb')
    Model_Outputs_gdb = os.path.join(Project_Folder, 'Model_Outputs.gdb')

    streets_simple = os.path.join(Model_Outputs_gdb, 'Streets_Simple')
    altstreets = os.path.join(Model_Inputs_gdb, 'AltStreets')

    arcpy.env.workspace = Model_Inputs_gdb

    # Simplify AltStreets and Streets lines
    # remove some of the line vertices so the output is low-resolution enough to be uploaded through MapMaker
    altstreets_simple = arcpy.SimplifyLine_cartography(in_features=altstreets, out_feature_class=os.path.join(Model_Outputs_gdb, "AltStreet_simple"), algorithm="POINT_REMOVE",
                                                       tolerance="5 Feet", error_resolving_option="RESOLVE_ERRORS", collapsed_point_option="KEEP_COLLAPSED_POINTS", error_checking_option="CHECK", in_barriers=[])[0]

    # add ref_zlev and dom fields for alias classification and linking to streets file
    arcpy.AddFields_management(in_table=altstreets_simple, field_description=[
                               ["REF_ZLEV", "SHORT"], ["DOM", "LONG"]])
    print('added fields to altstreets')

    arcpy.AddIndex_management(altstreets_simple, fields=[
                              "LINK_ID"], index_name="LINK_ID", unique="NON_UNIQUE", ascending="ASCENDING")
    print('added altstreet index')

    arcpy.JoinField_management(in_data=altstreets_simple, in_field="LINK_ID",
                               join_table=streets_simple, join_field="LINK_ID", fields=["NUM_STNMES"])
    print('joined altstreets to streets')

    # Filter out all of the altstreet rows that do not have multiple names
    altstreets_filter = arcpy.FeatureClassToFeatureClass_conversion(
        in_features=altstreets_simple, out_path=Model_Outputs_gdb, out_name="AltStreets_Filter", where_clause="NUM_STNMES > 1")
    print('filtered out altstreets with fewer than 2 street names')

    # Create Statistics Table from AltStreets_Simple
    # add in the count of all the street names added to the altstreets simple
    altstreet_stats = os.path.join(Model_Outputs_gdb, "Altstreets_Stats")
    arcpy.Statistics_analysis(in_table=altstreets_filter, out_table=altstreet_stats, statistics_fields=[
                              ["LINK_ID", "FIRST"]], case_field=["LINK_ID", "ST_NAME"])

    # Join AltStreets_Simple with AltStreets_Stats
    arcpy.JoinField_management(in_data=altstreets_simple, in_field="LINK_ID",
                               join_table=altstreet_stats, join_field="LINK_ID", fields=["NUM_STNMES"])

    arcpy.CalculateField_management(in_table=altstreets_simple, field="Dom",
                                    expression="1", expression_type="PYTHON3", code_block="", field_type="TEXT")

    # Alias streetname identifier calculation (Alias == -9)
    # MapMaker REQUIRES it to be -9 in order to find it as an alias field
    arcpy.CalculateField_management(in_table=altstreets_simple, field="REF_ZLEV",
                                    expression="-9", expression_type="PYTHON3", code_block="", field_type="TEXT")

    # update the schema to match the MapMaker schema
    updateSchema(altstreets_simple)

    # returns altstreets_final gdb location
    return arcpy.FeatureClassToFeatureClass_conversion(in_features=altstreets_simple, out_path=Model_Outputs_gdb, out_name="AltStreets_Final")[0]
Example #16
def pandas_to_features(df,
                       fc,
                       pd_id_fld,
                       arc_id_fld,
                       out_fc,
                       keep_common=True):
    """
    Exports a pandas data frame and joins it to an existing
    feature class or table. Intended for larger datasets.

    Parameters:
    ----------
    df: pandas.DataFrame
        Data frame to export.
    fc: str
        Full path to feature class or layer to join to.
    pd_id_fld: str
        Name of field in data frame to join on.
    arc_id_fld: str
        Name of field in feature class to join on.
    out_fc: str
        Full path to the output feature class.
    keep_common: bool, optional, default True
        If True, only joined features will be retained.
        If False, all features will be retained.

    """

    with ScratchGdb() as scratch:

        with TempWork(scratch.path):

            temp_pd_name = '__pd_temp'
            temp_arc_name = '__polys_temp'

            # output the pandas table to a scratch workspace and add an attribute index
            pandas_to_arc(df, scratch.path, temp_pd_name, overwrite=True)
            arcpy.AddIndex_management(temp_pd_name, pd_id_fld, pd_id_fld)

            # do the join and export
            create_layer(temp_arc_name, fc)

            if keep_common:
                join_type = 'KEEP_COMMON'
            else:
                join_type = 'KEEP_ALL'

            arcpy.AddJoin_management(temp_arc_name, arc_id_fld, temp_pd_name,
                                     pd_id_fld, join_type)
            with TempQualifiedFields(False):
                arcpy.CopyFeatures_management(temp_arc_name, out_fc)

            # tidy up
            arcpy.Delete_management(temp_pd_name)
            arcpy.Delete_management(temp_arc_name)
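A hypothetical invocation, joining a data frame of scores onto a parcels feature class (paths and field names invented for illustration):

import pandas as pd

df = pd.DataFrame({'pid': [101, 102, 103], 'score': [0.2, 0.7, 0.9]})
pandas_to_features(df,
                   fc=r"C:\data\demo.gdb\parcels",
                   pd_id_fld='pid',
                   arc_id_fld='PARCEL_ID',
                   out_fc=r"C:\data\demo.gdb\parcels_scored",
                   keep_common=True)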
Example #17
def add_stats(stats_vw, core_id, fld_pre, table_vw, join_col):
    """Add zonal and calculated statistics to stick table"""
    tmp_mea = fld_pre + "_tmp_mea"
    tmp_std = fld_pre + "_tmp_std"
    umin2std = fld_pre + "umin2std"

    # Add fields to stick table - has to be done before join
    arcpy.AddField_management(table_vw, tmp_mea, "Float", "", "", "", "",
                              "NULLABLE")
    arcpy.AddField_management(table_vw, tmp_std, "Float", "", "", "", "",
                              "NULLABLE")
    arcpy.AddField_management(table_vw, umin2std, "Float", "", "", "", "",
                              "NULLABLE")

    # Join distance table to zonal stats table
    arcpy.AddIndex_management(table_vw, FR_COL, "fridx", "NON_UNIQUE",
                              "ASCENDING")
    arcpy.AddIndex_management(table_vw, TO_COL, "toidx", "NON_UNIQUE",
                              "ASCENDING")
    arcpy.AddIndex_management(stats_vw, core_id, "coreidx", "UNIQUE",
                              "ASCENDING")
    arcpy.AddJoin_management(table_vw, join_col, stats_vw, core_id)

    tbl_name = arcpy.Describe(table_vw).baseName
    stats_tbl_nm = arcpy.Describe(stats_vw).baseName

    # Insert values into fields
    mean_value = "!" + stats_tbl_nm + ".MEAN" + "!"
    std_value = "!" + stats_tbl_nm + ".STD" + "!"
    mea_fld = "!" + tbl_name + "." + tmp_mea + "!"
    std_fld = "!" + tbl_name + "." + tmp_std + "!"

    arcpy.CalculateField_management(table_vw, tmp_mea, mean_value,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(table_vw, tmp_std, std_value, "PYTHON_9.3")
    expression = mea_fld + " - " + std_fld + " - " + std_fld
    arcpy.CalculateField_management(table_vw, umin2std, expression,
                                    "PYTHON_9.3")

    # Remove join
    arcpy.RemoveJoin_management(table_vw, stats_tbl_nm)
Example #18
    def index(self, column, unique=False):

        # https://pro.arcgis.com/en/pro-app/tool-reference/data-management/add-attribute-index.htm
        # unique indexes cant be specified for multiversioned tables

        logging.info('indexing column {0} on {1}'.format(column, self.name))

        # BUILDINGBINIX
        # BUILDING_HISTORICDOITT_IDIX = 27 careful friend
        return self.interpret(
            arcpy.AddIndex_management(
                self.featureclass, column,
                '{0}{1}{2}'.format(self.name, column, 'IX'), unique))
Example #19
def limit_cores(pair_tbl, stats_tbl):
    """Limit core pairs based upon climate threshold."""
    pair_vw = "dist_tbvw"
    stats_vw = "stats_tbvw"
    core_id = cc_env.core_fld.upper()

    lm_util.gprint("\nLIMITING CORE PAIRS BASED UPON CLIMATE THRESHOLD")

    arcpy.MakeTableView_management(pair_tbl, pair_vw)
    arcpy.MakeTableView_management(stats_tbl, stats_vw)

    # Add basic stats to distance table
    lm_util.gprint("Joining zonal statistics to pairings table")
    arcpy.AddIndex_management(pair_vw, FR_COL, "fridx")
    arcpy.AddIndex_management(pair_vw, TO_COL, "toidx")
    arcpy.AddIndex_management(stats_vw, core_id, "coreidx")
    add_stats(stats_vw, core_id, "fr", pair_vw, TO_COL)
    add_stats(stats_vw, core_id, "to", pair_vw, FR_COL)

    # Calculate difference of 2 std
    lm_util.gprint("Calculating difference of 2 std")
    diffu_2std = "diffu_2std"
    arcpy.AddField_management(pair_vw, diffu_2std, "Float", "", "", "", "",
                              "NULLABLE")
    arcpy.CalculateField_management(pair_vw, diffu_2std,
                                    "abs(!frumin2std! - !toumin2std!)",
                                    "PYTHON_9.3")

    # Filter distance table based on the input threshold and delete rows
    lm_util.gprint("Filtering table based on threshold")
    diffu2std_fld = arcpy.AddFieldDelimiters(pair_vw, diffu_2std)
    expression = diffu2std_fld + " <= " + str(cc_env.climate_threshold)
    arcpy.SelectLayerByAttribute_management(pair_vw, "NEW_SELECTION",
                                            expression)
    rows_del = int(arcpy.GetCount_management(pair_vw).getOutput(0))
    if rows_del > 0:
        arcpy.DeleteRows_management(pair_vw)
    lm_util.gprint(str(rows_del) + " rows deleted")
Example #20
def createIndex(targetpath, indexFields=[]):
    """Create a spatial index and attribute indexes; before adding each
    attribute index, check whether an index with that name already exists."""

    arcpy.AddSpatialIndex_management(targetpath)

    indexlist = [str(index.name.lower()) for index in arcpy.ListIndexes(targetpath)]

    for field in indexFields:
        # compare case-insensitively, since indexlist holds lowercased names
        if field.lower() not in indexlist:
            try:
                arcpy.AddIndex_management(targetpath, field, field)
            except arcpy.ExecuteError:
                arcpy.GetMessages()
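A hypothetical call; AddSpatialIndex_management expects a feature class, so targetpath should point at one:

createIndex(r"C:\data\demo.gdb\parcels", indexFields=["COMID", "PIN"])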
Example #21
def efficient_merge(feature_class_list, output_fc, filter=''):
    fc_count = len(feature_class_list)
    all_exist_test = all(arcpy.Exists(fc) for fc in feature_class_list)

    # EXECUTE
    # Start with FC containing largest extent to prevent spatial grid errors
    descriptions = [arcpy.Describe(fc).extent for fc in feature_class_list]
    fc_areas = [
        int(d.XMax - d.XMin) * int(d.YMax - d.YMin) for d in descriptions
    ]
    index = [i for i, x in enumerate(fc_areas) if x == max(fc_areas)]
    first_fc = feature_class_list[index[0]]
    indexes = arcpy.ListIndexes(first_fc)
    feature_class_list.remove(first_fc)

    # This is a fast and stable merge method for this number of features compared to arcpy Merge
    if all_exist_test:
        print(
            "Beginning merge of {} feature classes, copying first feature class to output..."
            .format(fc_count))
        arcpy.Select_analysis(first_fc, output_fc, filter)
        arcpy.SetLogHistory(False)  # speeds up iterative updates, won't write to geoprocessing history for every step
        insertRows = arcpy.da.InsertCursor(output_fc, ["SHAPE@", "*"])

        for fc in feature_class_list:
            searchRows = arcpy.da.SearchCursor(fc, ["SHAPE@", "*"], filter)
            counter = 0
            for searchRow in searchRows:
                insertRows.insertRow(searchRow)
                counter += 1
            del searchRow, searchRows
            print("Merged {0} features from {1}".format(counter, fc))
        del insertRows
        arcpy.SetLogHistory(True)

        # Rebuild indexes
        try:
            arcpy.AddIndex_management(output_fc, 'Permanent_Identifier',
                                      'IDX_Permanent_Identifier')
        except:
            arcpy.AddWarning(
                'Could not build Permanent_Identifier index because there is no such field.'
            )

    else:
        print(
            "ERROR: One or more feature class paths is not valid. Merged feature class not created."
        )
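A hypothetical invocation; filter is an SQL where clause applied both to the initial Select_analysis and to each SearchCursor (all paths and field names invented):

efficient_merge(
    [r"C:\data\a.gdb\lakes_north", r"C:\data\a.gdb\lakes_south"],
    r"C:\data\merged.gdb\lakes_all",
    filter="AreaSqKm >= 0.04")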
Example #22
def pandas_to_features(df, fc, pd_id_fld, arc_id_fld, out_fc):
    """
    Exports a pandas data frame and joins it to an existing
    feature class or table. Intended for larger datasets.

    Parameters:
    ----------
    df: pandas.DataFrame
        Data frame to export.
    fc: str
        Full path to feature class or layer to join to.
    pd_id_fld: str
        Name of field in data frame to join on.
    arc_id_fld: str
        Name of field in feature class to join on.
    out_fc: str
        Full path to the output feature class.

    """

    # output the pandas table to a scratch workspace and add an attribute index
    scratch = arcpy.env.scratchGDB

    with TempWork(scratch):

        temp_pd_name = '__pd_temp'
        temp_arc_name = '__polys_temp'

        # output the pandas table to a scratch workspace and add an attribute index
        pandas_to_arc(df, scratch, temp_pd_name, overwrite=True)
        arcpy.AddIndex_management(temp_pd_name, pd_id_fld, pd_id_fld)

        # do the join and export
        create_layer(temp_arc_name, fc)

        arcpy.AddJoin_management(
            temp_arc_name,
            arc_id_fld,
            temp_pd_name,
            pd_id_fld,
            'KEEP_COMMON'  # do we want to make this an input argument?
        )
        with TempQualifiedFields(False):
            arcpy.CopyFeatures_management(temp_arc_name, out_fc)

        # tidy up
        arcpy.Delete_management(temp_pd_name)
        arcpy.Delete_management(temp_arc_name)
Example #23
def export_reduced_featureclass(input_path, output_name):
    print("Exporting Building Footprint - BIN only feature class to SDE PROD")
    if arcpy.Exists(input_path):
        print("Adding requisite fields to output feature class")
        fms = arcpy.FieldMappings()
        fm = arcpy.FieldMap()
        fm.addInputField(input_path, "BIN")
        fms.addFieldMap(fm)
        print("Requisite fields added to output feature class")
        print("Exporting reduced NYC Building Footprint Polygon feature class on SDE PROD")
        arcpy.FeatureClassToFeatureClass_conversion(input_path, sde_path, output_name, field_mapping=fms)
        print("Adding Index to BIN field")
        arcpy.AddIndex_management(input_path, ['BIN'], 'BIN_Index')
        print("Reduced NYC Building Footprint Polygon feature class exported to SDE PROD")
        arcpy.MetadataImporter_conversion(os.path.join(sde_path, 'NYC_Building_Footprints_Poly'),
                                          os.path.join(sde_path, output_name))
Example #24
def run_sed(para, paths):
    """
    Script to calculate the Sediment Trapping Index (SED)

    :param para: input parameters and path names for executing the script
    :param paths: output pathnames
    :return:
    """
    streams_fc = para["streams_fc"]
    dams_fc = para["dams_fc"]
    svol_field = para["svol_field"]
    barrier_inc_field = para["barrier_inc_field"]
    lakes_fc = para["lakes_fc"]
    sed_field = para["sed_field"]
    out_gdb = paths["gdb_full_path"]

    streams = load_streams(streams_fc)
    streams, convert_dict = update_stream_routing_index(streams)

    barriers = load_barriers(dams_fc, convert_dict, svol_field,
                             barrier_inc_field)
    dam_volu_dict = barriers_calculate(barriers, svol_field)

    lakes = load_lakes(lakes_fc, convert_dict)

    small_lake_loss_dict, lake_volu_dict = indices.sed.lakes_calculate(lakes)

    streams = indices.sed.calculate_sed(streams, dam_volu_dict, lake_volu_dict,
                                        small_lake_loss_dict)

    prt("Exporting results sediment table")

    outtbl = export_results_table(streams, out_gdb)

    # Adding indices helps with joining tables to geometry
    arcpy.AddIndex_management(outtbl, fd.GOID, fd.GOID, "UNIQUE", "ASCENDING")

    # Update original database
    if para["update_mode"] == "YES":
        print("Updating SED values in database {} ".format(streams_fc))
        try:
            helper.copy_between(streams_fc, fd.GOID, sed_field, outtbl,
                                fd.GOID, fd.SED, "overwrite", 0)
        except Exception as e:
            print(str(e))
            sys.exit(0)
Example #25
def BusCount(city_e,input_link,input_link_compare):

    # Add an attribute index
    arcpy.AddIndex_management(input_link, "LINK_PID", "", "NON_UNIQUE", "NON_ASCENDING")

    # Process: Add Join
    input_link_lyr = arcpy.MakeFeatureLayer_management(input_link, input_link.replace(".shp",".lyr"))
    input_link_compare_lyr = arcpy.MakeFeatureLayer_management(input_link_compare, input_link_compare.replace(".shp",".lyr"))
    arcpy.AddJoin_management(input_link_lyr, "LINK_PID", input_link_compare_lyr, "LINKID", "KEEP_ALL")

    # Process: Select Layer By Attribute
    arcpy.SelectLayerByAttribute_management(input_link_lyr, "NEW_SELECTION", "\"LINK_COMPARE.TODO\" <> ' ' ")

    # Process: Get Count
    count = str(arcpy.GetCount_management(input_link_lyr))  # if str is changed to int, count is always null. Why?

    return count
Example #26
def pandas_to_features(df, fc, pd_id_fld, arc_id_fld, out_fc):
    """
    Exports a pandas data frame and joins it to an existing
    feature class or table. Intended for larger datasets.

    Parameters:
    ----------
    df: pandas.DataFrame
        Data frame to export.
    fc: str
        Full path to feature class or layer to join to.
    pd_id_fld: str
        Name of field in data frame to join on.
    arc_id_fld: str
        Name of field in feature class to join on.
    out_fc: str
        Full path to the output feature class.

    """

    # output the pandas table to a scratch workspace and add an attribute index
    temp_out = '{}//{}'.format(arcpy.env.scratchGDB, '___pandas_out')
    pandas_to_arc(df,
                  os.path.dirname(temp_out),
                  os.path.basename(temp_out),
                  overwrite=True)
    arcpy.AddIndex_management(temp_out, pd_id_fld, pd_id_fld)

    # do the join and export
    create_layer('__temp_polys', fc)
    arcpy.AddJoin_management(
        '__temp_polys',
        arc_id_fld,
        temp_out,
        pd_id_fld,
        'KEEP_COMMON'  # do we want to make this an input argument?
    )
    with TempQualifiedFields(False):
        arcpy.CopyFeatures_management('__temp_polys', out_fc)

    # tidy up
    arcpy.Delete_management(temp_out)
    arcpy.Delete_management('__temp_polys')
    arcpy.Delete_management('in_memory//__temp_export')
Example #27
def addAndCalcField(dataset_path,
                    field_type,
                    field_name,
                    field_alias="",
                    field_length="",
                    field_value=None,
                    code_block="",
                    add_index=False,
                    debug=False):
    if debug:
        arcpy.AddMessage(
            "Adding {} field '{}({})' and setting value to '{}'".format(
                field_type, field_name, field_length, field_value))

    arcpy.AddField_management(dataset_path,
                              field_name,
                              field_type,
                              field_precision="",
                              field_scale="",
                              field_length=field_length,
                              field_alias=field_alias,
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED",
                              field_domain="")
    if debug:
        addToolMessages()

    if add_index:
        arcpy.AddIndex_management(dataset_path,
                                  fields=field_name,
                                  index_name=field_name,
                                  unique="NON_UNIQUE",
                                  ascending="ASCENDING")
        if debug:
            addToolMessages()

    if field_value is not None:
        arcpy.CalculateField_management(in_table=dataset_path,
                                        field=field_name,
                                        expression=field_value,
                                        expression_type="PYTHON_9.3",
                                        code_block=code_block)
        if debug:
            addToolMessages()
Example #28
def RecreateIndexes(CenterlineSequence, CalibrationPoint, Route, RouteIDs):
    tableArray = [[
        CenterlineSequence, ['ROUTEID', 'NETWORKID', 'FROMDATE', 'TODATE']
    ]]
    tableArray += [[
        CalibrationPoint,
        ['ROUTEID', 'NETWORKID', 'FROMDATE', 'TODATE', 'MEASURE']
    ]]
    tableArray += [[Route, ['FROMDATE', 'TODATE'] + RouteIDs]]
    for indexGroup in tableArray:
        for fieldName in indexGroup[1]:
            tableName = os.path.basename(indexGroup[0])
            indexName = 'IX_%s' % (fieldName)
            if len(arcpy.ListIndexes(indexGroup[0], indexName)) == 1:
                arcpy.RemoveIndex_management(indexGroup[0], indexName)
            try:
                arcpy.AddIndex_management(indexGroup[0], fieldName, indexName)
                arcpy.AddMessage('Created index %s on field %s in table %s' %
                                 (indexName, fieldName, tableName))
            except:
                arcpy.AddWarning(
                    'Unable to create index %s on field %s in table %s' %
                    (indexName, fieldName, tableName))
Example #29
def CalcGPInt(TimeStep, UPConfig):
    '''
    Create a feature class with the general plan category and the polygon id for the specified timestep
    
    Called By:
    CalcGP
    
    
    Arguments:
    TimeStep: which time step is being processed
    UPConfig: the primary settings configuration object
    
    '''

    #TODO: Convert to multiprocess

    Logger("Intersecting General Plan")
    arcpy.SpatialJoin_analysis(UPConfig['BaseGeom_cent'],
                               UPConfig[TimeStep[0]]['gp'][0],
                               'up_bg_gp_{ts}'.format(ts=TimeStep[0]))

    # delete any datetime fields - creating an array later will fail if they remain
    DateFields = arcpy.ListFields('up_bg_gp_{ts}'.format(ts=TimeStep[0]), '*',
                                  'Date')
    if len(DateFields) != 0:
        DeleteFields = []
        for DateField in DateFields:
            DeleteFields.append(DateField.name)
        arcpy.DeleteField_management('up_bg_gp_{ts}'.format(ts=TimeStep[0]),
                                     DeleteFields)

    arcpy.AddIndex_management('up_bg_gp_{ts}'.format(ts=TimeStep[0]),
                              UPConfig['BaseGeom_id'],
                              'idx_bg_gp_pclid_{ts}'.format(ts=TimeStep[0]),
                              'UNIQUE', 'ASCENDING')

    Logger("General Plan Intersected")
Example #30
        levRowCol = currentURL.split("/")
        theList = levRowCol[-3:]
        row.LevelID = theList[0]
        row.RowID = theList[1]
        row.ColumnID = theList[2]
        row.TileID = theList[0] + theList[1] + theList[2]
        rows.updateRow(row)
    del row
    del rows

    currentURL = ""

    arcpy.Frequency_analysis(TilingScheme_gdb + '/' + tileHits,
                             TilingScheme_gdb + '/' + Frequency,
                             FrequencyField, "")
    arcpy.AddIndex_management(TilingScheme_gdb + '/' + Frequency, "TileID",
                              "TileHit_Index", "UNIQUE", "NON_ASCENDING")
    arcpy.AddMessage("Done!")

except:
    # Get the traceback object
    #
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a
    #   message string
    #
    pymsg = tbinfo + "\n" + str(sys.exc_type) + ": " + str(sys.exc_value)
    # Return python error messages for use with a script tool
    #
    arcpy.AddError(pymsg)
    # Print Python error messages for use in Python/PythonWin
    print pymsg