示例#1
0
 def create(self, template=None):
     """Create the table ``self.tname`` in the workspace ``self.gname``.

     template : optional table whose schema is copied into the new table
                (passed straight through to CreateTable_management).
     Returns the CreateTable_management Result object.
     """
     result = arcpy.CreateTable_management(self.gname, self.tname, template)
     # Surface any geoprocessing messages produced by the tool run.
     DisplayMessages()
     return result
示例#2
0
def MakeUPDisaggWeightsTable(UPConfig, ts, lu):
    '''
    Create an empty table to hold distance data in the database.
    Only call this if you want to create a new table. This
    function is not intended to overwrite existing versions.

    Called By:
    WriteDisaggWeightsByLu

    Calls:

    Arguments:
    UPConfig: configuration dict; UPConfig['paths']['dbpath'] /
        ['dbname'] locate the geodatabase and UPConfig['BaseGeom_id']
        names the base-geometry key field.
    ts: timestep (not used here; kept for interface compatibility)
    lu: land use (not used here; kept for interface compatibility)
    '''
    # Build the two paths once instead of repeating the same
    # os.path.join(...) expression for every field/index operation.
    db_path = os.path.join(UPConfig['paths']['dbpath'],
                           UPConfig['paths']['dbname'])
    table_path = os.path.join(db_path, 'up_disagg_weights')

    if not arcpy.Exists(table_path):
        Logger("Creating New up_disagg_weights table")
        arcpy.env.overwriteOutput = False
        arcpy.CreateTable_management(db_path, 'up_disagg_weights')

        # Schema: timestep/lu/attracter identify a weight record, the
        # base-geometry id links back to features, weight holds the value.
        arcpy.AddField_management(table_path, 'timestep', 'TEXT', "", "", 8)
        arcpy.AddField_management(table_path, 'lu', 'TEXT', "", "", 8)
        arcpy.AddField_management(table_path, 'attracter', 'TEXT', "", "", 50)
        arcpy.AddField_management(table_path, UPConfig['BaseGeom_id'], 'LONG')
        arcpy.AddField_management(table_path, 'weight', 'DOUBLE')

        # Attribute indexes on the lookup fields to speed later queries.
        arcpy.AddIndex_management(table_path, 'timestep', 'timestep_wt_idx')
        arcpy.AddIndex_management(table_path, 'lu', 'lu_wt_idx')
        arcpy.AddIndex_management(table_path, 'attracter', 'attracter_wt_idx')
        arcpy.AddIndex_management(
            table_path, UPConfig['BaseGeom_id'],
            "_".join([UPConfig['BaseGeom_id'], 'wt', 'idx']))
        Logger("Created New up_disagg_weights table")
    else:
        Logger("up_disagg_weights table already exists, skipping")
示例#3
0
        pripct.append(1.0 - (np.sum(parceltable['pri_scr'] > parceltable['pri_scr'][j])/float(nparcels)))
    
    new_table = rec_append_fields(parceltable, 'pri_pct', data = pripct, dtypes = '<f8')
    new_table = new_table[new_table['pri_pct'].argsort()]

    ranktable = AutoName(muni + '_rnkst')
    out_table = os.path.join(workspace, os.path.basename(ranktable))
    arcpy.da.NumPyArrayToTable(new_table, out_table)
    muniparcelnames.append(out_table)
    arcpy.Delete_management(muniname)
    arcpy.AddMessage("Finished scoring " + muni + " parcels")
    
    
## Create an empty feature class with the desired schema
outfile = AutoName('Parcels_' + theme)
arcpy.CreateTable_management(workspace, outfile, muniparcelnames[0])

# Append all municipal files onto empty feature class with appropriate schema
arcpy.AddMessage("Re-merging municipalities")
arcpy.Append_management(muniparcelnames, outfile, schema_type="TEST")

# Working/scoring fields that should not survive into the merged output.
dropfields = [
    "TN_pctile_scr", "TP_pctile_scr", "TSS_pctile_scr", "aulsite_scr",
    "hsgtype_scr", "OBJECTID_1", "Shape_1", "Shape_2", "LU_type",
    "Code_3_12", "Code_1_2", "Code_Parcel_Database",
    "Desc_Parcel_Database", "Desc_full",
]
arcpy.DeleteField_management(outfile, dropfields)

# Remove the per-municipality intermediates now that they are merged.
for muni_table in muniparcelnames:
    arcpy.Delete_management(muni_table)

#Create table
gdb_and_table = os.path.join(gdb, tablename)

if arcpy.Exists(gdb):
    arcpy.AddMessage(gdb + " already exists.")
else:
    arcpy.AddMessage("Creating " + gdb)
    # os.path.split already yields (parent, name); no need for a
    # separate os.path.basename call as in the original.
    gdb_dir, gdb_name = os.path.split(gdb)
    arcpy.CreateFileGDB_management(gdb_dir, gdb_name)

# Reuse the precomputed full table path (was re-joined inline).
if arcpy.Exists(gdb_and_table):
    arcpy.AddMessage('Table Exists')
else:
    arcpy.AddMessage('Creating Table.')
    arcpy.CreateTable_management(gdb, tablename)

    ## Add fields to table
    arcpy.AddField_management(gdb_and_table, fields[0], 'TEXT', "", "", 48)
    arcpy.AddField_management(gdb_and_table, fields[1], "LONG", "", "", "")

## Insert data into table
c = arcpy.da.InsertCursor(gdb_and_table, fields)
for row in json_rows:
    arcpy.AddMessage(row)
    c.insertRow(row)
del c  # release the cursor's schema lock on the table

## Join Table to Existing feature class
country_join_field = 'NAME'
arcpy.CopyFeatures_management(country_fc, new_country_fc)
示例#5
0
def to_table(geo, location, overwrite=True):
    """
    Exports a geo enabled dataframe to a table.

    ===========================     ====================================================================
    **Argument**                    **Description**
    ---------------------------     --------------------------------------------------------------------
    location                        Required string. The output path of the table.
    ---------------------------     --------------------------------------------------------------------
    overwrite                       Optional Boolean.  If True (the default) and the table exists, it
                                    will be deleted and overwritten.  If False and the table exists,
                                    an exception will be raised.
    ===========================     ====================================================================

    :returns: String (path of the table that was written)
    """
    out_location = os.path.dirname(location)
    fc_name = os.path.basename(location)
    df = geo._data
    if location.lower().find('.csv') > -1:
        # CSV export is handled entirely by pandas; arcpy is not needed.
        geo._df.to_csv(location)
        return location
    elif HASARCPY:
        columns = df.columns.tolist()
        # Temporary field used by ExtendTable to join the array onto the table.
        join_dummy = "AEIOUYAJC81Z"
        try:
            # Drop the geometry column; a plain table has no geometry field.
            columns.pop(columns.index(df.spatial.name))
        except Exception:  # was a bare except; no geometry column present
            pass
        dtypes = [(join_dummy, np.int64)]
        if overwrite and arcpy.Exists(location):
            arcpy.Delete_management(location)
        elif not overwrite and arcpy.Exists(location):
            raise ValueError(('overwrite set to False, Cannot '
                              'overwrite the table. '))
        fc = arcpy.CreateTable_management(out_path=out_location,
                                          out_name=fc_name)[0]
        # 2. Add the Fields and Data Types
        #
        oidfld = da.Describe(fc)['OIDFieldName']
        for col in columns[:]:
            if col.lower() in ['fid', 'oid', 'objectid']:
                dtypes.append((col, np.int32))
            elif df[col].dtype.name == 'datetime64[ns]':
                dtypes.append((col, '<M8[us]'))
            elif df[col].dtype.name == 'object':
                try:
                    u = type(df[col][df[col].first_valid_index()])
                except Exception:  # all-null column: fall back to first type seen
                    u = pd.unique(df[col].apply(type)).tolist()[0]
                if issubclass(u, str):
                    # Size the unicode field to the longest string present.
                    mlen = df[col].str.len().max()
                    dtypes.append((col, '<U%s' % int(mlen)))
                else:
                    try:
                        dtypes.append((col, type(df[col][0])))
                    except Exception:
                        dtypes.append((col, '<U254'))
            elif df[col].dtype.name == 'int64':
                dtypes.append((col, np.int64))
            elif df[col].dtype.name == 'bool':
                dtypes.append((col, np.int32))
            else:
                dtypes.append((col, df[col].dtype.type))

        # append_only=False makes ExtendTable install the schema built above.
        array = np.array([], np.dtype(dtypes))
        arcpy.da.ExtendTable(fc,
                             oidfld, array,
                             join_dummy, append_only=False)
        # 3. Insert the Data
        #
        # The original computed the identical list twice (icols/dfcols);
        # compute it once and use it for both the cursor and the frame.
        fields = arcpy.ListFields(fc)
        icols = [fld.name for fld in fields
                 if fld.type not in ['OID', 'Geometry'] and
                 fld.name in df.columns]
        with da.InsertCursor(fc, icols) as irows:
            for idx, row in df[icols].iterrows():
                try:
                    irows.insertRow(row.tolist())
                except Exception:
                    print("row %s could not be inserted." % idx)
        return fc

    return
示例#6
0
def geoenrich(directory, featureset, gis_env_config, csv_uri):
    """Intakes a tax parcel shapefile, modifies fields, reprojects, then joins to BS&A table data. The final layer is
    copied to a geodatabase feature class.

    Args:
        directory: folder in which the temporary file geodatabase is created.
        featureset: ArcGIS API for Python FeatureSet holding the parcels.
        gis_env_config: dict read for 'overwrite_output' and 'out_fc_proj'.
        csv_uri: path/URI of the BS&A CSV export joined onto the parcels.

    Returns:
        Tuple (gdb_path, lyr_final_parcel): path of the temporary
        geodatabase and the name of the final parcel feature layer.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (field_lst_fc, dropFields_fc, field_lst_tbl, final_field_order,
    dropFields_fc_final, reorder_fields) — confirm they are in scope.
    """

    # Create temporary geodatabase and set path variables
    print('Creating temporary geodatabase...')
    gdb_name = 'temp.gdb'
    arcpy.CreateFileGDB_management(directory, gdb_name)
    gdb_path = os.path.join(directory, gdb_name)

    # GIS environment settings. All later tool calls use names relative to
    # this workspace; qualifiedFieldNames=False keeps joined field names short.
    arcpy.env.workspace = gdb_path
    arcpy.env.qualifiedFieldNames = False
    arcpy.env.overwriteOutput = gis_env_config['overwrite_output']

    # FeatureSet to FeatureClass (ArcGIS API for Python)
    print('Saving feature set to temporary geodatabase feature class...')
    featureset.save(gdb_path, 'fc_orig_parcel')

    # Correct field names (feature class). Each field_prop is presumably
    # (current_name, new_name, new_alias) — TODO confirm against field_lst_fc.
    print('Correcting field names...')
    for field_prop in field_lst_fc:
        arcpy.AlterField_management('fc_orig_parcel', field_prop[0],
                                    field_prop[1], field_prop[2])

    # Delete unnecessary fields (feature class)
    print('Deleting unnecessary fields...')
    arcpy.DeleteField_management('fc_orig_parcel', dropFields_fc)

    # Modify projection if necessary: project only when the input CRS is
    # defined and differs from the configured output CRS.
    print('Assessing coordinate system...')
    in_spatial_ref = arcpy.Describe('fc_orig_parcel').spatialReference
    out_spatial_ref = arcpy.SpatialReference(gis_env_config['out_fc_proj'])
    print(f'Current Spatial Reference: {in_spatial_ref.name}')
    print(f'Output Spatial Reference: {out_spatial_ref.name}')
    if in_spatial_ref.name == 'Unknown':
        change_proj = False
        print(
            'Could not change projection due to undefined input coordinate system'
        )
    elif in_spatial_ref.name == out_spatial_ref.name:
        change_proj = False
        print('Input and output coordinate systems are the same')
    else:
        change_proj = True
        print('Modifying output coordinate system...')

    # Output final_lyr to enterprise geodatabase feature class
    print('Copying features to temporary geodatabase feature class...')
    if change_proj:
        print('Changing projection, making feature layer...')
        arcpy.Project_management('fc_orig_parcel', 'fc_proj_parcel',
                                 out_spatial_ref)
        arcpy.MakeFeatureLayer_management('fc_proj_parcel', 'parcel_lyr')
    else:
        print('Making feature layer...')
        arcpy.MakeFeatureLayer_management('fc_orig_parcel', 'parcel_lyr')

    # Convert CSV to GDB table
    print('Finding table...')
    arcpy.TableToTable_conversion(csv_uri, gdb_path, 'bsa_export')

    # Create empty table to load bsa_export data
    arcpy.CreateTable_management(gdb_path, 'join_table')

    # Add fields from field_lst_tbl; each entry is presumably
    # (name, type, alias, length, source_field) — TODO confirm.
    for field in field_lst_tbl:
        if field[1] == 'TEXT':
            arcpy.AddField_management('join_table',
                                      field[0],
                                      field[1],
                                      field_alias=field[2],
                                      field_length=field[3])
        else:
            arcpy.AddField_management('join_table',
                                      field[0],
                                      field[1],
                                      field_alias=field[2])

    # Create FieldMappings object to manage merge output fields
    field_mappings = arcpy.FieldMappings()

    # Add the target table to the field mappings class to set the schema
    field_mappings.addTable('join_table')

    # Map fields from bsa_export table; field[4] is the source column in
    # bsa_export (None means the field is computed later, not appended).
    for field in field_lst_tbl:
        if field[4] is not None:
            fld_map = arcpy.FieldMap()
            fld_map.addInputField('bsa_export', field[4])
            # Set name of new output field
            field_name = fld_map.outputField
            field_name.name, field_name.type, field_name.aliasName = field[
                0], field[1], field[2]
            fld_map.outputField = field_name
            # Add output field to field mappings object
            field_mappings.addFieldMap(fld_map)

    # Append the bsa_export data into the join_table (NO_TEST: schemas
    # differ, the field mapping above handles the translation)
    arcpy.Append_management('bsa_export',
                            'join_table',
                            schema_type='NO_TEST',
                            field_mapping=field_mappings)

    # Create expressions for field calculations. These call helper
    # functions (format_pin, format_bsaurl, find_acres_recorded) that must
    # be visible to CalculateField's PYTHON3 execution — TODO confirm.
    pin_exp = 'format_pin(!PNUM!, !RELATEDPNUM!)'
    bsa_url_exp = 'format_bsaurl(!PNUM!)'
    data_export_exp = 'datetime.now()'
    acres_recorded_exp = 'find_acres_recorded(!LEGALDESC!)'

    # Calculate fields
    print('Calculating fields...')
    arcpy.CalculateField_management('join_table', 'PIN', pin_exp, 'PYTHON3')
    arcpy.CalculateField_management('join_table', 'BSAURL', bsa_url_exp,
                                    'PYTHON3')
    arcpy.CalculateField_management('join_table', 'DATAEXPORT',
                                    data_export_exp, 'PYTHON3')
    arcpy.CalculateField_management('join_table', 'ACRESRECORDED',
                                    acres_recorded_exp, 'PYTHON3')

    # Join parcel_lyr to bsa_export table on PIN, then materialize the join
    print('Joining table to parcel layer...')
    arcpy.AddJoin_management('parcel_lyr', 'PIN', 'join_table', 'PIN')
    arcpy.CopyFeatures_management('parcel_lyr', 'fc_join_parcel')

    # Calculate Acres field — only meaningful when the output CRS is the
    # specific state-plane (feet) system named here.
    if gis_env_config[
            'out_fc_proj'] == 'NAD 1983 StatePlane Michigan South FIPS 2113 (Intl Feet)':
        print('Calculating acres...')
        arcpy.CalculateGeometryAttributes_management('fc_join_parcel',
                                                     [['acres', 'AREA']],
                                                     area_unit='ACRES')

    # Reorder fields
    print('Reordering fields...')
    reorder_fields('fc_join_parcel', 'fc_ordered_parcel', final_field_order)
    arcpy.DeleteField_management('fc_ordered_parcel', dropFields_fc_final)
    lyr_final_parcel = 'lyr_final_parcel'
    arcpy.MakeFeatureLayer_management('fc_ordered_parcel', lyr_final_parcel)

    return gdb_path, lyr_final_parcel
示例#7
0
    def execute(self, parameters, messages):
        """The source code of the tool.

        Assigns hi/lo-scheme feature IDs ("<fc>/<n>") to every feature with
        a NULL FID in every feature class of the workspace.  parameters[0]
        is the workspace, parameters[2] the hi-counter batch size.
        """

        arcpy.env.workspace = parameters[0].value

        # Number of low IDs per hi ID
        # Higher batch sizes mean less updating of the table, lower batch sizes more
        # efficient ID usage especially when multiple processes access the table.
        hi_batchsize = parameters[2].value

        # Name of the table used to maintain hi/lo counter status per feature class. Could also become a parameter.
        generate_ID_table_name = "GenerateID"

        # check whether sequences table has already been created.
        counter_tbl_list = arcpy.ListTables(generate_ID_table_name)
        if not counter_tbl_list:
            arcpy.AddMessage("Creating new GenerateID table.")
            new_table = True
            generate_ID_table = arcpy.CreateTable_management(arcpy.env.workspace, generate_ID_table_name)
            arcpy.AddField_management(generate_ID_table, "name", "TEXT", None, None, 50, "Feature Class Name", "NON_NULLABLE", "REQUIRED")
            arcpy.AddField_management(generate_ID_table, "hi", "LONG", None, None, None, "Hi counter", "NON_NULLABLE", "REQUIRED")
            arcpy.AddField_management(generate_ID_table, "low", "LONG", None, None, None, "Low counter", "NON_NULLABLE", "REQUIRED")
        else:
            new_table = False
            generate_ID_table = counter_tbl_list[0]

        # go through feature classes to create FIDs where needed.
        fc_list = arcpy.ListFeatureClasses()
        for fc in fc_list:
            arcpy.AddMessage("Processing " + fc)
            hi_counter = 0
            low_counter = 0

            # if we only created the GenerateID table, we know we have to insert the counter.
            if new_table:
                insert_new_counter_cursor = arcpy.da.InsertCursor(generate_ID_table_name, ["name", "hi", "low"])
                insert_new_counter_cursor.insertRow((fc, 0, 0))
                del insert_new_counter_cursor

            # check if a counter of fc_name exists and retrieve value
            with arcpy.da.SearchCursor(generate_ID_table_name, ["name", "hi", "low"]) as rows:
                for row in rows:
                    if row[0] == fc:
                        hi_counter = row[1]
                        low_counter = row[2]
                        break

            # increment hi counter to indicate that it is in active usage
            with arcpy.da.UpdateCursor(generate_ID_table_name, ["name", "hi"]) as rows:
                for row in rows:
                    if row[0] == fc:
                        row[1] = 1 + hi_counter
                        rows.updateRow(row)
                        break

            # check if feature class alread has a FID, add it if not.
            fid_name = fc + "FID"
            fields_list = arcpy.ListFields(fc, fid_name)
            if not fields_list:
                arcpy.AddField_management(fc, fid_name, "TEXT", None, None, 50, "Feature ID", None, None)

            # modify FID of object if required
            with arcpy.da.UpdateCursor(fc, [fid_name]) as rows:
                for row in rows:
                    if row[0] is None:
                        if low_counter >= hi_batchsize:
                            # update hi_counter, reset low_counter
                            arcpy.AddMessage("Hi Sequence " + str(hi_counter) + " exhausted, using next Sequence.")
                            escaped_name = arcpy.AddFieldDelimiters(generate_ID_table_name, "name")
                            where_clause = escaped_name + " = " + "'" + fc + "'"
                            # BUG FIX: the original comprehension iterated
                            # "for rows in ..." but read "row[0]" (the outer
                            # UpdateCursor row, which is None here), so
                            # hi_counter was never refreshed from the table.
                            new_hi_row = [hi_row[0] for hi_row in arcpy.da.SearchCursor(generate_ID_table_name, ["hi"], where_clause)]
                            hi_counter = new_hi_row[0]
                            low_counter = 0
                        row[0] = fc + "/" + str(hi_counter * hi_batchsize + low_counter)
                        low_counter += 1
                        rows.updateRow(row)

            # write back the new low value to the GenerateID table.
            with arcpy.da.UpdateCursor(generate_ID_table_name, ["name", "low"]) as rows:
                for newRow in rows:
                    if newRow[0] == fc:
                        newRow[1] = low_counter
                        rows.updateRow(newRow)
                        break

        arcpy.AddMessage("Completed adding of Feature IDs.")
        return
示例#8
0
    fieldobjs.append(r.orgdoc)
    fieldobjs.append(r.id + str(r.timest))
    fieldobjs.append(r.linkdetails)
    fieldobjs.append((r.long, r.lat))

    iCur.insertRow(fieldobjs)

del iCur  # release the geo-entity insert cursor's lock

# ----------------------------------
#   Build non-geo entities table
# ----------------------------------
nongeotablename = "netowl_entities"

# Create the entities table from the template only when it does not
# already exist (was the unidiomatic "arcpy.Exists(...) is False").
if not arcpy.Exists(nongeotablename):
    arcpy.CreateTable_management(wk, nongeotablename,
                                 "netowl_template_nogeo")  # noqa: E501

iCur_links = arcpy.da.InsertCursor(
    nongeotablename,
    ["RDFID", "RDFVALUE", "TIMEST", "RDFLINKS", "ORGDOC", "UNIQUEID", "TYPE"
     ])  # noqa: E501

for d in rdfobjs:

    fieldobjs = []
    fieldobjs.append(d.id)
    fieldobjs.append(d.value)
    fieldobjs.append(d.timest)
    ll = nof.make_link_list(d.links)
    fieldobjs.append(ll)
    fieldobjs.append(d.orgdoc)
示例#9
0
## Make all the feature datasets and feature classes we will use
# NOTE(review): these indented statements belong to an enclosing function
# whose header is outside this view; names like workingGDBPath and
# digitisingFDName come from that scope — confirm before refactoring.

    #Create the feature dataset where the empty geological features will be stored
    arcpy.CreateFeatureDataset_management(workingGDBPath, digitisingFDName, coordinateSystem)

    #Create the feature class for contacts
    # (digitisingFDName is used as a workspace-relative path here —
    # presumably arcpy.env.workspace points at the working GDB; verify)
    arcpy.CreateFeatureclass_management(digitisingFDName, contactsFCName, "POLYLINE")

    #Create the feature class for map boundary
    arcpy.CreateFeatureclass_management(digitisingFDName, boundaryFCName, "POLYGON")


##Preparing the Valid Units table

    #create the table of Valid Units
    arcpy.CreateTable_management(workingGDBPath, validUnitTableName)

    #add the Unit Code field to the Valid Units Table
    arcpy.AddField_management(validUnitTableName,unitCodeFieldName,"TEXT","")

    #add the Unit Name field to the Valid Units Table
    arcpy.AddField_management(validUnitTableName,unitNameFieldName,"TEXT","")


##Create accuracy field in the contacts feature class and add sub type containing valid accuracy types
    
    #add the accuracy field
    arcpy.AddField_management(os.path.join(digitisingFDName,contactsFCName),accuracyFieldName,"LONG","")

    #Create empty sub type to go in the Accuracy Field
    # (designates accuracyFieldName as the subtype field; subtype codes
    # are presumably added later — not visible in this chunk)
    arcpy.SetSubtypeField_management(os.path.join(digitisingFDName,contactsFCName),accuracyFieldName)
示例#10
0
# Climate (temperature/humidity) time-series table inside the data workspace.
climatedata = r'{}\TempFeuchte'.format(data)
# "fk_von_L" is presumably field capacity; clip all input rasters to the basin.
fk = ExtractByMask(Raster(r'{}\fk_von_L'.format(data)), basin)
rp = fk * rp_factor
# "wp" is presumably the wilting-point raster — TODO confirm.
wp = ExtractByMask(Raster(r'{}\wp'.format(data)), basin)
# "Gewaesser" = water bodies.
water = ExtractByMask(Raster(r'{}\Gewaesser'.format(data)), basin)
rpwp_dif = rp - wp
s_pre = s_init  # initial soil-storage state carried between iterations
p_data = r'{}\N_Zeitreihen'.format(data)  # precipitation time series
cellsize = s_init.meanCellHeight
# Lambda model parameter derived from flow length "L_in_metern" (metres -> km via *1000).
lambda_parameter = (
    c /
    (ExtractByMask(Raster(r'{}\L_in_metern'.format(data)), basin) * 1000)**2)

# Create the results table (one row per day: date and discharge Q).
result_path = arcpy.CreateTable_management(
    r'{}\Ergebnistabellen.gdb'.format(workspace), outname)
arcpy.AddField_management(result_path, "Datum", "TEXT")
arcpy.AddField_management(result_path, "Q", "DOUBLE")

arcpy.AddMessage(
    time.strftime("%H:%M:%S: ") +
    "Berechnung der Rasterdatensaetze war erfolgreich.")

########################################################################################################################
#  Start of the main program
#  Start of the modelling
#  Iteration over the climate data of the study period
########################################################################################################################

with arcpy.da.SearchCursor(
        climatedata, ['Tagesid', 'Jahr', 'Monat', 'Tag', 'RelFeu', 'Temp'],
#
# Locate Features Along Routes: locate selected TMCs along selected MassDOT route
# Output is: tmc_event_table
# Note: An XY tolerance of ***40*** meters was found to be necessary some cases, e.g., I-95 @ new bridge over Merrimack River.
# tmc_event_table_properties = "route_id LINE from_meas to_meas"
# arcpy.LocateFeaturesAlongRoutes_lr(INRIX_TMCS, Selected_LRSN_Route, "route_id", XY_tolerance + " Meters", tmc_event_table,
#                                    tmc_event_table_properties, "FIRST", "DISTANCE", "ZERO", "FIELDS", "M_DIRECTON")
# Delete un-needed fields from tmc_event_table
# arcpy.DeleteField_management(tmc_event_table, "linrtmc;frc;lenmiles;strtlat;strtlong;endlat;endlong;roadname;country;state;zipcode")
#
# *** End of original code
#
# *** Beginning of replacement code:
#
# Make a copy of the "template" TMC event table into which the raw (unsorted) TMC events will be written
arcpy.CreateTable_management(tmc_event_table_gdb, tmc_event_table_name_raw,
                             tmc_template_event_table)

# Indices in the vector of fields (i.e., attributes) to be read in from the TMC FC
route_feat_route_id_ix = 0
route_feat_shape_ix = 1
#
# Get the geometry of the selected LRSN route.
# NOTE(review): cursor.next() is the Python-2-style advance; under Python 3
# / ArcGIS Pro the builtin next(route_sc) is the portable form — confirm
# which runtime this script targets.
route_sc = arcpy.da.SearchCursor(Selected_LRSN_Route, ['route_id', 'shape@'])
route_feat = route_sc.next()
# M (measure) value at the end point of the route geometry.
route_feat_last_m_value = route_feat[route_feat_shape_ix].lastPoint.M

# Names of fields (i.e., attributes) read in from the TMC FC
tmc_fc_fieldnames = [
    'tmc', 'tmctype', 'roadnum', 'firstnm', 'direction', 'shape@'
]
# Indices in the vector of fields (i.e., attributes) read in from the TMC FC
示例#12
0
spatial_reference = arcpy.Describe(titleFC).spatialReference

#TABLE VIEW
titles_table = os.path.join(sde_connection, config.owner_table)
arcpy.MakeTableView_management(titles_table, "table_titles")

#SELECT BY LOCATION
arcpy.SelectLayerByLocation_management("title_lyr", "WITHIN_A_DISTANCE", addr, distance, "NEW_SELECTION")
#arcpy.CopyFeatures_management('title_lyr', "in_memory/selected_title")

#TABLE JOIN TITLE PARCELS
owners_fl = arcpy.AddJoin_management("title_lyr", "PID", "table_titles", "PID")
#arcpy.CopyFeatures_management("title_lyr", "tableTest")

#FORMAT THE TABLE TO A STANDARD FORMAT WE USE - EXPORT AS A RECORD SET
owners_table = arcpy.CreateTable_management("in_memory", "owners_table")

#ADD FIELDS TO THE TABLE as (name, type) pairs
fields = [
    ("PID", "TEXT"),
    ("owner_count", "TEXT"),
    ("owner_name", "TEXT"),
    ("owner_address", "TEXT"),
    ("postal_code", "TEXT"),
    ("province", "TEXT")
]

# Unpack each (name, type) pair directly instead of the original
# "*(owners_table,) + field" tuple-splat, which obscured the call.
for field in fields:
    arcpy.AddField_management(owners_table, *field)

fieldList = arcpy.ListFields("title_lyr")
示例#13
0
            if not arcpy.Exists(subdir_fc2 + '\\' + folder3):
                arcpy.CreateFolder_management(subdir_fc2, folder3)
            if not arcpy.Exists(subdir_fc2 + '\\' + folder4):
                arcpy.CreateFolder_management(subdir_fc2, folder4)
            if not arcpy.Exists(subdir_fc2 + '\\' + folder5):
                arcpy.CreateFolder_management(subdir_fc2, folder5)
            if not arcpy.Exists(subdir_fc2 + '\\' + folder6):
                arcpy.CreateFolder_management(subdir_fc2, folder6)

# Round trips between identical feature classes collapse to a single folder.
if fc_one == fc_two or round_trip is False:
    directory = subdir
else:
    directory = subdir_fc1

# Creates dummy table to store results of each pairwise iteration of the analysis.
table = arcpy.CreateTable_management(directory, 'maintable.dbf')
arcpy.AddField_management(table, 'Source', 'TEXT')
arcpy.AddField_management(table, 'Dest', 'TEXT')
arcpy.AddField_management(table, 'PathCost', 'FLOAT')
arcpy.AddField_management(table, 'Distance', 'FLOAT')

fields = ['Source', 'Dest', 'PathCost', 'Distance']

# Creates log file (name suffixed with the last 8 digits of the epoch time).
# FIX: the original wrote '\log', silently relying on '\l' not being a
# recognised escape sequence; the backslash is now explicit.
log = open(directory + '\\log' + str(int(time()))[-8:] + '.txt', 'a+')
log.write('------------------------------------------------------------------------------------------' + '\n')
log.write('Event log for least cost path analysis between locations in: ' + '\n')
log.write(fc_one + '\n')
log.write(fc_two + '\n')
log.write('Event log created: ' + asctime() + '\n')
log.write('------------------------------------------------------------------------------------------' + '\n')
        self.data = data


if __name__ == '__main__':

    try:

        # Start time recording
        startTime = time()
        startTimeStr = strftime('%m-%d-%Y %H:%M:%S')
        print 'Starting program: ', startTimeStr, '\n'

        #create output table
        if arcpy.Exists(outTable):
            arcpy.Delete_management(outTable)
        arcpy.CreateTable_management(outTableLocation, outTableName)

        #add featureID field
        if not findField(outTable, uniqueFeatureIDfield):
            arcpy.AddField_management(outTable, uniqueFeatureIDfield, 'TEXT',
                                      '', '', 20)

        #add fields for each possible value in categorical raster
        if analysisType == 'categorical':
            arcpy.AddField_management(outTable, fieldPrefix + '0', 'DOUBLE',
                                      '', '', 20)
            for row in arcpy.da.SearchCursor(inRaster, uniqueRasterIDfield):
                if not findField(outTable, fieldPrefix + str(row[0])):
                    arcpy.AddField_management(outTable,
                                              fieldPrefix + str(row[0]),
                                              'DOUBLE', '', '', 20)
示例#15
0
def SLEM(Line, Distance, Output, TF):
    """Segment a polyline feature class into pieces of length *Distance*.

    The input is copied in memory, converted to linear routes, and split
    by a generated event table so that *Output* contains one feature per
    segment, sorted by "Rank_UGO" and cumulative "Distance" along each
    line. If *TF* is the string "true", the in-memory temporaries are
    deleted before returning.

    NOTE(review): only the raw-polyline case (k == 0) is implemented in
    this excerpt; for inputs that already carry Rank_UGO / Order_ID /
    Rank_AGO fields, `Sort` is never assigned and the final `return`
    raises NameError.
    """
    CopyLine = arcpy.CopyFeatures_management(Line, r"in_memory\CopyLine")

    fieldnames = [f.name for f in arcpy.ListFields(CopyLine)]

    #/identification of the polyline type : raw, UGOs, sequenced UGOs, or AGOs
    # (detected purely from which ranking fields are already present)
    k = 0
    if "Rank_AGO" in fieldnames:
        k = 3
    elif "Order_ID" in fieldnames:
        k = 2
    elif "Rank_UGO" in fieldnames:
        k = 1

    ################################
    ########## Raw polyline ########
    ################################
    if k == 0:

        #/shaping of the segmented result
        # Rank_UGO is seeded from the first field (the OID) so each line
        # gets a stable numeric identifier for route creation.
        arcpy.AddField_management(CopyLine, "Rank_UGO", "LONG", "", "", "", "",
                                  "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "Rank_UGO",
                                        "!" + fieldnames[0] + "!",
                                        "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0",
                                        "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "",
                                  "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure",
                                        "!shape.length!", "PYTHON_9.3", "")

        #/conversion in routes
        # Each line becomes a route measured 0 .. shape length.
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO",
                                           r"in_memory\LineRoutes",
                                           "TWO_FIELDS", "From_Measure",
                                           "To_Measure")

        #/creation of the event table
        # One event row per segment: [Distance, To_M] is the measure
        # interval of that segment along its route.
        PointEventTEMP = arcpy.CreateTable_management("in_memory",
                                                      "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "",
                                  "", "NULLABLE", "NON_REQUIRED", "")

        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        # Walk each route from its far end toward 0, emitting one event
        # every *Distance* units (the last piece may be shorter).
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(line.Shape_Length)
            while (tempdistance > float(0)):
                row = rowsevents.newRow()
                row.Rank_UGO = line.Rank_UGO
                row.To_M = max(0, tempdistance - float(Distance))
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance - float(Distance)
        del rowslines
        del rowsevents

        #/creation of the route event layer
        # Materialize the events as line features, then sort so segments
        # are ordered along each line.
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(
            LineRoutes, "Rank_UGO", PointEventTEMP,
            "Rank_UGO LINE Distance To_M", r"in_memory\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP,
                                              r"in_memory\Split", "", "0", "0",
                                              "0")
        Sort = arcpy.Sort_management(
            Split, Output,
            [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")

        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)

        # Two cursors over the same sorted table, offset by one row:
        # line2 is always the row after line1, so each row's Distance is
        # rewritten as the running length from the start of its line,
        # resetting to 0 whenever Rank_UGO changes. The statement order
        # here is load-bearing -- do not reorder.
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Sort)))
        n = 0
        for line1 in rows1:
            line2 = rows2.next()
            if n == nrows - 1:
                break
            if n == 0:
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO:
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO:
                line2.Distance = 0
                rows2.updateRow(line2)

            n += 1

        #/deleting of the temporary files
        if str(TF) == "true":
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    return Sort
    def makeTable():
        """Build a table of ADCIRC nodal attributes from a fort.13 file.

        Reads two geoprocessing parameters: (0) the path of the input
        fort.13 text file and (1) the path of the output table. Creates
        the table with a NodeID field plus twelve value fields, seeds
        every node with the default value of the
        'surface_directional_effective_roughness_length' attribute, then
        overwrites the nodes listed in the file's non-default section.

        NOTE(review): Python 2 only -- uses ``file()``, the ``<>``
        operator and ``long()``.
        """
        txtFile = arcpy.GetParameterAsText(0)
        newTB = arcpy.GetParameterAsText(1)

        arcpy.CreateTable_management(
            os.path.dirname(newTB),
            os.path.basename(newTB),
        )

        #Add attribute fields to table: NodeID plus the 12 directional values
        arcpy.AddField_management(newTB, "NodeID", "long")
        arcpy.AddField_management(newTB, "Value1", "float")
        arcpy.AddField_management(newTB, "Value2", "float")
        arcpy.AddField_management(newTB, "Value3", "float")
        arcpy.AddField_management(newTB, "Value4", "float")
        arcpy.AddField_management(newTB, "Value5", "float")
        arcpy.AddField_management(newTB, "Value6", "float")
        arcpy.AddField_management(newTB, "Value7", "float")
        arcpy.AddField_management(newTB, "Value8", "float")
        arcpy.AddField_management(newTB, "Value9", "float")
        arcpy.AddField_management(newTB, "Value10", "float")
        arcpy.AddField_management(newTB, "Value11", "float")
        arcpy.AddField_management(newTB, "Value12", "float")

        #Open the text file and read the number of nodes
        # (`input` shadows the builtin; harmless inside this function)
        input = file(txtFile, "r")
        input.readline()
        line = input.readline()
        totalNodes = line.split()  #Number of nodes
        nunnodes = int(totalNodes[0])
        line = input.readline()
        data = line.split()
        nunparameter = int(data[0])  #Number of attributes

        print('This ADCIRC GRID has: ' + str(nunnodes) + ' nodes')
        print('This Fort.13 file has: ' + str(nunparameter) + ' parameters')
        print('Importing the parameters from the Fort.13 ...')

        #Reads line until line with parameter
        # (`count = +1` assigns 1, i.e. it terminates the scan loop)
        count = 0
        while count == 0:
            line = input.readline()
            data = line.split()
            if str(data[0]) == str(
                    "surface_directional_effective_roughness_length"):
                count = +1

        # Skip the section's two header lines, then read the default value.
        line = input.readline()
        line = input.readline()
        line = input.readline()
        data = line.split()
        defaultValue = float(data[0])

        # Seed one row per node, all values set to the default.
        # (the loop variable `row` is immediately rebound to the new row)
        cur = arcpy.InsertCursor(newTB)
        index = 1
        for row in range(0, nunnodes):
            row = cur.newRow()
            row.NodeID = long(index)

            row.Value1 = defaultValue
            row.Value2 = defaultValue
            row.Value3 = defaultValue
            row.Value4 = defaultValue
            row.Value5 = defaultValue
            row.Value6 = defaultValue
            row.Value7 = defaultValue
            row.Value8 = defaultValue
            row.Value9 = defaultValue
            row.Value10 = defaultValue
            row.Value11 = defaultValue
            row.Value12 = defaultValue

            cur.insertRow(row)
            index = index + 1
        del cur, row

        #Adds non-default values
        # Scan forward to the second occurrence of the attribute name,
        # which opens the per-node (non-default) data section.
        count2 = 0
        while count2 == 0:
            line = input.readline()
            data = line.split()
            if str(data[0]) == str(
                    'surface_directional_effective_roughness_length'):
                count2 = +1

        line = input.readline()
        totalValues = line.split()

        # NOTE(review): `totalValues` is parsed but never used -- the code
        # assumes the raw line holds a single integer (the count of
        # non-default nodes) and converts `line` directly.
        if int(line) > 0:
            cur2 = arcpy.UpdateCursor(newTB)
            for i in range(int(line)):
                line = input.readline()
                data = line.split()
                tmpnode = long(data[0])
                row = cur2.next()
                tmpnode2 = row.NodeID
                # Advance the cursor until it reaches this node's row;
                # assumes node IDs appear in ascending order in the file.
                while tmpnode <> tmpnode2:
                    row = cur2.next()
                    tmpnode2 = row.NodeID
                row.Value1 = float(data[1])
                row.Value2 = float(data[2])
                row.Value3 = float(data[3])
                row.Value4 = float(data[4])
                row.Value5 = float(data[5])
                row.Value6 = float(data[6])
                row.Value7 = float(data[7])
                row.Value8 = float(data[8])
                row.Value9 = float(data[9])
                row.Value10 = float(data[10])
                row.Value11 = float(data[11])
                row.Value12 = float(data[12])

                cur2.updateRow(row)
            del row, cur2
            line = input.readline()

        # Drop the placeholder field ArcGIS adds to a brand-new table.
        # NOTE(review): assumes that field is named 'Field1' -- confirm for
        # the target workspace type.
        arcpy.DeleteField_management(newTB, "Field1")
        input.close()
    def run(self):
        """Calculate the property-tax (Grundsteuer) revenue for the project
        municipality and write it into the Gemeindebilanzen table.

        Reads the project frame data (AGS municipality key), persists the
        user-set sliders (tax rate and base values), derives the assessed
        values (Einheitswerte) per residential building type and for
        commercial floor space, applies the tax base rates
        (Steuermesszahlen) and the municipal rate (Hebesatz), and stores
        the resulting revenue per municipality.
        """
        params = self.par
        projektname = self.projectname

        lib_einnahmen.create_gemeindebilanzen(self, projektname)

        # Municipality key (AGS) and type from the project frame data.
        # NOTE(review): assumes the cursor yields at least one row;
        # otherwise `ags` is unbound below.
        fields = ['AGS', 'Gemeindetyp']
        tablepath_rahmendaten = self.folders.get_table(
            'Projektrahmendaten', "FGDB_Definition_Projekt.gdb")
        cursor = arcpy.da.SearchCursor(tablepath_rahmendaten, fields)
        for row in cursor:
            ags = row[0]
            gemeindetyp = row[1]

        tablepath_gemeinden = self.folders.get_base_table(
            "FGDB_Basisdaten_deutschland.gdb", "bkg_gemeinden")
        tablepath_einkommen = self.folders.get_db("FGDB_Einnahmen.gdb",
                                                  params.name.value)
        tablepath_hebesteuer = os.path.join(tablepath_einkommen,
                                            "GrSt_Hebesatz_B")

        # Persist the Hebesatz (slider1): update the existing single-row
        # table, or create and fill it on the first run.
        fields = ["Hebesatz_GrStB"]
        if arcpy.Exists(tablepath_hebesteuer):
            cursor = arcpy.da.UpdateCursor(tablepath_hebesteuer, fields)
            for row in cursor:
                row[0] = params.slider1.value
                cursor.updateRow(row)
        else:
            arcpy.CreateTable_management(
                self.folders.get_db("FGDB_Einnahmen.gdb", params.name.value),
                "GrSt_Hebesatz_B")
            arcpy.AddField_management(tablepath_hebesteuer, "Hebesatz_GrStB",
                                      "LONG")
            cursor = arcpy.da.InsertCursor(tablepath_hebesteuer, fields)
            cursor.insertRow([params.slider1.value])

        # Replace the GrSt base data with the current slider values
        # (sliders 2-9, matching the field order below).
        fields = [
            "EFH_Rohmiete", 'DHH_Rohmiete', 'RHW_Rohmiete', 'MFH_Rohmiete',
            'Bodenwert_Sachwertverfahren', 'qm_Grundstueck_pro_WE_EFH',
            'BGF_Buero', 'BGF_Halle'
        ]
        tablepath_basisdaten = self.folders.get_table('GrSt_Basisdaten',
                                                      "FGDB_Einnahmen.gdb")
        cursor = arcpy.da.UpdateCursor(tablepath_basisdaten, fields)
        for row in cursor:
            cursor.deleteRow()
        cursor = arcpy.da.InsertCursor(tablepath_basisdaten, fields)
        cursor.insertRow([
            params.slider2.value, params.slider3.value, params.slider4.value,
            params.slider5.value, params.slider6.value, params.slider7.value,
            params.slider8.value, params.slider9.value
        ])

        # Per building type (EFH/DH/RH/MFH): assessed value, living area,
        # base rent, garage surcharge and multiplier.
        einheitswert_efh = 0
        einheitswert_dh = 0
        einheitswert_rh = 0
        einheitswert_mfh = 0

        wohnflaeche_efh = 0
        wohnflaeche_dh = 0
        wohnflaeche_rh = 0
        wohnflaeche_mfh = 0

        rohmiete_efh = 0
        rohmiete_dh = 0
        rohmiete_rh = 0
        rohmiete_mfh = 0

        garagen_efh = 0
        garagen_dh = 0
        garagen_rh = 0
        garagen_mfh = 0

        multiplikator_efh = 0
        multiplikator_dh = 0
        multiplikator_rh = 0
        multiplikator_mfh = 0

        # Mean living area and garage/carport surcharge per building type.
        table_wohnflaeche = self.folders.get_base_table(
            "FGDB_Einnahmen_Tool.gdb", "GrSt_Wohnflaeche_und_Steuermesszahlen")
        fields = [
            "IDGebaeudetyp", "Mittlere_Wohnflaeche",
            "Aufschlag_Garagen_Carport"
        ]
        cursor = arcpy.da.SearchCursor(table_wohnflaeche, fields)
        for row in cursor:
            if row[0] == 1:
                wohnflaeche_efh = row[1]
                garagen_efh = row[2]
            if row[0] == 2:
                wohnflaeche_dh = row[1]
                garagen_dh = row[2]
            if row[0] == 3:
                wohnflaeche_rh = row[1]
                garagen_rh = row[2]
            if row[0] == 4:
                wohnflaeche_mfh = row[1]
                garagen_mfh = row[2]

        if int(ags) <= 10999999:
            # Western municipalities: monthly base rent per m² comes from
            # the sliders (slider values are in cents).
            rohmiete_efh = params.slider2.value / 100.0
            rohmiete_dh = params.slider3.value / 100.0
            rohmiete_rh = params.slider4.value / 100.0
            rohmiete_mfh = params.slider5.value / 100.0
        else:
            # BUG FIX: the original tried to tuple-unpack the scalar 0.46
            # into four names, which raises TypeError at runtime. All four
            # building types share the same flat base rent here.
            rohmiete_efh = rohmiete_dh = rohmiete_rh = rohmiete_mfh = 0.46

        # Municipality size class (needed to pick the multiplier).
        fields = ["AGS", "GemGroessKlass64"]
        where_clause = '"AGS"' + "='" + ags + "'"
        cursor = arcpy.da.SearchCursor(tablepath_gemeinden, fields,
                                       where_clause)
        for row in cursor:
            gemeinde_klasse = row[1]

        # Multiplier (Vervielfaeltiger) per building type for this size class.
        table_wohnflaeche = self.folders.get_base_table(
            "FGDB_Einnahmen_Tool.gdb", "GrSt_Vervielfaeltiger")
        where_clause = '"Gemeindegroessenklasse64"' + "='" + gemeinde_klasse + "'"
        fields = [
            "Gemeindegroessenklasse64", "IDGebaeudetyp", "Vervielfaeltiger"
        ]
        cursor = arcpy.da.SearchCursor(table_wohnflaeche, fields, where_clause)
        for row in cursor:
            if row[1] == 1:
                multiplikator_efh = row[2]
            if row[1] == 2:
                multiplikator_dh = row[2]
            if row[1] == 3:
                multiplikator_rh = row[2]
            if row[1] == 4:
                multiplikator_mfh = row[2]

        # Assessed value = (annual rent * area + garage surcharge) * multiplier.
        einheitswert_efh = (12 * rohmiete_efh * wohnflaeche_efh +
                            garagen_efh) * multiplikator_efh
        einheitswert_dh = (12 * rohmiete_dh * wohnflaeche_dh +
                           garagen_dh) * multiplikator_dh
        einheitswert_rh = (12 * rohmiete_rh * wohnflaeche_rh +
                           garagen_rh) * multiplikator_rh
        einheitswert_mfh = (12 * rohmiete_mfh * wohnflaeche_mfh +
                            garagen_mfh) * multiplikator_mfh

        if int(ags) >= 11000000:
            # Berlin: assessed EFH value via the Sachwert method instead of
            # the rent-based method above.
            table_wohnflaeche = self.folders.get_base_table(
                "FGDB_Einnahmen_Tool.gdb",
                "GrSt_Wohnflaeche_und_Steuermesszahlen")
            # BUG FIX: the original concatenated the int 1 into the SQL
            # string ('"..."' + "='" + 1 + "'"), raising TypeError.
            # IDGebaeudetyp is numeric (it is compared with == 1 above),
            # so query it without quotes.
            where_clause = '"IDGebaeudetyp" = 1'
            fields = ["IDGebaeudetyp", "Umbauter_Raum_m3"]
            cursor = arcpy.da.SearchCursor(table_wohnflaeche, fields,
                                           where_clause)
            for row in cursor:
                umbauter_raum_m3 = row[1]

            einheitswert_efh = 24 / 1.95583 * umbauter_raum_m3 + 550 + params.slider7.value * (
                params.slider6.value / 100.0)

        # Commercial assessed value from office/hall floor space (sliders 8/9).
        if params.slider8.value != 0 or params.slider9.value != 0:
            einheitswert_gewerbe = (1685 * params.slider8.value +
                                    800 * params.slider9.value) * 0.1554
        else:
            einheitswert_gewerbe = 0

        # Number of dwelling units (WE) per building type in the project.
        we_efh = 0
        we_dh = 0
        we_rh = 0
        we_mfh = 0

        fields = ['IDGebaeudetyp', 'WE']
        tablepath_wohnen = self.folders.get_table(
            'Wohnen_WE_in_Gebaeudetypen', "FGDB_Definition_Projekt.gdb")
        cursor = arcpy.da.SearchCursor(tablepath_wohnen, fields)
        for row in cursor:
            if row[0] == 1:
                we_efh += row[1]
            if row[0] == 2:
                we_dh += row[1]
            if row[0] == 3:
                we_rh += row[1]
            if row[0] == 4:
                we_mfh += row[1]

        arcpy.AddMessage("Einheitswert EFH = {}".format(einheitswert_efh))

        # Split each assessed value at the 38,346 EUR threshold; the two
        # parts are taxed with different Steuermesszahlen below.
        if einheitswert_efh <= 38346:
            einheitswert_bis_38346_EUR_efh = einheitswert_efh
            einheitswert_ab_38346_EUR_efh = 0
        else:
            einheitswert_bis_38346_EUR_efh = 38346
            einheitswert_ab_38346_EUR_efh = einheitswert_efh - 38346

        if einheitswert_dh <= 38346:
            einheitswert_bis_38346_EUR_dh = einheitswert_dh
            einheitswert_ab_38346_EUR_dh = 0
        else:
            einheitswert_bis_38346_EUR_dh = 38346
            einheitswert_ab_38346_EUR_dh = einheitswert_dh - 38346

        if einheitswert_rh <= 38346:
            einheitswert_bis_38346_EUR_rh = einheitswert_rh
            einheitswert_ab_38346_EUR_rh = 0
        else:
            einheitswert_bis_38346_EUR_rh = 38346
            einheitswert_ab_38346_EUR_rh = einheitswert_rh - 38346

        if einheitswert_mfh <= 38346:
            einheitswert_bis_38346_EUR_mfh = einheitswert_mfh
            einheitswert_ab_38346_EUR_mfh = 0
        else:
            einheitswert_bis_38346_EUR_mfh = 38346
            einheitswert_ab_38346_EUR_mfh = einheitswert_mfh - 38346

        # Tax base rates per building type for both threshold bands.
        table_wohnflaeche = self.folders.get_base_table(
            "FGDB_Einnahmen_Tool.gdb", "GrSt_Wohnflaeche_und_Steuermesszahlen")
        fields = [
            "IDGebaeudetyp", "Steuermesszahl_bis_38346_EUR",
            "Steuermesszahl_ab_38346_EUR"
        ]
        cursor = arcpy.da.SearchCursor(table_wohnflaeche, fields)
        for row in cursor:
            if row[0] == 1:
                steuermesszahl_bis_38346_EUR_efh = row[1]
                steuermesszahl_ab_38346_EUR_efh = row[2]
            if row[0] == 2:
                steuermesszahl_bis_38346_EUR_dh = row[1]
                steuermesszahl_ab_38346_EUR_dh = row[2]
            if row[0] == 3:
                steuermesszahl_bis_38346_EUR_rh = row[1]
                steuermesszahl_ab_38346_EUR_rh = row[2]
            if row[0] == 4:
                steuermesszahl_bis_38346_EUR_mfh = row[1]
                steuermesszahl_ab_38346_EUR_mfh = row[2]

        # Tax base amount per type = dwelling units * banded assessed value
        # weighted with the respective rates.
        messbetrag_efh = we_efh * (
            einheitswert_bis_38346_EUR_efh * steuermesszahl_bis_38346_EUR_efh +
            einheitswert_ab_38346_EUR_efh * steuermesszahl_ab_38346_EUR_efh)
        messbetrag_dh = we_dh * (
            einheitswert_bis_38346_EUR_dh * steuermesszahl_bis_38346_EUR_dh +
            einheitswert_ab_38346_EUR_dh * steuermesszahl_ab_38346_EUR_dh)
        messbetrag_rh = we_rh * (
            einheitswert_bis_38346_EUR_rh * steuermesszahl_bis_38346_EUR_rh +
            einheitswert_ab_38346_EUR_rh * steuermesszahl_ab_38346_EUR_rh)
        messbetrag_mfh = we_mfh * (
            einheitswert_bis_38346_EUR_mfh * steuermesszahl_bis_38346_EUR_mfh +
            einheitswert_ab_38346_EUR_mfh * steuermesszahl_ab_38346_EUR_mfh)

        messbetrag_gewerbe = einheitswert_gewerbe * 0.0035

        # Total revenue = sum of base amounts * Hebesatz (slider1, percent).
        # (the original defined an unused local `roundup` helper here;
        # removed as dead code)
        GRUNDSTEUERAUFKOMMEN = (
            messbetrag_efh + messbetrag_dh + messbetrag_rh + messbetrag_mfh +
            messbetrag_gewerbe) * params.slider1.value / 100.0

        # Write the revenue into the project municipality's balance row;
        # all other municipalities are zeroed.
        table_bilanzen = self.folders.get_table("Gemeindebilanzen",
                                                "FGDB_Einnahmen.gdb")
        fields = ["AGS", "GrSt"]
        cursor = arcpy.da.UpdateCursor(table_bilanzen, fields)
        for row in cursor:
            if row[0] == ags:
                row[1] = GRUNDSTEUERAUFKOMMEN
            else:
                row[1] = 0
            cursor.updateRow(row)

        c.set_chronicle(
            "Grundsteuer",
            self.folders.get_table(tablename='Chronik_Nutzung',
                                   workspace="FGDB_Einnahmen.gdb",
                                   project=projektname))
示例#18
0
                                    field='Area_mi2',
                                    expression=basin_area_mi2)
    arcpy.SelectLayerByAttribute_management(in_layer_or_view=basins,
                                            selection_type='CLEAR_SELECTION')

    # calculate length-weighted slope
    # 1. split longest flow path at regular points along the line
    # 2. calculate slope of each split part
    # 3. use the slopes and lengths to get the length-weighted slope
    lfp_points = [
        f for f in
        arcpy.da.SearchCursor('{}\Layers\longestflowpath_{}.shp'.format(
            HOME_DIRECTORY, i), ['SHAPE@X', 'SHAPE@Y'],
                              explode_to_points=True)
    ]
    arcpy.CreateTable_management(PROCESS_GDB, 'lfp_points_table_{}'.format(i))
    arcpy.AddField_management('{}lfp_points_table_{}'.format(PROCESS_GDB, i),
                              'X', 'DOUBLE')
    arcpy.AddField_management('{}lfp_points_table_{}'.format(PROCESS_GDB, i),
                              'Y', 'DOUBLE')
    with arcpy.da.InsertCursor('{}lfp_points_table_{}'.format(PROCESS_GDB, i),
                               ['X', 'Y']) as cur:
        for row in lfp_points:
            cur.insertRow(row)
    arcpy.MakeXYEventLayer_management(
        '{}lfp_points_table_{}'.format(PROCESS_GDB, i), 'X', 'Y',
        'lfp_points_layer')
    arcpy.FeatureToPoint_management(
        'lfp_points_layer', '{}/lfp_points_points_{}'.format(PROCESS_GDB, i))
    arcpy.SplitLineAtPoint_management(
        '{}/Layers/longestflowpath_{}.shp'.format(HOME_DIRECTORY, i),
def main(thisDB,coordSystem,nCrossSections):
    """Create the GeMS geodatabase skeleton in *thisDB*.

    Builds the GeologicMap feature dataset and its polygon/line/point
    feature classes, the optional CorrelationOfMapUnits dataset, up to 26
    cross-section datasets (A..Z), the standard tables, the GeoMaterials
    table and domains, and optionally confidence domains and editor
    tracking.
    """
    # The field-definition templates in tableDict are nested lists shared
    # by every caller; they must be deep-copied before any local mutation
    # (see the BUG FIX notes below).
    import copy

    # create feature dataset GeologicMap
    addMsgAndPrint('  Creating feature dataset GeologicMap...')
    try:
        arcpy.CreateFeatureDataset_management(thisDB,'GeologicMap',coordSystem)
    except Exception:
        addMsgAndPrint(arcpy.GetMessages(2))

    # create feature classes in GeologicMap
    # poly feature classes
    featureClasses = ['MapUnitPolys']
    for fc in ['DataSourcePolys','MapUnitOverlayPolys','OverlayPolys']:
        if fc in OptionalElements:
            featureClasses.append(fc)
    for featureClass in featureClasses:
        fieldDefs = copy.deepcopy(tableDict[featureClass])
        createFeatureClass(thisDB,'GeologicMap',featureClass,'POLYGON',fieldDefs)

    # line feature classes
    featureClasses = ['ContactsAndFaults']
    for fc in ['GeologicLines','CartographicLines','IsoValueLines']:
        if fc in OptionalElements:
            featureClasses.append(fc)
    if debug:
        addMsgAndPrint('Feature classes = '+str(featureClasses))
    for featureClass in featureClasses:
        # BUG FIX: the original bound fieldDefs directly to the list stored
        # in tableDict, so the .append() below mutated the shared template
        # -- each pass through this (and the cross-section loop) added a
        # duplicate LTYPE definition. Deep-copy before mutating.
        fieldDefs = copy.deepcopy(tableDict[featureClass])
        if featureClass in ['ContactsAndFaults','GeologicLines'] and addLTYPE:
            fieldDefs.append(['LTYPE','String','NullsOK',50])
        createFeatureClass(thisDB,'GeologicMap',featureClass,'POLYLINE',fieldDefs)

    # point feature classes
    featureClasses = []
    for fc in ['OrientationPoints','GeochronPoints','FossilPoints','Stations',
                  'GenericSamples','GenericPoints']:
        if fc in OptionalElements:
            featureClasses.append(fc)
    for featureClass in featureClasses:
        if featureClass == 'MapUnitPoints':
            # NOTE(review): unreachable with the candidate list above
            # ('MapUnitPoints' is never appended); kept for parity with
            # upstream GeMS code.
            fieldDefs = copy.deepcopy(tableDict['MapUnitPolys'])
            if addLTYPE:
                fieldDefs.append(['PTYPE','String','NullsOK',50])
        else:
            fieldDefs = copy.deepcopy(tableDict[featureClass])
            if addLTYPE and featureClass in ['OrientationPoints']:
                fieldDefs.append(['PTTYPE','String','NullsOK',50])
        createFeatureClass(thisDB,'GeologicMap',featureClass,'POINT',fieldDefs)

    # create feature dataset CorrelationOfMapUnits
    if 'CorrelationOfMapUnits' in OptionalElements:
        addMsgAndPrint('  Creating feature dataset CorrelationOfMapUnits...')
        arcpy.CreateFeatureDataset_management(thisDB,'CorrelationOfMapUnits',coordSystem)
        fieldDefs = copy.deepcopy(tableDict['CMUMapUnitPolys'])
        createFeatureClass(thisDB,'CorrelationOfMapUnits','CMUMapUnitPolys','POLYGON',fieldDefs)
        fieldDefs = copy.deepcopy(tableDict['CMULines'])
        createFeatureClass(thisDB,'CorrelationOfMapUnits','CMULines','POLYLINE',fieldDefs)
        fieldDefs = copy.deepcopy(tableDict['CMUPoints'])
        createFeatureClass(thisDB,'CorrelationOfMapUnits','CMUPoints','POINT',fieldDefs)

    # create CrossSections; clamp the requested count to 0..26
    # (one dataset per letter of the alphabet)
    if nCrossSections > 26:
        nCrossSections = 26
    if nCrossSections < 0:
        nCrossSections = 0
    # note space in position 0
    alphabet = ' ABCDEFGHIJKLMNOPQRSTUVWXYZ'

    for n in range(1,nCrossSections+1):
        xsLetter = alphabet[n]
        xsName = 'CrossSection'+xsLetter
        xsN = 'CS'+xsLetter
        #create feature dataset CrossSectionA
        addMsgAndPrint('  Creating feature data set CrossSection'+xsLetter+'...')
        # no coordSystem here: cross sections use section-view coordinates,
        # not map coordinates (presumably intentional -- confirm upstream)
        arcpy.CreateFeatureDataset_management(thisDB,xsName)
        # BUG FIX (aliasing): deep copies keep the per-section _ID renames
        # and LTYPE/PTTYPE appends below from corrupting tableDict between
        # loop iterations.
        fieldDefs = copy.deepcopy(tableDict['MapUnitPolys'])
        fieldDefs[0][0] = xsN+'MapUnitPolys_ID'
        createFeatureClass(thisDB,xsName,xsN+'MapUnitPolys','POLYGON',fieldDefs)
        fieldDefs = copy.deepcopy(tableDict['ContactsAndFaults'])
        if addLTYPE:
            fieldDefs.append(['LTYPE','String','NullsOK',50])
        fieldDefs[0][0] = xsN+'ContactsAndFaults_ID'
        createFeatureClass(thisDB,xsName,xsN+'ContactsAndFaults','POLYLINE',fieldDefs)
        fieldDefs = copy.deepcopy(tableDict['OrientationPoints'])
        if addLTYPE:
            fieldDefs.append(['PTTYPE','String','NullsOK',50])
        fieldDefs[0][0] = xsN+'OrientationPoints_ID'
        createFeatureClass(thisDB,xsName,xsN+'OrientationPoints','POINT',fieldDefs)

    # create tables
    tables = ['DescriptionOfMapUnits','DataSources','Glossary']
    for tb in ['RepurposedSymbols','StandardLithology','GeologicEvents','MiscellaneousMapInformation']:
        if tb in OptionalElements:
            tables.append(tb)
    for table in tables:
        addMsgAndPrint('  Creating table '+table+'...')
        try:
            arcpy.CreateTable_management(thisDB,table)
            fieldDefs = tableDict[table]
            for fDef in fieldDefs:
                try:
                    if fDef[1] == 'String':
                        # string fields carry an explicit length in fDef[3]
                        arcpy.AddField_management(thisDB+'/'+table,fDef[0],transDict[fDef[1]],'#','#',fDef[3],'#',transDict[fDef[2]])
                    else:
                        arcpy.AddField_management(thisDB+'/'+table,fDef[0],transDict[fDef[1]],'#','#','#','#',transDict[fDef[2]])
                except Exception:
                    addMsgAndPrint('Failed to add field '+fDef[0]+' to table '+table)
                    addMsgAndPrint(arcpy.GetMessages(2))
        except Exception:
            addMsgAndPrint(arcpy.GetMessages())

    ### GeoMaterials
    addMsgAndPrint('  Setting up GeoMaterials table and domains...')
    #  Copy GeoMaterials table from the resources shipped beside this script
    arcpy.Copy_management(os.path.dirname(sys.argv[0])+'/../Resources/GeMS_lib.gdb/GeoMaterialDict', thisDB+'/GeoMaterialDict')
    #   make GeoMaterials domain
    arcpy.TableToDomain_management(thisDB+'/GeoMaterialDict','GeoMaterial','IndentedName',thisDB,'GeoMaterials')
    #   attach it to DMU field GeoMaterial
    arcpy.AssignDomainToField_management(thisDB+'/DescriptionOfMapUnits','GeoMaterial','GeoMaterials')
    #  Make GeoMaterialConfs domain, attach it to DMU field GeoMaterialConf
    arcpy.CreateDomain_management(thisDB,'GeoMaterialConfidenceValues','','TEXT','CODED')
    for val in GeoMaterialConfidenceValues:
        arcpy.AddCodedValueToDomain_management(thisDB,'GeoMaterialConfidenceValues',val,val)
    arcpy.AssignDomainToField_management(thisDB+'/DescriptionOfMapUnits','GeoMaterialConfidence','GeoMaterialConfidenceValues')

    #Confidence domains, Glossary entries, and DataSources entry
    if addConfs:
        addMsgAndPrint('  Adding standard ExistenceConfidence and IdentityConfidence domains')
        #  create domain, add domain values, and link domain to appropriate fields
        addMsgAndPrint('    Creating domain, linking domain to appropriate fields')
        arcpy.CreateDomain_management(thisDB,'ExIDConfidenceValues','','TEXT','CODED')
        for item in DefaultExIDConfidenceValues:  # items are [term, definition, source]
            code = item[0]
            arcpy.AddCodedValueToDomain_management(thisDB,'ExIDConfidenceValues',code,code)
        # link the domain to every *Confidence field in every feature class
        arcpy.env.workspace = thisDB
        dataSets = arcpy.ListDatasets()
        for ds in dataSets:
            arcpy.env.workspace = thisDB+'/'+ds
            fcs = arcpy.ListFeatureClasses()
            for fc in fcs:
                fieldNames = fieldNameList(fc)
                for fn in fieldNames:
                    if fn in ('ExistenceConfidence', 'IdentityConfidence','ScientificConfidence'):
                        arcpy.AssignDomainToField_management(thisDB+'/'+ds+'/'+fc,fn,'ExIDConfidenceValues')
        # add definitions of domain values to Glossary
        addMsgAndPrint('    Adding domain values to Glossary')
        ## create insert cursor on Glossary
        cursor = arcpy.da.InsertCursor(thisDB+'/Glossary',['Term','Definition','DefinitionSourceID'])
        for item in DefaultExIDConfidenceValues:
            cursor.insertRow((item[0],item[1],item[2]))
        del cursor
        # add definitionsource to DataSources
        addMsgAndPrint('    Adding definition source to DataSources')
        ## create insert cursor on DataSources
        cursor = arcpy.da.InsertCursor(thisDB+'/DataSources',['DataSources_ID','Source','URL'])
        cursor.insertRow(('FGDC-STD-013-2006','Federal Geographic Data Committee [prepared for the Federal Geographic Data Committee by the U.S. Geological Survey], 2006, FGDC Digital Cartographic Standard for Geologic Map Symbolization: Reston, Va., Federal Geographic Data Committee Document Number FGDC-STD-013-2006, 290 p., 2 plates.','https://ngmdb.usgs.gov/fgdc_gds/geolsymstd.php'))
        del cursor

    # trackEdits: add editor tracking to all feature classes and tables.
    # (A large block of commented-out cartographic-representation support
    # formerly lived in the inner loop; removed as dead code -- see
    # repository history if AddRepresentation_cartography is needed.)
    if trackEdits:
        arcpy.env.workspace = thisDB
        tables = arcpy.ListTables()
        datasets = arcpy.ListDatasets()
        for dataset in datasets:
            addMsgAndPrint('  Dataset '+dataset)
            arcpy.env.workspace = thisDB+'/'+dataset
            fcs = arcpy.ListFeatureClasses()
            for fc in fcs:
                if trackEdits:
                    addTracking(fc)
        if trackEdits:
            addMsgAndPrint('  Tables ')
            arcpy.env.workspace = thisDB
            for aTable in tables:
                # GeoMaterialDict is reference data, not user-edited
                if aTable != 'GeoMaterialDict':
                    addTracking(aTable)

# --- Script entry: create empty attribute tables in an SDE workspace from an
# --- Excel data dictionary.  Python 2 code (print statement, str.decode) using xlrd.
# Template feature class; only used by the commented-out feature-class branch below.
template = r"D:\智能化管线项目\新气\新气数据处理\新气建设期转到新气数据库\new20190709gdb_hww\new20190709.gdb\Pipe_Risk"
arcpy.env.workspace = r'Database Connections\Connection to 127.0.0.1.sde'
# arcpy.env.workspace = r'D:\智能化管线项目\新气\新气数据处理\新气数据入库_0916\新库0910zlm.gdb'#r'Database Connections\Connection to 10.246.146.120.sde'#r"D:\智能化管线项目\销售华北1023数据表字段add\总部空库20180105.gdb"
# NOTE(review): this assignment overrides the 127.0.0.1 workspace set two lines above.
arcpy.env.workspace = r'Database Connections\Connection to 10.246.146.120.sde'  #r"D:\智能化管线项目\销售华北1023数据表字段add\总部空库20180105.gdb"
xlspath = r"D:\智能化管线项目\新气\新气数据处理\标准数据库-20200115.xlsx".decode('utf-8')
myworkbook = xlrd.open_workbook(xlspath)
for i in range(myworkbook.nsheets):

    table = myworkbook.sheet_by_index(i)  # locate the current sheet by index
    ncols = table.ncols
    nrows = table.nrows
    # every sheet is treated as a plain attribute table
    print myworkbook.sheet_names()[i]
    # cell(2,0) is used as the new table's name, cell(1,0) as its alias -- TODO confirm layout
    arcpy.CreateTable_management(arcpy.env.workspace, table.cell(2, 0).value)
    arcpy.AlterAliasName(table.cell(2, 0).value, table.cell(1, 0).value)
    addfld()  # adds the sheet's fields to the new table; defined elsewhere in this script
    # case with spatial (feature class) tables:
    # if i == 0 or i == myworkbook.nsheets-1:
    #     print myworkbook.sheet_names()[i]
    #     # arcpy.CreateFeatureclass_management()
    #     arcpy.CreateFeatureclass_management("SDE.Pipe_Integrity",table.cell(2,0).value,"POLYLINE","",'ENABLED', 'DISABLED', template)
    #     arcpy.AlterAliasName(table.cell(2,0).value,table.cell(1,0).value)
    #     addfld()
    # # elif i == 1:
    # #     arcpy.CreateFeatureclass_management("Pipe_Risk", table.cell(2, 0).value, "POLYLINE", "", 'ENABLED', 'ENABLED',
    # #                                         template)
    # #     arcpy.AlterAliasName(table.cell(2, 0).value, table.cell(1, 0).value)
    # #     addfld()
    # else:
示例#21
0
def create_gdb_table(gdb, name):
    """Create an empty table called *name* inside the workspace *gdb*.

    Returns the arcpy Result object referencing the newly created table.
    """
    new_table = arcpy.CreateTable_management(gdb, name)
    message = "Table {} created".format(name)
    print(message)
    return new_table
示例#22
0
def WriteToFile(Walk,WalkPath):
    '''
    Write the random walking result to file.

    Walk     -- truthy when the preceding walk computation succeeded
    WalkPath -- walk-path object pickled to <OutputFolder>\<WalkPathName>.wlk

    NOTE(review): relies on module-level globals (OutputFolder, PointTable,
    LinkTable, RField, Net, WalkPathName, env) -- confirm they are set by the
    enclosing script before calling.  On any error, a formatted traceback and
    the arcpy message stack are reported via arcpy.AddError.
    '''
    try:
        if Walk:
            arcpy.AddMessage("Start to write result into files.") 

            env.workspace=(OutputFolder)
            arcpy.env.overwriteOutput = True
            
            '''
            Insert point rvalue to point table
            '''
            # One dBASE row per network node: FID, degree, and its r-value.
            arcpy.CreateTable_management(OutputFolder,str(PointTable)+".dbf")
            arcpy.AddField_management(str(PointTable)+".dbf","NodeFID" , "SHORT")
            arcpy.AddField_management(str(PointTable)+".dbf","Degree" , "SHORT")            
            arcpy.AddField_management(str(PointTable)+".dbf",RField , "Long")
            pointrows = arcpy.InsertCursor(str(OutputFolder)+"\\"+str(PointTable)+".dbf")
            for p in Net.NodeList:
                pointrow = pointrows.newRow()
                pointrow.setValue("NodeFID",p.ID)
                pointrow.setValue("Degree",p.degree)
                pointrow.setValue(RField,p.rvalue)
                pointrows.insertRow(pointrow)
            
            '''
            Insert link rvalue to link table
            '''    
            
            # One dBASE row per network edge: FID, connectivity, and its r-value.
            arcpy.CreateTable_management(OutputFolder,str(LinkTable)+".dbf")
            
            arcpy.AddField_management(str(LinkTable)+".dbf","LinkFID" , "SHORT")
            arcpy.AddField_management(str(LinkTable)+".dbf","Connect" , "SHORT")
            arcpy.AddField_management(str(LinkTable)+".dbf",RField , "Long")
            linkrows = arcpy.InsertCursor(str(OutputFolder)+"\\"+str(LinkTable)+".dbf")
            for l in Net.EdgeList:
                linkrow = linkrows.newRow()
                linkrow.setValue("LinkFID",l.ID)
                linkrow.setValue("Connect",l.connect)
                linkrow.setValue(RField,l.rvalue)
                linkrows.insertRow(linkrow)
                
            '''
            Export the walking path to a file
            '''
            
            # Raise the recursion limit so pickling a deeply nested path object
            # does not hit Python's default recursion cap.
            sys.setrecursionlimit(1000000)
            ## enlarge the cursive limit
            # dump walking path to a file
            OutPutFiel = open(OutputFolder+"\\"+WalkPathName+".wlk","wb")
            ## Export the point path to a file

            pickle.dump(WalkPath,OutPutFiel,2)
            ## dump the class into binary (pickle protocol 2)
            OutPutFiel.close()

            arcpy.AddMessage("Done!")
        else:
            arcpy.AddWarning("The walking calculation is failed." )   
    except:

        # Get the traceback object
        #
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
    
        # Concatenate information together concerning the error into a message string
        #
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
    
        # Return python error messages for use in script tool or Python Window
        #
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)
def SLEM(Line, Distance, Output, TempFolder, TF):
    """Segment a polyline layer into fixed-length chunks via linear referencing.

    Line       -- input polyline feature class; its fields decide which branch runs
                  (raw, UGO, sequenced UGO, or AGO)
    Distance   -- length of each output segment
    Output     -- path of the sorted, segmented output feature class
    TempFolder -- folder for the intermediate table export (AGO branch only)
    TF         -- "true"/"false" string; when "true", temporary datasets are deleted

    Returns the sorted output (arcpy Sort_management result).

    NOTE(review): the k==0, k==1 and k==2 branches repeat nearly identical
    segmentation logic; left duplicated here to preserve behavior exactly.
    """
    
    CopyLine = arcpy.CopyFeatures_management(Line, "%ScratchWorkspace%\CopyLine")
    
    fieldnames = [f.name for f in arcpy.ListFields(CopyLine)]

    #/identification of the polyline type : raw, UGOs, sequenced UGOs, or AGOs
    # k = 0: raw, k = 1: UGO, k = 2: sequenced UGO, k = 3: AGO
    k = 0
    if "Rank_AGO" in fieldnames :
        k = 3
    elif "Order_ID" in fieldnames :
        k = 2
    elif "Rank_UGO" in fieldnames :
        k = 1
            
    arcpy.AddMessage(k)
    
            

    ################################
    ########## Raw polyline ########
    ################################
    #
    if k == 0 :
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "Rank_UGO", "!"+fieldnames[0]+"!", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure", "!shape.length!", "PYTHON_9.3", "")
        
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        
        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        # One event row per Distance-long slice of every route.
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.Rank_UGO = line.Rank_UGO
                row.To_M = tempdistance + float(Distance)
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents

        #/creation of the route event layer
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_UGO", PointEventTEMP, "Rank_UGO LINE Distance To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        Sort = arcpy.Sort_management(Split, Output, [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")
        
        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)
        
        # Two staggered cursors over the same sorted table: rows2 runs one
        # record ahead of rows1, so each row's Distance becomes the previous
        # row's Distance plus its length (reset to 0 at every new Rank_UGO).
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Sort)))
        n = 0
        for line1 in rows1 :
            line2 = rows2.next()          
            if n == nrows-1 :
                break
            if n == 0 :
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO :
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO :
                line2.Distance = 0
                rows2.updateRow(line2)
            
            n+=1
        
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)
    
    
         
    
    
    
    
    
    ##################
    ###### UGO #######
    ##################
    if k == 1 :    
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure", "!shape.length!", "PYTHON_9.3", "")
        
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        
        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        # One event row per Distance-long slice of every route.
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.Rank_UGO = line.Rank_UGO
                row.To_M = tempdistance + float(Distance)
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents
        
        #/creation of the route event layer
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_UGO", PointEventTEMP, "Rank_UGO LINE Distance To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        Sort = arcpy.Sort_management(Split, Output, [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")
        
        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)
        
        # Same staggered-cursor cumulative-distance pass as in the k==0 branch.
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Sort)))
        n = 0
        for line1 in rows1 :
            line2 = rows2.next()          
            if n == nrows-1 :
                break
            if n == 0 :
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO :
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO :
                line2.Distance = 0
                rows2.updateRow(line2)
            
            n+=1
        
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)
    
    
    
    
    
    
    
    
    ################################
    ######### Sequenced UGO ########
    ################################
    if k == 2 :    
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure", "!Shape_Length!", "PYTHON_9.3", "")
          
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        arcpy.AddField_management(LineRoutes, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        # NOTE(review): sorts the original Line (not CopyLine) to transfer
        # Order_ID by positional pairing of the two cursors -- confirm the two
        # datasets are in matching Rank_UGO order.
        Sort = arcpy.Sort_management(Line, "%ScratchWorkspace%\\Sort", [["Rank_UGO", "ASCENDING"]])

        rows1 = arcpy.UpdateCursor(LineRoutes)
        rows2 = arcpy.SearchCursor(Sort)
        
        for line1 in rows1 :
            line2 = rows2.next()
            line1.Order_ID = line2.Order_ID
            rows1.updateRow(line1)
            
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
     
        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        
        # One event row per Distance-long slice, carrying Order_ID as well.
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.To_M = tempdistance + float(Distance)
                row.Order_ID = line.Order_ID
                row.Rank_UGO = line.Rank_UGO
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents
        
        
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_UGO", PointEventTEMP, "Rank_UGO LINE Distance To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        Sort = arcpy.Sort_management(Split, Output, [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")
        
        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)
        
        # Staggered-cursor cumulative-distance pass, as in the other branches.
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        # NOTE(review): this branch counts rows on Split (k==0/1 count Sort)
        # and breaks with ">=" instead of "==" -- confirm intentional.
        nrows = int(str(arcpy.GetCount_management(Split)))
        n = 0
        for line1 in rows1 :
            line2 = rows2.next()         
            if n >= nrows-1 :
                break
            if n == 0 :
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO :
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO :
                line2.Distance = 0
                rows2.updateRow(line2)
            
            n+=1
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    
    
    
    
    
    
    
    #############
    #### AGO ####
    #############
    if k == 3 :   
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        # Fallback for datasets whose geometry field is named "forme"
        # (French-localized geodatabases) instead of "shape".
        try :
            arcpy.CalculateField_management(CopyLine, "To_Measure", "!shape.length!", "PYTHON_9.3", "")
        except :
            arcpy.CalculateField_management(CopyLine, "To_Measure", "!forme.length!", "PYTHON_9.3", "")
        
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_AGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        arcpy.AddField_management(LineRoutes, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(LineRoutes, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(LineRoutes, "AGO_Val", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        
        UPD_SL.UpToDateShapeLengthField(LineRoutes)
        

        # Export CopyLine's attribute table to text, then read the columns
        # back to refill the route fields positionally.
        Ext.Export(CopyLine,TempFolder,"ExportTable")       

        fichier = open(TempFolder+"\\ExportTable.txt", 'r')        
        Order_ID = []
        Rank_UGO = []
        Dist = []
        Rank_AGO = []
        AGO_Val = []
        
        # Header row gives the column positions of each attribute.
        head = fichier.readline().split('\n')[0].split(';')
        iOrder_ID = head.index("Order_ID")
        iRank_UGO = head.index("Rank_UGO")
        iRank_AGO = head.index("Rank_AGO")
        iAGO_Val = head.index("AGO_Val")
        
        for l in fichier:
            Order_ID.append(int(l.split('\n')[0].split(';')[iOrder_ID]))
            Rank_UGO.append(int(l.split('\n')[0].split(';')[iRank_UGO]))
            Rank_AGO.append(float(l.split('\n')[0].split(';')[iRank_AGO]))
            # Comma-to-dot replacement handles decimal-comma locales.
            AGO_Val.append(float(l.split('\n')[0].split(';')[iAGO_Val].replace(',','.')))

        p=0
        rows1 = arcpy.UpdateCursor(LineRoutes)
        for line1 in rows1 :
            line1.Order_ID = Order_ID[p]
            line1.Rank_UGO = Rank_UGO[p]
            line1.Rank_AGO = Rank_AGO[p]
            line1.AGO_Val = AGO_Val[p]
            rows1.updateRow(line1)
            p+=1
    
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Distance_From_Start", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_AGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "AGO_Val", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

        
        # One event row per Distance-long slice, carrying all AGO attributes.
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.Distance_From_Start = tempdistance
                row.To_M = tempdistance + float(Distance)
                row.Order_ID = line.Order_ID
                row.Rank_UGO = line.Rank_UGO
                row.Rank_AGO = line.Rank_AGO
                row.AGO_Val = line.AGO_Val
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents
        
        
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_AGO", PointEventTEMP, "Rank_AGO LINE Distance_From_Start To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        arcpy.AddField_management(Split, "Distance", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(Split, "Distance", "!Distance_From_Start!", "PYTHON_9.3", "")
        arcpy.DeleteField_management(Split, ["To_M","Distance_From_Start"])
        Sort = arcpy.Sort_management(Split, Output, [["Order_ID", "ASCENDING"], ["Rank_UGO", "ASCENDING"], ["Rank_AGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        UPD_SL.UpToDateShapeLengthField(Sort)
        
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)
    
    
    
    
    return Sort
示例#24
0
def createTables(surveyGDB, outWorkspace, prefix):
    '''Creates the domains, tables and relationships of the survey in the target workspace.

    surveyGDB    -- path to the source survey geodatabase
    outWorkspace -- path to the target workspace that receives the copies
    prefix       -- string prepended (with an underscore) to every new dataset name
    '''
    arcpy.AddMessage('\t-Creating Tables')
    arcpy.env.workspace = surveyGDB
    allTables = getSurveyTables(surveyGDB)

    dscW = arcpy.Describe(arcpy.env.workspace)
    # Migrate the domains; only coded-value domains (names starting with 'cvd') are copied.
    arcpy.AddMessage('\t\t-Creating Domains')
    for domainName in dscW.domains:
        if domainName[0:3] == 'cvd':
            # BUG FIX: the format string was missing its placeholder
            # ('\t\t\t-'.format(domainName)), so the domain name never
            # appeared in the progress message.
            arcpy.AddMessage('\t\t\t-{0}'.format(domainName))
            tempTable = 'in_memory\\{0}'.format(domainName)
            # Round-trip through a table to re-create the domain in the target workspace.
            domainTable = arcpy.DomainToTable_management(surveyGDB, domainName, tempTable, 'CODE', 'DESC')
            newDomain = arcpy.TableToDomain_management(tempTable, 'CODE', 'DESC', outWorkspace, domainName, update_option='REPLACE')
            arcpy.Delete_management(tempTable)

    arcpy.AddMessage("\t\t-Creating Feature Classes & Tables")
    for table in allTables:
        dsc = arcpy.Describe(table)
        newTableName = "{0}_{1}".format(prefix, table)
        # The source table doubles as the schema template for the new copy.
        # (Was a redundant chained assignment: templateTable = template = ...)
        templateTable = os.path.join(surveyGDB, table)

        if dsc.datatype == u'FeatureClass':
            newTable = arcpy.CreateFeatureclass_management(outWorkspace, newTableName, "POINT", template=templateTable, spatial_reference=dsc.spatialReference)
        else:
            newTable = arcpy.CreateTable_management(outWorkspace, newTableName, template=templateTable)
        arcpy.AddMessage("\t\t\t-Created {0}".format(newTableName))

        # Re-attach domains to fields -- templates do not carry domain assignments.
        tableFields = arcpy.ListFields(table)
        for field in tableFields:
            if field.domain != '':
                arcpy.AssignDomainToField_management(newTable, field.name, field.domain)
        if dscW.workspaceType == "RemoteDatabase":
            arcpy.RegisterAsVersioned_management(newTable)

    arcpy.AddMessage('\t\t-Creating Relationships')
    # Reconnect relationship classes, checking for attachments.
    CARDINALITIES = {
        'OneToOne': "ONE_TO_ONE",
        'OneToMany': "ONE_TO_MANY",
        'ManyToMany': "MANY_TO_MANY"
    }

    for child in [(c.name, c.datatype) for c in dscW.children if c.datatype == u'RelationshipClass']:
        dscRC = arcpy.Describe(child[0])
        RCOriginTable = dscRC.originClassNames[0]
        RCDestTable = dscRC.destinationClassNames[0]
        newOriginTable = "{0}_{1}".format(prefix, RCOriginTable)
        newOriginPath = os.path.join(outWorkspace, newOriginTable)
        if dscRC.isAttachmentRelationship:
            # Simple case - attachments have a dedicated tool.
            arcpy.EnableAttachments_management(newOriginPath)
        else:
            newDestTable = "{0}_{1}".format(prefix, RCDestTable)
            newDestPath = os.path.join(outWorkspace, newDestTable)
            newRC = os.path.join(outWorkspace, "{0}_{1}".format(prefix, child[0]))
            relationshipType = "COMPOSITE" if dscRC.isComposite else "SIMPLE"
            fwd_label = dscRC.forwardPathLabel if dscRC.forwardPathLabel != '' else 'Repeat'
            bck_label = dscRC.backwardPathLabel if dscRC.backwardPathLabel != '' else 'MainForm'
            msg_dir = dscRC.notification.upper()
            cardinality = CARDINALITIES[dscRC.cardinality]
            attributed = "ATTRIBUTED" if dscRC.isAttributed else "NONE"
            # originClassKeys is a sequence of (fieldName, keyRole) pairs;
            # invert it to look up the primary and foreign key fields by role.
            originclassKeys = dscRC.originClassKeys
            originclassKeys_dict = {}
            for key in originclassKeys:
                originclassKeys_dict[key[1]] = key[0]
            originPrimaryKey = originclassKeys_dict[u'OriginPrimary']
            originForeignKey = originclassKeys_dict[u'OriginForeign']
            arcpy.CreateRelationshipClass_management(newOriginPath, newDestPath, newRC, relationshipType, fwd_label, bck_label, msg_dir, cardinality, attributed, originPrimaryKey, originForeignKey)
示例#25
0
            # Remove invalid keys from master list
            i = 0
            while i < len(crashKey):
                if crashKey[i] not in validKey:
                    crashKey.pop(i)
                else:
                    i += 1

# Check if any crashes were selected before continuing
if not crashKey:
    arcpy.AddMessage('ERROR!  No crashes were selected!  The script will now terminate.')
    quit()

# Create table for selected crashes
# In-memory table with a single CRASH_KEY column (DOUBLE, precision 11, scale 0).
arcpy.CreateTable_management('in_memory', 'crash')
arcpy.AddField_management('in_memory/crash', 'CRASH_KEY', 'DOUBLE',11,0)
crashKeyTable = 'in_memory/crash'
crashCursor = arcpy.InsertCursor(crashKeyTable)
for key in crashKey:  # Loop through list of CRASH_KEYs and place key in table
    record = crashCursor.newRow()
    record.CRASH_KEY = key

    crashCursor.insertRow(record)

# Create project folder
# NOTE(review): PROJECT_FOLDER, projectNumber and projectName are defined
# earlier in the script; the folder is named "<number>-<name with underscores>".
arcpy.AddMessage('\nCreating project folder...')
arcpy.CreateFolder_management(PROJECT_FOLDER, projectNumber + '-' + projectName.replace(' ', '_'))
arcpy.AddMessage('done!')

# Set project folder variable
示例#26
0
    header = updated_route_system[0][0]
    header_updated = updated_route_system[0][1]
    arcpy.AddMessage('-- ' + header + '...')
    arcpy.TruncateTable_management(header)
    arcpy.Delete_management(header)
    arcpy.CopyFeatures_management(header_updated, header)
    arcpy.Delete_management(header_updated)
    # Itinerary table:
    itin = updated_route_system[1][0]
    itin_updated = updated_route_system[1][1]
    arcpy.AddMessage('-- ' + itin + '...')
    arcpy.TruncateTable_management(itin)
    arcpy.Delete_management(itin)
    itin_path = MHN.break_path(itin)
    # CreateTable & Append because CopyFeatures crashes randomly with large tables.
    arcpy.CreateTable_management(itin_path['dir'], itin_path['name'],
                                 itin_updated)
    arcpy.Append_management(itin_updated, itin, 'TEST')
    arcpy.Delete_management(itin_updated)

# Replace old nodes.
# new_nodes_CZ / temp_arcs are temporary feature classes built earlier in the
# script; MHN appears to expose the network dataset paths -- TODO confirm.
arcpy.AddMessage('-- ' + MHN.node + '...')
arcpy.Delete_management(MHN.node)
arcpy.CopyFeatures_management(new_nodes_CZ, MHN.node)
# Drop the temporary source once it has been copied into place.
arcpy.Delete_management(new_nodes_CZ)

# Replace old arcs.
arcpy.AddMessage('-- ' + MHN.arc + '...')
arcpy.Delete_management(MHN.arc)
arcpy.CopyFeatures_management(temp_arcs, MHN.arc)
arcpy.Delete_management(temp_arcs)
示例#27
0
File: roctool.py  Project: irvcaza/ArcSDM
def execute(self, parameters, messages):
    """Calculate and plot ROC curves and AUC values for one or more models.

    parameters -- tool parameters: (positives, negatives, models, output workspace)
    messages   -- geoprocessing messages object used for progress reporting

    Saves a ROC plot ("results.png") and a results table ("results.dbf") into
    the output workspace.  Raises ValueError when input spatial references
    do not match.
    """
    positives_param, negatives_param, models_param, output_param = parameters

    arcpy.env.workspace = output_param.valueAsText

    positives_descr = arcpy.Describe(positives_param.valueAsText)
    positives_x, positives_y = FetchCoordinates(positives_descr.catalogPath).T
    positives_sref = positives_descr.spatialReference

    if negatives_param.valueAsText:
        negatives_descr = arcpy.Describe(negatives_param.valueAsText)
        # BUG FIX: previously read positives_descr.spatialReference, so the
        # mismatch check below compared positives against themselves and
        # could never fail.
        negatives_sref = negatives_descr.spatialReference
        if not SpatialReferencesAreEqual(positives_sref, negatives_sref,
                                         messages):
            raise ValueError(
                "Positives and negatives have different spatial references: '%s' and '%s'."
                % (positives_sref.name, negatives_sref.name))
    else:
        negatives_descr = None

    # Diagonal reference line: a random classifier's ROC curve.
    pylab.figure()
    handle, = pylab.plot([0, 1], [0, 1], "k--", lw=2)

    legend_items = ["Random guess"]
    plot_handles = [handle]

    # Iterates through each model and calculates and plots ROC curves for them.

    max_rows = 0
    model_names, roc_curves, auc_values, auc_confints = [], [], [], []

    tokens = models_param.valueAsText.split(";")

    for i in range(len(tokens)):

        raster_descr = arcpy.Describe(tokens[i])
        raster_sref = raster_descr.spatialReference
        if not SpatialReferencesAreEqual(positives_sref, raster_sref,
                                         messages):
            raise ValueError(
                "Positives and %s have different spatial references: '%s' and '%s'."
                % (raster_descr.name, positives_sref.name, raster_sref.name))

        color = COLOR_TABLE[i % NUM_COLORS]

        _roc_curve, _roc_confints, _auc_value, _auc_confints = CalculateROCCurveAndAUCValueForModel(
            messages, raster_descr, negatives_descr, positives_x, positives_y)

        # Shade the confidence band around the curve when available.
        if _roc_confints:
            pylab.fill_between(_roc_confints[0][:, 0],
                               _roc_confints[0][:, 1],
                               _roc_confints[1][:, 1],
                               color=color,
                               alpha=0.1)
        handle, = pylab.plot(_roc_curve[:, 0],
                             _roc_curve[:, 1],
                             lw=2,
                             color=color)

        plot_handles.append(handle)
        legend_items.append("%s (AUC = %.3f)" %
                            (raster_descr.name, _auc_value))

        messages.addMessage("%s: AUC = %.3f." %
                            (raster_descr.name, _auc_value))
        if _auc_confints:
            messages.addMessage(
                "%s: 95%% confidence interval = %.3f-%.3f." %
                (raster_descr.name, _auc_confints[0], _auc_confints[1]))

        model_names.append(raster_descr.name)
        roc_curves.append(_roc_curve)
        auc_values.append(_auc_value)
        auc_confints.append(_auc_confints)
        max_rows = numpy.max([max_rows, len(_roc_curve)])

    # Configures the plot and saves it.

    png_path = arcpy.CreateUniqueName("results.png")

    pylab.gca().set_xlim([0, 1])
    pylab.gca().set_ylim([0, 1])

    pylab.xlabel("False Positive Rate")
    pylab.ylabel("True Positive Rate")

    pylab.legend(plot_handles, legend_items, 4)

    pylab.savefig(png_path)

    messages.addMessage("Saved ROC curve plot to '%s'." % png_path)

    # Creates a database table for storing the essential results.

    table_path = arcpy.CreateUniqueName("results.dbf")
    dbf_path, dbf_name = os.path.split(table_path)

    arcpy.CreateTable_management(dbf_path, dbf_name)

    arcpy.AddField_management(table_path, "MODEL", "TEXT", field_length=10)
    arcpy.AddField_management(table_path, "AUC", "TEXT", field_length=10)

    # Confidence-interval columns only exist when negatives were not given
    # (i.e. when CalculateROCCurveAndAUCValueForModel produced confints).
    if not negatives_descr:
        arcpy.AddField_management(table_path,
                                  "AUC_LO",
                                  "TEXT",
                                  field_length=10)
        arcpy.AddField_management(table_path,
                                  "AUC_HI",
                                  "TEXT",
                                  field_length=10)

    # One FPR/TPR column pair per model.
    for i in range(len(model_names)):
        arcpy.AddField_management(table_path,
                                  "FPR_%d" % (i + 1),
                                  "DOUBLE",
                                  20,
                                  10,
                                  field_length=10)
        arcpy.AddField_management(table_path,
                                  "TPR_%d" % (i + 1),
                                  "DOUBLE",
                                  20,
                                  10,
                                  field_length=10)

    arcpy.DeleteField_management(table_path,
                                 "Field1")  # Deletes a nuisance field!?

    # Populates the database table.

    cursor = arcpy.InsertCursor(table_path)

    for i in range(max_rows):

        row = cursor.newRow()

        # The first len(model_names) rows also carry the per-model summary columns.
        if i < len(model_names):
            row.setValue("MODEL", model_names[i])
            row.setValue("AUC", "%.3f" % auc_values[i])
            if not negatives_descr:
                row.setValue("AUC_LO", "%.3f" % auc_confints[i][0])
                row.setValue("AUC_HI", "%.3f" % auc_confints[i][1])

        # Curves can have different lengths; shorter ones leave trailing nulls.
        for j in range(len(model_names)):
            if len(roc_curves[j]) > i:
                row.setValue("FPR_%d" % (j + 1), roc_curves[j][i, 0])
                row.setValue("TPR_%d" % (j + 1), roc_curves[j][i, 1])

        cursor.insertRow(row)

    del cursor, row

    messages.addMessage("Saved results database table to '%s'." % table_path)
示例#28
0
def update_route_system(header,
                        itin,
                        vertices_comprising,
                        split_dict_ABB,
                        new_ABB_values,
                        common_id_field,
                        order_field=None):
    ''' A method for updating any of the MHN's route systems: hwyproj,
        bus_base, bus_current, and bus_future. order_field argument allows for
        separate treatment of hwyproj and the bus routes.

        Arguments:
        header -- route system header feature class (one row per route or
            project, keyed by common_id_field).
        itin -- itinerary table (one row per traversed link, joined to the
            header by common_id_field).
        vertices_comprising -- dict of ABB string -> vertex array, used to
            rebuild each route's polyline geometry.
        split_dict_ABB -- dict of (anode, bnode, baselink) -> ordered list of
            replacement segments for links that were split. Each segment is
            indexed here as [0]=new ABB string, [2]=start ratio along the
            original link, [3]=length ratio (index [1] is never read in this
            function -- TODO confirm its meaning upstream).
        new_ABB_values -- collection of all currently-valid ABB values.
        common_id_field -- field linking header rows to itinerary rows.
        order_field -- itinerary sequence field; None for hwyproj, set for
            the bus route systems (enables split-replacement handling).

        Returns ((header, header_updated), (itin, itin_updated)), pairing
        each original dataset with its rebuilt in-memory copy. '''

    # Copy itinerary table to memory for non-destructive editing
    header_name = MHN.break_path(header)['name']
    itin_name = MHN.break_path(itin)['name']
    arcpy.AddMessage('-- ' + header_name + '...')
    itin_copy_path = MHN.mem
    itin_copy_name = itin_name + '_copy'
    itin_copy = os.path.join(itin_copy_path, itin_copy_name)
    arcpy.CreateTable_management(itin_copy_path, itin_copy_name, itin)

    itin_OID_field = MHN.determine_OID_fieldname(itin)
    itin_dict = MHN.make_attribute_dict(itin, itin_OID_field)

    # Check validity of ABB value on each line, adjusting the itinerary when
    # invalidity is due to a split
    max_itin_OID = max([OID for OID in itin_dict])
    split_itin_dict = {}
    all_itin_OIDs = list(itin_dict.keys())
    all_itin_OIDs.sort(
    )  # For processing in itinerary order, rather than in the dict's pseudo-random order
    bad_itin_OIDs = []
    if order_field:
        # order_bump counts how many extra rows have been inserted (by link
        # splits) ahead of the current row within the current route, so later
        # rows' sequence numbers can be shifted to stay unique and ordered.
        order_bump = 0
    for OID in all_itin_OIDs:
        common_id = itin_dict[OID][common_id_field]
        if order_field:
            order = itin_dict[OID][order_field]
            if order == 1:
                # A new route's itinerary begins: reset the bump.
                order_bump = 0
        # ABB is an 'anode-bnode-baselink' string; a null ABB parses as 0-0-0.
        ABB = itin_dict[OID]['ABB']
        if ABB != None:
            anode = int(ABB.split('-')[0])
            bnode = int(ABB.split('-')[1])
            baselink = int(ABB.split('-')[2])
        else:
            anode = 0
            bnode = 0
            baselink = 0
        if ABB not in new_ABB_values:
            if not order_field:  # For hwyproj, all deleted links should be removed from coding. Split links will be replaced.
                bad_itin_OIDs.append(OID)
            if (
                    anode, bnode, baselink
            ) in split_dict_ABB:  # If ABB is invalid because it was split, find new ABB values
                ordered_segments = split_dict_ABB[(anode, bnode, baselink)]
                if order_field:
                    bad_itin_OIDs.append(
                        OID
                    )  # For bus routes, only split links should be removed (and replaced).
                    itin_a = itin_dict[OID]['ITIN_A']
                    itin_b = itin_dict[OID]['ITIN_B']
                    # The route may traverse the link against its A->B
                    # digitized direction; detect that and walk the split
                    # segments in reverse.
                    if itin_b == anode or itin_a == bnode:
                        backwards = True
                        ordered_segments = ordered_segments[::
                                                            -1]  # Make a reversed copy of the ordered segments
                    else:
                        backwards = False
                # Replace the invalid row with one new row per split segment,
                # apportioning measures/times by each segment's length ratio.
                for split_ABB in ordered_segments:
                    split_anode = int(split_ABB[0].split('-')[0])
                    split_bnode = int(split_ABB[0].split('-')[1])
                    split_baselink = int(split_ABB[0].split('-')[2])
                    split_length_ratio = split_ABB[3]
                    max_itin_OID += 1
                    split_itin_dict[max_itin_OID] = itin_dict[OID].copy()
                    split_itin_dict[max_itin_OID]['ABB'] = split_ABB[0]

                    if order_field:
                        if backwards:
                            split_itin_a = split_bnode
                            split_itin_b = split_anode
                            # Start ratio measured from the other end when
                            # traversing the link backwards.
                            split_start_ratio = 1 - (split_ABB[2] +
                                                     split_length_ratio)
                        else:
                            split_itin_a = split_anode
                            split_itin_b = split_bnode
                            split_start_ratio = split_ABB[2]

                        # Adjust itinerary nodes and order:
                        split_itin_dict[max_itin_OID]['ITIN_A'] = split_itin_a
                        split_itin_dict[max_itin_OID]['ITIN_B'] = split_itin_b
                        if split_itin_a != itin_a:  # First split segment receives the same order as the original
                            order_bump += 1
                        split_itin_dict[max_itin_OID][
                            order_field] += order_bump

                        # Adjust variables that only apply to original link's itin_b:
                        if split_itin_dict[max_itin_OID][
                                'LAYOVER'] > 0 and split_itin_b != itin_b:
                            split_itin_dict[max_itin_OID]['LAYOVER'] = 0

                        # Apportion length-dependent variables:
                        split_itin_dict[max_itin_OID][
                            'LINE_SERV_TIME'] *= split_length_ratio
                        F_MEAS = split_itin_dict[max_itin_OID]['F_MEAS']
                        T_MEAS = split_itin_dict[max_itin_OID]['T_MEAS']
                        meas_diff = T_MEAS - F_MEAS
                        if header_name == 'bus_future':
                            future = True
                        else:
                            future = False
                        if not future:  # bus_future has no DEP_TIME or ARR_TIME
                            DEP_TIME = split_itin_dict[max_itin_OID][
                                'DEP_TIME']
                            ARR_TIME = split_itin_dict[max_itin_OID][
                                'ARR_TIME']
                            time_diff = ARR_TIME - DEP_TIME
                        if split_itin_a != itin_a:
                            split_itin_dict[max_itin_OID][
                                'F_MEAS'] += meas_diff * split_start_ratio
                            if not future:
                                split_itin_dict[max_itin_OID][
                                    'DEP_TIME'] += time_diff * split_start_ratio
                        else:
                            pass  # F_MEAS & DEP_TIME are already correct for itin_a
                        if split_itin_b != itin_b:
                            split_itin_dict[max_itin_OID][
                                'T_MEAS'] = F_MEAS + meas_diff * (
                                    split_start_ratio + split_length_ratio)
                            if not future:
                                split_itin_dict[max_itin_OID][
                                    'ARR_TIME'] = DEP_TIME + time_diff * (
                                        split_start_ratio + split_length_ratio)
                        else:
                            pass  # T_MEAS & ARR_TIME are already correct for itin_b
        else:
            # Valid ABB: just shift its sequence number by any bump accrued
            # from earlier splits in this route.
            if order_field:
                itin_dict[OID][order_field] += order_bump

    for OID in bad_itin_OIDs:
        del itin_dict[
            OID]  # Remove invalid ABB records after accounting for splits

    # Combine itinerary dicts, adjust ITIN_ORDER and report new gaps and write
    # updated records to table in memory.
    itin_dict.update(split_itin_dict)
    itin_fields = [
        field.name for field in arcpy.ListFields(itin_copy)
        if field.type != 'OID'
    ]
    with arcpy.da.InsertCursor(itin_copy, itin_fields) as coding_cursor:
        for OID in itin_dict:
            coding_cursor.insertRow(
                [itin_dict[OID][field] for field in itin_fields])

    # Sort records into a second table in memory.
    itin_updated = os.path.join(MHN.mem,
                                '{0}_itin_updated'.format(header_name))
    if order_field:
        arcpy.Sort_management(
            itin_copy, itin_updated,
            [[common_id_field, 'ASCENDING'], [order_field, 'ASCENDING']])
    else:
        arcpy.Sort_management(itin_copy, itin_updated,
                              [[common_id_field, 'ASCENDING']])
    arcpy.Delete_management(itin_copy)

    # Re-build line features.
    header_updated_path = MHN.mem
    header_updated_name = '{0}_updated'.format(header_name)
    header_updated = os.path.join(header_updated_path, header_updated_name)
    # Map each route ID to the ordered list of ABBs it traverses.
    arcs_traversed_by = {}
    field_list = ['ABB', common_id_field]
    with arcpy.da.SearchCursor(itin_updated, field_list) as itin_cursor:
        for row in itin_cursor:
            abb = row[0]
            common_id = row[1]
            if common_id in arcs_traversed_by:
                arcs_traversed_by[common_id].append(abb)
            else:
                arcs_traversed_by[common_id] = [abb]

    common_id_list = [
        row[0] for row in arcpy.da.SearchCursor(header, [common_id_field])
    ]
    arcpy.CreateFeatureclass_management(header_updated_path,
                                        header_updated_name, 'POLYLINE',
                                        header)
    with arcpy.da.InsertCursor(header_updated,
                               ['SHAPE@', common_id_field]) as routes_cursor:
        for common_id in common_id_list:
            route_vertices = arcpy.Array([
                vertices_comprising[abb]
                for abb in arcs_traversed_by[common_id]
                if abb in vertices_comprising
            ])
            # NOTE(review): the bare except below also swallows KeyError when
            # a route has no itinerary rows at all (common_id missing from
            # arcs_traversed_by), deleting that route -- presumably intended,
            # but consider narrowing to Exception. Verify before changing.
            try:
                route = arcpy.Polyline(route_vertices)
                routes_cursor.insertRow([route, common_id])
            except:
                itin_delete_query = ''' "{0}" = '{1}' '''.format(
                    common_id_field, common_id)
                with arcpy.da.UpdateCursor(
                        itin_updated, ['OID@'],
                        itin_delete_query) as itin_delete_cursor:
                    for row in itin_delete_cursor:
                        itin_delete_cursor.deleteRow()
                arcpy.AddWarning(
                    '   - {0} = {1} cannot be rebuilt because the arcs comprising '
                    'it no longer exist (or have new ABB). It cannot be rebuilt '
                    'and is being deleted. Please re-import it if necessary.'.
                    format(common_id_field, common_id))

    # Append the header file attribute values from a search cursor of the original.
    attributes = MHN.make_attribute_dict(header, common_id_field)
    update_fields = [
        field.name for field in arcpy.ListFields(header)
        if field.type not in ['OID', 'Geometry']
        and field.name.upper() != 'SHAPE_LENGTH'
    ]
    with arcpy.da.UpdateCursor(header_updated,
                               update_fields) as attribute_cursor:
        for row in attribute_cursor:
            common_id = row[update_fields.index(common_id_field)]
            for field in [
                    field for field in update_fields
                    if field != common_id_field
            ]:
                row[update_fields.index(field)] = attributes[common_id][field]
            attribute_cursor.updateRow(row)

    return ((header, header_updated), (itin, itin_updated))
# ---- 示例#29 (Example #29) ----
def add_to_geodatabase(input_items, out_gdb, is_fds):
    """Adds items to a geodatabase.

    Copies each input (service layer URL, search-result geometry rows,
    feature class, shapefile, feature dataset, raster, layer file, CAD
    dataset, KML/KMZ, map document, or table) into out_gdb, counting
    successes and failures as it goes.

    Arguments:
    input_items -- dict of dataset path/URL -> output name. When the value
        is a list, it holds search-result row dicts (optionally carrying a
        '[geo]' geoJSON entry) to be written as features or table rows.
    out_gdb -- target geodatabase; when is_fds is True this is a feature
        dataset, and rasters/tables are redirected to its parent workspace.
    is_fds -- whether out_gdb is a feature dataset rather than a geodatabase.

    Returns a tuple (added, errors, skipped).

    NOTE(review): this function uses Python 2-only idioms (dict.iteritems,
    ex.message, arcpy.mapping). It also relies on module-level globals
    (layer_name, existing_fields, new_fields, field_values) so that field
    setup done while creating a target is reused for subsequent rows; if
    the target table/feature class already exists when the first row is
    processed, view_name/layer_name appear to be unbound (or stale from a
    previous call) at the insert-cursor step below -- TODO confirm intended.
    """
    added = 0
    skipped = 0
    errors = 0
    global processed_count
    global layer_name
    global  existing_fields
    global new_fields
    global  field_values

    for ds, out_name in input_items.iteritems():
        try:
            # -----------------------------------------------
            # If the item is a service layer, process and continue.
            # -----------------------------------------------
            if ds.startswith('http'):
                try:
                    service_layer = task_utils.ServiceLayer(ds)
                    arcpy.env.overwriteOutput = True
                    # Page through the service's object IDs in batches,
                    # copying the first batch and appending the rest.
                    oid_groups = service_layer.object_ids
                    out_features = None
                    g = 0.
                    group_cnt = service_layer.object_ids_cnt
                    for group in oid_groups:
                        g += 1
                        group = [oid for oid in group if oid]
                        where = '{0} IN {1}'.format(service_layer.oid_field_name, tuple(group))
                        # Query this OID batch as JSON and load it as an
                        # in-memory FeatureSet.
                        url = ds + "/query?where={}&outFields={}&returnGeometry=true&geometryType=esriGeometryPolygon&f=json".format(where, '*')
                        feature_set = arcpy.FeatureSet()
                        feature_set.load(url)
                        if not out_features:
                            out_features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, out_gdb))
                        else:
                            features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, out_gdb))
                            arcpy.Append_management(features, out_features, 'NO_TEST')
                            # Best-effort cleanup of the intermediate copy.
                            try:
                                arcpy.Delete_management(features)
                            except arcpy.ExecuteError:
                                pass
                        status_writer.send_percent(float(g) / group_cnt * 100, '', 'add_to_geodatabase')
                    processed_count += 1.
                    added += 1
                    status_writer.send_percent(processed_count / result_count, _('Added: {0}').format(ds), 'add_to_geodatabase')
                    continue
                except Exception as ex:
                    status_writer.send_state(status.STAT_WARNING, str(ex))
                    errors_reasons[ds] = ex.message
                    errors += 1
                    continue

            # ------------------------------
            # Is the input a mxd data frame.
            # ------------------------------
            map_frame_name = task_utils.get_data_frame_name(ds)
            if map_frame_name:
                # Strip the '|data frame' suffix to get the .mxd path.
                ds = ds.split('|')[0].strip()

            # -------------------------------
            # Is the input a geometry feature
            # -------------------------------
            if isinstance(out_name, list):
                increment = task_utils.get_increment(result_count)
                for row in out_name:
                    try:
                        name = os.path.join(out_gdb, arcpy.ValidateTableName(ds, out_gdb))
                        # Create the geometry if it exists.
                        geom = None
                        try:
                            geo_json = row['[geo]']
                            geom = arcpy.AsShape(geo_json)
                            row.pop('[geo]')
                        except KeyError:
                            pass

                        if geom:
                            if not arcpy.Exists(name):
                                # Default to WGS84 (EPSG:4326) when no output
                                # coordinate system is set in the environment.
                                if arcpy.env.outputCoordinateSystem:
                                    arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                else:
                                    arcpy.env.outputCoordinateSystem = 4326
                                    arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer_{0}'.format(os.path.basename(name)))
                                existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
                                new_fields = []
                                field_values = []
                                # Add one TEXT field per row attribute.
                                for field, value in row.iteritems():
                                    valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                    new_fields.append(valid_field)
                                    field_values.append(value)
                                    arcpy.AddField_management(layer_name, valid_field, 'TEXT')
                            else:
                                # Existing target has a different shape type:
                                # create a uniquely-named sibling instead.
                                if not geom.type.upper() == arcpy.Describe(name).shapeType.upper():
                                    name = arcpy.CreateUniqueName(os.path.basename(name), out_gdb)
                                    if arcpy.env.outputCoordinateSystem:
                                        arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                    else:
                                        arcpy.env.outputCoordinateSystem = 4326
                                        arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                    layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer_{0}'.format(os.path.basename(name)))
                                    existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
                                    new_fields = []
                                    field_values = []
                                    for field, value in row.iteritems():
                                        valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                        new_fields.append(valid_field)
                                        field_values.append(value)
                                        if valid_field not in existing_fields:
                                            arcpy.AddField_management(layer_name, valid_field, 'TEXT')
                        else:
                            # No geometry: write the row to a plain table.
                            if not arcpy.Exists(name):
                                arcpy.CreateTable_management(out_gdb, os.path.basename(name))
                                view_name = arcpy.MakeTableView_management(name, 'tableview')
                                existing_fields = [f.name for f in arcpy.ListFields(view_name)]
                                new_fields = []
                                field_values = []
                                for field, value in row.iteritems():
                                    valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                    new_fields.append(valid_field)
                                    field_values.append(value)
                                    if valid_field not in existing_fields:
                                        arcpy.AddField_management(view_name, valid_field, 'TEXT')


                        if geom:
                            with arcpy.da.InsertCursor(layer_name, ["SHAPE@"] + new_fields) as icur:
                                icur.insertRow([geom] + field_values)
                        else:
                            with arcpy.da.InsertCursor(view_name, new_fields) as icur:
                                icur.insertRow(field_values)

                        processed_count += 1
                        if (processed_count % increment) == 0:
                            status_writer.send_percent(float(processed_count) / result_count, _('Added: {0}').format(row['name']), 'add_to_geodatabase')
                        added += 1
                        continue
                    except Exception as ex:
                        processed_count += 1
                        errors += 1
                        errors_reasons[name] = ex.message
                        continue
                continue
            # -----------------------------
            # Check the data type and clip.
            # -----------------------------
            dsc = arcpy.Describe(ds)
            if dsc.dataType == 'FeatureClass':
                if out_name == '':
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'ShapeFile':
                # dsc.name[:-4] strips the '.shp' extension.
                if out_name == '':
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(dsc.name[:-4], out_gdb))
                else:
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'FeatureDataset':
                # Copy every feature class in the dataset into a (possibly
                # newly-created) feature dataset in the target.
                if not is_fds:
                    fds_name = os.path.basename(task_utils.create_unique_name(dsc.name, out_gdb))
                    fds = arcpy.CreateFeatureDataset_management(out_gdb, fds_name).getOutput(0)
                else:
                    fds = out_gdb
                arcpy.env.workspace = dsc.catalogPath
                for fc in arcpy.ListFeatureClasses():
                    name = os.path.basename(task_utils.create_unique_name(fc, out_gdb))
                    arcpy.CopyFeatures_management(fc, os.path.join(fds, name))
                arcpy.env.workspace = out_gdb

            elif dsc.dataType == 'RasterDataset':
                # Rasters cannot live in a feature dataset; use its parent.
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRaster_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRaster_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'RasterCatalog':
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRasterCatalogItems_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRasterCatalogItems_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'Layer':
                layer_from_file = arcpy.mapping.Layer(dsc.catalogPath)
                layers = arcpy.mapping.ListLayers(layer_from_file)
                for layer in layers:
                    if out_name == '':
                        name = task_utils.create_unique_name(layer.name, out_gdb)
                    else:
                        name = task_utils.create_unique_name(out_name, out_gdb)
                    if layer.isFeatureLayer:
                        arcpy.CopyFeatures_management(layer.dataSource, name)
                    elif layer.isRasterLayer:
                        if is_fds:
                            name = os.path.dirname(name)
                        arcpy.CopyRaster_management(layer.dataSource, name)

            elif dsc.dataType == 'CadDrawingDataset':
                # Copy every feature class in the CAD drawing, prefixing the
                # output names with the drawing's name.
                arcpy.env.workspace = dsc.catalogPath
                cad_wks_name = os.path.splitext(dsc.name)[0]
                for cad_fc in arcpy.ListFeatureClasses():
                    arcpy.CopyFeatures_management(
                        cad_fc,
                        task_utils.create_unique_name('{0}_{1}'.format(cad_wks_name, cad_fc), out_gdb)
                    )
                arcpy.env.workspace = out_gdb

            elif dsc.dataType == 'File':
                # Only KML/KMZ files are supported; convert via a temporary
                # layer file and copy each sub-layer.
                if dsc.catalogPath.endswith('.kml') or dsc.catalogPath.endswith('.kmz'):
                    name = os.path.splitext(dsc.name)[0]
                    temp_dir = tempfile.mkdtemp()
                    kml_layer = arcpy.KMLToLayer_conversion(dsc.catalogPath, temp_dir, name)
                    group_layer = arcpy.mapping.Layer(os.path.join(temp_dir, '{}.lyr'.format(name)))
                    for layer in arcpy.mapping.ListLayers(group_layer):
                        if layer.isFeatureLayer:
                            arcpy.CopyFeatures_management(layer, task_utils.create_unique_name(layer, out_gdb))
                        elif layer.isRasterLayer:
                            if is_fds:
                                out_gdb = os.path.dirname(out_gdb)
                            arcpy.CopyRaster_management(layer, task_utils.create_unique_name(layer, out_gdb))
                    # Clean up temp KML results.
                    arcpy.Delete_management(os.path.join(temp_dir, '{}.lyr'.format(name)))
                    arcpy.Delete_management(kml_layer)
                else:
                    processed_count += 1
                    status_writer.send_percent(processed_count / result_count, _('Invalid input type: {0}').format(dsc.name), 'add_to_geodatabase')
                    skipped += 1
                    skipped_reasons[ds] = _('Invalid input type: {0}').format(dsc.dataType)
                    continue

            elif dsc.dataType == 'MapDocument':
                # Copy every layer (and table view) of the selected data
                # frame, or of the whole document when no frame was named.
                mxd = arcpy.mapping.MapDocument(dsc.catalogPath)
                if map_frame_name:
                    df = arcpy.mapping.ListDataFrames(mxd, map_frame_name)[0]
                    layers = arcpy.mapping.ListLayers(mxd, data_frame=df)
                else:
                    layers = arcpy.mapping.ListLayers(mxd)
                for layer in layers:
                    if layer.isFeatureLayer:
                        arcpy.CopyFeatures_management(layer.dataSource,
                                                      task_utils.create_unique_name(layer.name, out_gdb))
                    elif layer.isRasterLayer:
                        if is_fds:
                            out_gdb = os.path.dirname(out_gdb)
                        arcpy.CopyRaster_management(layer.dataSource,
                                                    task_utils.create_unique_name(layer.name, out_gdb))
                table_views = arcpy.mapping.ListTableViews(mxd)
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                for table_view in table_views:
                    arcpy.CopyRows_management(table_view.dataSource,
                                              task_utils.create_unique_name(table_view.name, out_gdb))
                out_gdb = arcpy.env.workspace

            elif dsc.dataType.find('Table') > 0:
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRows_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRows_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            else:
                # Try to copy any other types such as topologies, network datasets, etc.
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                arcpy.Copy_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))

            # Restore out_gdb from the current workspace in case it was
            # redirected to a parent geodatabase above -- NOTE(review):
            # assumes arcpy.env.workspace still points at the original
            # target; verify against callers.
            out_gdb = arcpy.env.workspace
            processed_count += 1.
            status_writer.send_percent(processed_count / result_count, _('Added: {0}').format(ds), 'add_to_geodatabase')
            status_writer.send_status(_('Added: {0}').format(ds))
            added += 1
        # Continue if an error. Process as many as possible.
        except Exception as ex:
            processed_count += 1
            status_writer.send_percent(processed_count / result_count, _('Skipped: {0}').format(ds), 'add_to_geodatabase')
            status_writer.send_status(_('FAIL: {0}').format(repr(ex)))
            errors_reasons[ds] = repr(ex)
            errors += 1
            continue

    return added, errors, skipped
# --- Stage 1 wrap-up: global no-take summaries ---
# Compute geodesic area (square km, Mollweide projection) for the dissolved
# full no-take sites, then sum it into a statistics table.
arcpy.AddGeometryAttributes_management("notakeall_diss_project","AREA_GEODESIC","","SQUARE_KILOMETERS",in_mollweideprj)
arcpy.Statistics_analysis("notakeall_diss_project","sum_NOTAKEall",[["AREA_GEO","SUM"]])

# Sites that are only partly no-take: sum their reported no-take area field.
arcpy.Select_analysis("all_wdpa_polybuffpnt", r"in_memory\notake_part","NO_TAKE = 'Part'")
arcpy.Statistics_analysis(r"in_memory\notake_part","sum_NOTAKEpart",[["NO_TK_AREA","SUM"]])

# NOTE(review): time.clock() was removed in Python 3.8; if this script runs
# on 3.8+, 'start' and this call should both be time.perf_counter() instead.
elapsed_hours = (time.clock() - start)/3600
print(("Stage 1 took " + str(elapsed_hours) + " hours"))

##-------------------------------------------------------------------------------------------------------------------------
#Stage 2: National and National PAME analysis

print ("Stage 2 of 2: National & National PAME Analyses")

# Create the empty schema tables into which individual national summary
# statistics will be appended (current snapshot vs. temporal by status year,
# each with a PAME-assessed variant).
out_national_current_schema = arcpy.CreateTable_management(workspace,"out_national_current_schema")
arcpy.AddFields_management(out_national_current_schema,[['WDPA_ISO3','TEXT'],['type','TEXT'],['FREQUENCY','LONG'],['SUM_AREA_GEO','DOUBLE']])

out_national_temporal_schema = arcpy.CreateTable_management(workspace,"out_national_temporal_schema")
arcpy.AddFields_management(out_national_temporal_schema,[['WDPA_ISO3','TEXT'],['MIN_STATUS_YR','DOUBLE'],['type','TEXT'],['FREQUENCY','LONG'],['SUM_AREA_GEO','DOUBLE']])

out_national_current_schema_pame = arcpy.CreateTable_management(workspace,"out_national_current_schema_pame")
arcpy.AddFields_management(out_national_current_schema_pame,[['WDPA_ISO3','TEXT'],['type','TEXT'],['FREQUENCY','LONG'],['SUM_AREA_GEO','DOUBLE']])

out_national_temporal_schema_pame = arcpy.CreateTable_management(workspace,"out_national_temporal_schema_pame")
arcpy.AddFields_management(out_national_temporal_schema_pame,[['WDPA_ISO3','TEXT'],['MIN_STATUS_YR','DOUBLE'],['type','TEXT'],['FREQUENCY','LONG'],['SUM_AREA_GEO','DOUBLE']])

# Join the PAME evaluation list onto the combined polygon/buffered-point
# layer by WDPA ID so assessed sites can be identified.
arcpy.JoinField_management("all_wdpa_polybuffpnt","WDPAID",in_pame_sites,"wdpa_id","evaluation_id")

# update field (0) for those that don't have id