#Madeline Schwarz
#1/22/2020

#Reprojects feature classes in a desired folder to the spatial reference of another specified fc (similar to Batch Projection Tool)

import arcpy
from arcpy import env

inFolder = arcpy.GetParameterAsText(0)                                              #allows user to define input folder
target_projection_data = arcpy.GetParameterAsText(1)                                #define fc with desired spatial reference
arcpy.env.workspace = inFolder #defines env to desired user folder

out_coordinate_system = arcpy.Describe(target_projection_data).spatialReference     #sets output coordinate system as desired shapefile's

featureClassList = arcpy.ListFeatureClasses() #creates a list of feature classes in input folder
try:
    for input_features in featureClassList:
        in_coord = arcpy.Describe(input_features).spatialReference                  #describes original SR of feature class and sets as a variable
        if in_coord.name != out_coordinate_system.name:                             #if fc's SR doesn't match target fc...
            input_featuresName = input_features.replace(".shp","")                  #remove .shp from fc's original name
            output_feature_class = inFolder + "\\" + input_featuresName + "_projected2.shp"             #defines path and new name of reprojected fc
            arcpy.Project_management(input_features, output_feature_class, out_coordinate_system)       #execute project tool
            arcpy.AddMessage("Successfully Reprojected: " + output_feature_class)                       #print message in arcpy
            print ("Successfully Reprojected: " + output_feature_class)
        else:
            arcpy.AddMessage("skipped this feature class; already in proper SR: " + input_features)     #if fc already in desired SR, print message
            print("skipped this feature class; already in proper SR: " + input_features)
except Exception:
    arcpy.AddError("Projection Failed")
    print("Projection Failed")
    print(arcpy.GetMessages())
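
# If the same logic needs to run outside a script tool (where
# GetParameterAsText returns empty strings), a minimal standalone sketch
# with hypothetical paths could look like this:
import arcpy

inFolder = r"C:\data\shapefiles"                            #hypothetical input folder
target_projection_data = r"C:\data\reference\target.shp"    #hypothetical reference fc

arcpy.env.workspace = inFolder
out_sr = arcpy.Describe(target_projection_data).spatialReference

for fc in arcpy.ListFeatureClasses():
    if arcpy.Describe(fc).spatialReference.name != out_sr.name:
        out_fc = inFolder + "\\" + fc.replace(".shp", "") + "_projected2.shp"
        arcpy.Project_management(fc, out_fc, out_sr)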
Example #2
def updateFGDBfromSDE(fgdb, sde, logger=None):
    """
    fgdb: file geodatabase
    sde: sde geodatabase connection
    logger: agrc.logging.Logger (optional)

    returns: (String[], String[]) - the list of errors and the list of updated datasets

    Loops through the file geodatabase feature classes and looks for
    matches in the SDE database. If there is a match, it does a schema check
    and then updates the data.
    """
    global changes

    def log(msg):
        if logger:
            logger.logMsg(msg)
        else:
            print msg

    def updateData(isTable):
        try:
            # validate that there was not a schema change
            arcpy.env.workspace = fgdb
            layer = sdeFC + '_Layer'
            if not isTable:
                arcpy.MakeFeatureLayer_management(sdeFC, layer, '1 = 2')
            else:
                arcpy.MakeTableView_management(sdeFC, layer, '1 = 2')

            try:
                arcpy.Append_management(layer, f, 'TEST')
                log('schema test passed')
                passed = True
            except arcpy.ExecuteError as e:
                if '000466' in e.message:
                    log(e.message)
                    msg = 'schema change detected'
                    msg += '\n\n{0}'.format(getFieldDifferences(sdeFC, f))
                    errors.append('{}: {}'.format(f, msg))
                    log(msg)
                    passed = False
                    return passed
                else:
                    raise e
            arcpy.Delete_management(layer)

            log('checking for changes...')
            if checkForChanges(f, sdeFC, isTable) and passed:
                log('updating data...')
                arcpy.TruncateTable_management(f)

                # edit session required for data that participates in relationships
                editSession = arcpy.da.Editor(fgdb)
                editSession.startEditing(False, False)
                editSession.startOperation()

                fields = [fld.name for fld in arcpy.ListFields(f)]
                fields = filter_fields(fields)
                if not isTable:
                    fields.append('SHAPE@')
                    outputSR = arcpy.Describe(f).spatialReference
                else:
                    outputSR = None
                with arcpy.da.InsertCursor(f, fields) as icursor, \
                    arcpy.da.SearchCursor(sdeFC, fields, sql_clause=(None, 'ORDER BY OBJECTID'),
                                          spatial_reference=outputSR) as cursor:
                    for row in cursor:
                        icursor.insertRow(row)

                editSession.stopOperation()
                editSession.stopEditing(True)

                changes.append(f.upper())
            else:
                log('no changes found')
        except Exception:
            errors.append('Error updating: {}'.format(f))
            if logger:
                logger.logError()

    log('** Updating {} from {}'.format(fgdb, sde))
    errors = []

    # loop through local feature classes
    arcpy.env.workspace = fgdb
    fcs = arcpy.ListFeatureClasses() + arcpy.ListTables()
    totalFcs = len(fcs)
    i = 0
    for f in fcs:
        i = i + 1
        log('{} of {} | {}'.format(i, totalFcs, f))

        found = False

        # search for match in stand-alone feature classes
        arcpy.env.workspace = sde
        matches = arcpy.ListFeatureClasses(
            '*.{}'.format(f)) + arcpy.ListTables('*.{}'.format(f))
        if matches is not None and len(matches) > 0:
            match = matches[0]
            sdeFC = join(sde, match)
            found = True
        else:
            # search in feature datasets
            datasets = arcpy.ListDatasets()
            if len(datasets) > 0:
                # loop through datasets
                for ds in datasets:
                    matches = arcpy.ListFeatureClasses('*.{}'.format(f), None,
                                                       ds)
                    if matches is not None and len(matches) > 0:
                        match = matches[0]
                        sdeFC = join(sde, match)
                        found = True
                        break
        if not found:
            msg = 'no match found in sde'
            errors.append("{}: {}".format(f, msg))
            log(msg)
            continue

        updateData(arcpy.Describe(join(fgdb, f)).datasetType == 'Table')

    return (errors, changes)
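
# A hedged invocation sketch: the paths below are hypothetical, and the
# function leans on helpers defined elsewhere in the original module
# (checkForChanges, getFieldDifferences, filter_fields) plus the import
# and global shown here.
from os.path import join  # the function body calls join(sde, match)

changes = []  # module-level list that updateFGDBfromSDE appends to

errors, changed = updateFGDBfromSDE(r"C:\data\local.gdb",
                                    r"C:\connections\prod.sde")
for error in errors:
    print(error)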
def add_to_geodatabase(input_items, out_gdb, is_fds):
    """Adds items to a geodatabase."""
    added = 0
    skipped = 0
    errors = 0
    global processed_count
    global layer_name
    global existing_fields
    global new_fields
    global field_values

    for ds, out_name in input_items.iteritems():
        try:
            # -----------------------------------------------
            # If the item is a service layer, process and continue.
            # -----------------------------------------------
            if ds.startswith('http'):
                try:
                    service_layer = task_utils.ServiceLayer(ds)
                    arcpy.env.overwriteOutput = True
                    oid_groups = service_layer.object_ids
                    out_features = None
                    g = 0.
                    group_cnt = service_layer.object_ids_cnt
                    for group in oid_groups:
                        g += 1
                        group = [oid for oid in group if oid]
                        where = '{0} IN {1}'.format(service_layer.oid_field_name, tuple(group))
                        url = ds + "/query?where={}&outFields={}&returnGeometry=true&geometryType=esriGeometryPolygon&f=json".format(where, '*')
                        feature_set = arcpy.FeatureSet()
                        feature_set.load(url)
                        if not out_features:
                            out_features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, out_gdb))
                        else:
                            features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, out_gdb))
                            arcpy.Append_management(features, out_features, 'NO_TEST')
                            try:
                                arcpy.Delete_management(features)
                            except arcpy.ExecuteError:
                                pass
                        status_writer.send_percent(float(g) / group_cnt * 100, '', 'add_to_geodatabase')
                    processed_count += 1.
                    added += 1
                    status_writer.send_percent(processed_count / result_count, _('Added: {0}').format(ds), 'add_to_geodatabase')
                    continue
                except Exception as ex:
                    status_writer.send_state(status.STAT_WARNING, str(ex))
                    errors_reasons[ds] = ex.message
                    errors += 1
                    continue

            # ------------------------------
            # Is the input a mxd data frame.
            # ------------------------------
            map_frame_name = task_utils.get_data_frame_name(ds)
            if map_frame_name:
                ds = ds.split('|')[0].strip()

            # -------------------------------
            # Is the input a geometry feature
            # -------------------------------
            if isinstance(out_name, list):
                increment = task_utils.get_increment(result_count)
                for row in out_name:
                    try:
                        name = os.path.join(out_gdb, arcpy.ValidateTableName(ds, out_gdb))
                        # Create the geometry if it exists.
                        geom = None
                        try:
                            geo_json = row['[geo]']
                            geom = arcpy.AsShape(geo_json)
                            row.pop('[geo]')
                        except KeyError:
                            pass

                        if geom:
                            if not arcpy.Exists(name):
                                if arcpy.env.outputCoordinateSystem:
                                    arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                else:
                                    arcpy.env.outputCoordinateSystem = 4326
                                    arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer_{0}'.format(os.path.basename(name)))
                                existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
                                new_fields = []
                                field_values = []
                                for field, value in row.iteritems():
                                    valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                    new_fields.append(valid_field)
                                    field_values.append(value)
                                    arcpy.AddField_management(layer_name, valid_field, 'TEXT')
                            else:
                                if not geom.type.upper() == arcpy.Describe(name).shapeType.upper():
                                    name = arcpy.CreateUniqueName(os.path.basename(name), out_gdb)
                                    if arcpy.env.outputCoordinateSystem:
                                        arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                    else:
                                        arcpy.env.outputCoordinateSystem = 4326
                                        arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                    layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer_{0}'.format(os.path.basename(name)))
                                    existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
                                    new_fields = []
                                    field_values = []
                                    for field, value in row.iteritems():
                                        valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                        new_fields.append(valid_field)
                                        field_values.append(value)
                                        if valid_field not in existing_fields:
                                            arcpy.AddField_management(layer_name, valid_field, 'TEXT')
                        else:
                            if not arcpy.Exists(name):
                                arcpy.CreateTable_management(out_gdb, os.path.basename(name))
                                view_name = arcpy.MakeTableView_management(name, 'tableview')
                                existing_fields = [f.name for f in arcpy.ListFields(view_name)]
                                new_fields = []
                                field_values = []
                                for field, value in row.iteritems():
                                    valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                    new_fields.append(valid_field)
                                    field_values.append(value)
                                    if valid_field not in existing_fields:
                                        arcpy.AddField_management(view_name, valid_field, 'TEXT')


                        if geom:
                            with arcpy.da.InsertCursor(layer_name, ["SHAPE@"] + new_fields) as icur:
                                icur.insertRow([geom] + field_values)
                        else:
                            with arcpy.da.InsertCursor(view_name, new_fields) as icur:
                                icur.insertRow(field_values)

                        processed_count += 1
                        if (processed_count % increment) == 0:
                            status_writer.send_percent(float(processed_count) / result_count, _('Added: {0}').format(row['name']), 'add_to_geodatabase')
                        added += 1
                        continue
                    except Exception as ex:
                        processed_count += 1
                        errors += 1
                        errors_reasons[name] = ex.message
                        continue
                continue
            # -----------------------------
            # Check the data type and clip.
            # -----------------------------
            dsc = arcpy.Describe(ds)
            if dsc.dataType == 'FeatureClass':
                if out_name == '':
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'ShapeFile':
                if out_name == '':
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(dsc.name[:-4], out_gdb))
                else:
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'FeatureDataset':
                if not is_fds:
                    fds_name = os.path.basename(task_utils.create_unique_name(dsc.name, out_gdb))
                    fds = arcpy.CreateFeatureDataset_management(out_gdb, fds_name).getOutput(0)
                else:
                    fds = out_gdb
                arcpy.env.workspace = dsc.catalogPath
                for fc in arcpy.ListFeatureClasses():
                    name = os.path.basename(task_utils.create_unique_name(fc, out_gdb))
                    arcpy.CopyFeatures_management(fc, os.path.join(fds, name))
                arcpy.env.workspace = out_gdb

            elif dsc.dataType == 'RasterDataset':
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRaster_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRaster_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'RasterCatalog':
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRasterCatalogItems_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRasterCatalogItems_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'Layer':
                layer_from_file = arcpy.mapping.Layer(dsc.catalogPath)
                layers = arcpy.mapping.ListLayers(layer_from_file)
                for layer in layers:
                    if out_name == '':
                        name = task_utils.create_unique_name(layer.name, out_gdb)
                    else:
                        name = task_utils.create_unique_name(out_name, out_gdb)
                    if layer.isFeatureLayer:
                        arcpy.CopyFeatures_management(layer.dataSource, name)
                    elif layer.isRasterLayer:
                        if is_fds:
                            name = os.path.dirname(name)
                        arcpy.CopyRaster_management(layer.dataSource, name)

            elif dsc.dataType == 'CadDrawingDataset':
                arcpy.env.workspace = dsc.catalogPath
                cad_wks_name = os.path.splitext(dsc.name)[0]
                for cad_fc in arcpy.ListFeatureClasses():
                    arcpy.CopyFeatures_management(
                        cad_fc,
                        task_utils.create_unique_name('{0}_{1}'.format(cad_wks_name, cad_fc), out_gdb)
                    )
                arcpy.env.workspace = out_gdb

            elif dsc.dataType == 'File':
                if dsc.catalogPath.endswith('.kml') or dsc.catalogPath.endswith('.kmz'):
                    name = os.path.splitext(dsc.name)[0]
                    temp_dir = tempfile.mkdtemp()
                    kml_layer = arcpy.KMLToLayer_conversion(dsc.catalogPath, temp_dir, name)
                    group_layer = arcpy.mapping.Layer(os.path.join(temp_dir, '{}.lyr'.format(name)))
                    for layer in arcpy.mapping.ListLayers(group_layer):
                        if layer.isFeatureLayer:
                            arcpy.CopyFeatures_management(layer, task_utils.create_unique_name(layer, out_gdb))
                        elif layer.isRasterLayer:
                            if is_fds:
                                out_gdb = os.path.dirname(out_gdb)
                            arcpy.CopyRaster_management(layer, task_utils.create_unique_name(layer, out_gdb))
                    # Clean up temp KML results.
                    arcpy.Delete_management(os.path.join(temp_dir, '{}.lyr'.format(name)))
                    arcpy.Delete_management(kml_layer)
                else:
                    processed_count += 1
                    status_writer.send_percent(processed_count / result_count, _('Invalid input type: {0}').format(dsc.name), 'add_to_geodatabase')
                    skipped += 1
                    skipped_reasons[ds] = _('Invalid input type: {0}').format(dsc.dataType)
                    continue

            elif dsc.dataType == 'MapDocument':
                mxd = arcpy.mapping.MapDocument(dsc.catalogPath)
                if map_frame_name:
                    df = arcpy.mapping.ListDataFrames(mxd, map_frame_name)[0]
                    layers = arcpy.mapping.ListLayers(mxd, data_frame=df)
                else:
                    layers = arcpy.mapping.ListLayers(mxd)
                for layer in layers:
                    if layer.isFeatureLayer:
                        arcpy.CopyFeatures_management(layer.dataSource,
                                                      task_utils.create_unique_name(layer.name, out_gdb))
                    elif layer.isRasterLayer:
                        if is_fds:
                            out_gdb = os.path.dirname(out_gdb)
                        arcpy.CopyRaster_management(layer.dataSource,
                                                    task_utils.create_unique_name(layer.name, out_gdb))
                table_views = arcpy.mapping.ListTableViews(mxd)
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                for table_view in table_views:
                    arcpy.CopyRows_management(table_view.dataSource,
                                              task_utils.create_unique_name(table_view.name, out_gdb))
                out_gdb = arcpy.env.workspace

            elif dsc.dataType.find('Table') > 0:
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRows_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRows_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            else:
                # Try to copy any other types such as topologies, network datasets, etc.
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                arcpy.Copy_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))

            out_gdb = arcpy.env.workspace
            processed_count += 1.
            status_writer.send_percent(processed_count / result_count, _('Added: {0}').format(ds), 'add_to_geodatabase')
            status_writer.send_status(_('Added: {0}').format(ds))
            added += 1
        # Continue if an error. Process as many as possible.
        except Exception as ex:
            processed_count += 1
            status_writer.send_percent(processed_count / result_count, _('Skipped: {0}').format(ds), 'add_to_geodatabase')
            status_writer.send_status(_('FAIL: {0}').format(repr(ex)))
            errors_reasons[ds] = repr(ex)
            errors += 1
            continue

    return added, errors, skipped
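
# For context, input_items maps each source dataset (a path, a service
# URL, or a list of geometry rows) to its output name. A hypothetical
# call, assuming the module's globals (status_writer, task_utils,
# result_count, errors_reasons, skipped_reasons, processed_count) are
# already initialized:
input_items = {
    r"C:\data\roads.shp": "roads",
    "http://example.com/arcgis/rest/services/parcels/MapServer/0": "parcels",
}
added, errors, skipped = add_to_geodatabase(
    input_items, r"C:\output\results.gdb", is_fds=False)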
Example #4
def main():
    arcpy.env.workspace = MAZ_SHAPEFILE_PATH
    featureClasses = arcpy.ListFeatureClasses()
    if (os.path.exists(TARGET_FC_PATH)):
        arcpy.Delete_management(TARGET_FC_PATH)
        print "    Deleted existing merge feature class " + TARGET_FC_NAME

    # Create the target feature class. Use an MAZ shapefile as the template schema.
    arcpy.CreateFeatureclass_management(
        out_path=OUT_SHAPEFILE_PATH,
        out_name=TARGET_FC_NAME,
        template=MAZ_SHAPEFILE_PATH + os.sep + featureClasses[0],
        spatial_reference=MAZ_SHAPEFILE_PATH + os.sep + featureClasses[0])
    # delete the MAZ field from the target
    arcpy.DeleteField_management(TARGET_FC_PATH, "MAZ")
    # add the renumbered zone field, N
    arcpy.AddField_management(in_table=TARGET_FC_PATH,
                              field_name="N",
                              field_type="LONG")
    print "    Created merge feature class " + TARGET_FC_NAME

    #loop through all MAZ FCs, add N field, and set it
    for fc in featureClasses:
        workOnFc(
            fc, "MAZ", "!MAZ! + (!COUNTY! - 1) * " + str(COUNTY_OFFSET) +
            " + " + str(MAZ_OFFSET))
        # fieldsList = [f.name for f in arcpy.Describe(fc).fields]
        # if ("COUNTY" not in fieldsList):
        # raise Exception("Feature class {0} missing field 'COUNTY'".format(fc))
        # if ("MAZ" not in fieldsList):
        # raise Exception("Feature class {0} missing field 'MAZ'".format(fc))
        # if ("N" in fieldsList):
        # arcpy.DeleteField_management(fc, "N")
        # arcpy.AddField_management(in_table = fc, field_name = "N", field_type = "LONG")
        # arcpy.CalculateField_management(fc, "N",
        # "!MAZ! + (!COUNTY! - 1) * " + str(COUNTY_OFFSET) + " + " + str(MAZ_OFFSET),
        # "PYTHON")
    print "    Beginning MAZ Merge"
    arcpy.Append_management(featureClasses, TARGET_FC_PATH, "NO_TEST")
    mazCount = int(arcpy.GetCount_management(TARGET_FC_PATH).getOutput(0))
    print "         finished MAZ Merge"
    print "         merged " + str(mazCount) + " MAZs"

    # now merge in all the TAZs
    arcpy.env.workspace = TAZ_SHAPEFILE_PATH
    featureClasses = arcpy.ListFeatureClasses()
    print "found {0} TAZ centroids in {1}".format(len(featureClasses),
                                                  TAZ_SHAPEFILE_PATH)
    for fc in featureClasses:
        workOnFc(fc, "TAZ", "!TAZ! + (!COUNTY! - 1) * " + str(COUNTY_OFFSET))
        # fieldsList = [f.name for f in arcpy.Describe(fc).fields]
        # if ("COUNTY" not in fieldsList):
        # raise Exception("Feature class {0} missing field 'COUNTY'".format(fc))
        # if ("TAZ" not in fieldsList):
        # raise Exception("Feature class {0} missing field 'TAZ'".format(fc))
        # if ("N" in fieldsList):
        # arcpy.DeleteField_management(fc, "N")
        # arcpy.AddField_management(in_table = fc, field_name = "N", field_type = "LONG")
        # arcpy.CalculateField_management(fc, "N", "!TAZ! + (!COUNTY! - 1) * " + str(COUNTY_OFFSET),
        # "PYTHON")
    if (len(featureClasses) > 0):
        print "    Beginning TAZ Merge"
        arcpy.Append_management(featureClasses, TARGET_FC_PATH, "NO_TEST")
        tazCount = int(arcpy.GetCount_management(TARGET_FC_PATH).getOutput(0))
        print "         finished TAZ Merge"
        print "         merged " + str(tazCount - mazCount) + " TAZs"
    print "Merged shapefile written to " + TARGET_FC_PATH
Example #5
import datetime

import arcpy
from arcpy import env

in_GDBPaths = arcpy.GetParameterAsText(0)    #semicolon-delimited list of input GDB paths (inferred parameter 0)
saveGDBDir = arcpy.GetParameterAsText(1)
save_spatialReference = arcpy.GetParameterAsText(2)

gdbPathList = in_GDBPaths.split(';')

pFeaDsName = []  #dataset name list
pFeaClsPathList = []
pFeaName = []

for gdbPath in gdbPathList:
    env.workspace = gdbPath
    dsList = arcpy.ListDatasets()
    for ds in dsList:
        pFeaDsName.append(ds)  #get dataset name list

        FeaClsList = arcpy.ListFeatureClasses(feature_dataset=ds)
        for lis in FeaClsList:
            pFeaClsPathList.append(gdbPath + "\\" + ds + "\\" +
                                   lis)  #get featureClass path list
            pFeaName.append(lis)  # get featureClass name list
arcpy.AddMessage("Get Datasets and FeatureClasses  SuccessFully!")

pFeaName = list(set(pFeaName))
pFeaDsName = list(set(pFeaDsName))

#create db and dataset
time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
dbName = "MergeDB_" + time
arcpy.CreateFileGDB_management(saveGDBDir, dbName)
arcpy.AddMessage("Created file GDB successfully!")
dbpath = saveGDBDir + "\\" + dbName + ".gdb"
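
# The snippet stops after the file GDB is created; a plausible
# continuation (a sketch, not the original code) would copy each
# collected feature class into it:
import os

for fcPath in pFeaClsPathList:
    # Name collisions across source GDBs are not handled in this sketch.
    arcpy.FeatureClassToFeatureClass_conversion(
        fcPath, dbpath, os.path.basename(fcPath))
arcpy.AddMessage("Copied feature classes into " + dbpath)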
Example #6
import sys

#reconstructed wrapper implied by the setArcPy() call below
arcpy = None

def setArcPy():
    global arcpy
    if arcpy is None:
        import arcpy


if len(sys.argv) != 3:
    print "Usage:  List03.py <FeatureClassName> <FeatureType>\n"
    print "where FeatureType is one of "
    print "     'Annotation', 'Arc', 'Dimension',"
    print "     'Edge', 'Junction', 'Label', 'Line', 'Multipatch', 'Node', 'Point', "
    print "     'Polygon', 'Polyline', 'Region', 'Route', 'Tic', 'All'"
    sys.exit()

else:
    setArcPy()
    fcdirectory = sys.argv[1]
    fctype = sys.argv[2]
    fctypes = ['Annotation', 'Arc', 'Dimension', 'Edge', 'Junction', 'Label',
               'Line', 'Multipatch', 'Node', 'Point', 'Polygon', 'Polyline', 'Region', 'Route', 'Tic', 'All']
    if fctype not in fctypes:
        print "Feature type '{}' is not valid".format(fctype)
        sys.exit()


    arcpy.env.workspace = fcdirectory
    if arcpy.Exists(fcdirectory):
        fclist = arcpy.ListFeatureClasses("",fctype)
        for fc in fclist:
            descFC=arcpy.Describe(fc)
            print descFC.baseName

    else:
        print "{} does not exists".format(fcdirectory)
def main_create_shape_files():
    stewardship_site_csv = read_csv('stewardship_site')
    users_csv = read_csv('users')

    #################################################################
    #
    #           border
    #
    #################################################################
    csv_records = read_csv('border')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("user_id", "SHORT"),
                 ("email", "TEXT"), ("f_name", "TEXT"), ("l_name", "TEXT"))
    fcs = create_feature_classes('border', fc_fields)

    for fc in fcs:
        with arcpy.da.InsertCursor(
                fc,
            ['SHAPE@'] + list(tuple(item[0] for item in fc_fields))) as cursor:
            for i, csv_record in csv_records.iterrows():
                s_site = get_stewardship_site_info(
                    stewardship_site_csv, csv_record['stewardshipsite_id'])
                u_info = get_user_info(users_csv, csv_record['user_id'])
                geo_object = get_geo_object(csv_record['coordinates'],
                                            arcpy.Describe(fc).shapeType)
                if geo_object is not None:
                    cursor.insertRow(
                        (geo_object, csv_record['id'],
                         csv_record['stewardshipsite_id'], s_site['site'],
                         s_site['county'], s_site['kml'],
                         csv_record['user_id'], u_info['email'],
                         u_info['f_name'], u_info['l_name']))
        del cursor

    #################################################################
    #
    #           brush
    #
    #################################################################
    csv_records = read_csv('brush')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("date", "TEXT"),
                 ("title", "TEXT"), ("descr", "TEXT"), ("user_id", "SHORT"),
                 ("email", "TEXT"), ("f_name", "TEXT"), ("l_name", "TEXT"))
    fcs = create_feature_classes('brush', fc_fields)

    for fc in fcs:
        with arcpy.da.InsertCursor(
                fc,
            ['SHAPE@'] + list(tuple(item[0] for item in fc_fields))) as cursor:
            for i, csv_record in csv_records.iterrows():
                s_site = get_stewardship_site_info(
                    stewardship_site_csv, csv_record['stewardshipsite_id'])
                u_info = get_user_info(users_csv, csv_record['user_id'])
                geo_object = get_geo_object(csv_record['coordinates'],
                                            arcpy.Describe(fc).shapeType)
                if geo_object is not None:
                    cursor.insertRow(
                        (geo_object, csv_record['id'],
                         csv_record['stewardshipsite_id'], s_site['site'],
                         s_site['county'], s_site['kml'], csv_record['date'],
                         csv_record['title'], csv_record['description'],
                         csv_record['user_id'], u_info['email'],
                         u_info['f_name'], u_info['l_name']))
        del cursor

    #################################################################
    #
    #           landmark
    #
    #################################################################
    csv_records = read_csv('landmark')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("name", "TEXT"),
                 ("descr", "TEXT"), ("user_id", "SHORT"), ("email", "TEXT"),
                 ("f_name", "TEXT"), ("l_name", "TEXT"))
    fcs = create_feature_classes('landmark', fc_fields)

    for fc in fcs:
        with arcpy.da.InsertCursor(
                fc,
            ['SHAPE@'] + list(tuple(item[0] for item in fc_fields))) as cursor:
            for i, csv_record in csv_records.iterrows():
                s_site = get_stewardship_site_info(
                    stewardship_site_csv, csv_record['stewardshipsite_id'])
                u_info = get_user_info(users_csv, csv_record['user_id'])
                geo_object = get_geo_object(csv_record['coordinates'],
                                            arcpy.Describe(fc).shapeType)
                if geo_object is not None:
                    cursor.insertRow(
                        (geo_object, csv_record['id'],
                         csv_record['stewardshipsite_id'], s_site['site'],
                         s_site['county'], s_site['kml'], csv_record['name'],
                         csv_record['description'], csv_record['user_id'],
                         u_info['email'], u_info['f_name'], u_info['l_name']))
        del cursor

    #################################################################
    #
    #           other
    #
    #################################################################
    csv_records = read_csv('other')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("date", "TEXT"),
                 ("title", "TEXT"), ("descr", "TEXT"), ("user_id", "SHORT"),
                 ("email", "TEXT"), ("f_name", "TEXT"), ("l_name", "TEXT"))
    fcs = create_feature_classes('other', fc_fields)

    for fc in fcs:
        with arcpy.da.InsertCursor(
                fc,
            ['SHAPE@'] + list(tuple(item[0] for item in fc_fields))) as cursor:
            for i, csv_record in csv_records.iterrows():
                s_site = get_stewardship_site_info(
                    stewardship_site_csv, csv_record['stewardshipsite_id'])
                u_info = get_user_info(users_csv, csv_record['user_id'])
                geo_object = get_geo_object(csv_record['coordinates'],
                                            arcpy.Describe(fc).shapeType)
                if geo_object is not None:
                    cursor.insertRow(
                        (geo_object, csv_record['id'],
                         csv_record['stewardshipsite_id'], s_site['site'],
                         s_site['county'], s_site['kml'], csv_record['date'],
                         csv_record['title'], csv_record['description'],
                         csv_record['user_id'], u_info['email'],
                         u_info['f_name'], u_info['l_name']))
        del cursor

    #################################################################
    #
    #           seed
    #
    #################################################################
    csv_records = read_csv('seed')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("date", "TEXT"),
                 ("title", "TEXT"), ("descr", "TEXT"), ("user_id", "SHORT"),
                 ("email", "TEXT"), ("f_name", "TEXT"), ("l_name", "TEXT"))
    fcs = create_feature_classes('seed', fc_fields)

    for fc in fcs:
        with arcpy.da.InsertCursor(
                fc,
            ['SHAPE@'] + list(tuple(item[0] for item in fc_fields))) as cursor:
            for i, csv_record in csv_records.iterrows():
                s_site = get_stewardship_site_info(
                    stewardship_site_csv, csv_record['stewardshipsite_id'])
                u_info = get_user_info(users_csv, csv_record['user_id'])
                geo_object = get_geo_object(csv_record['coordinates'],
                                            arcpy.Describe(fc).shapeType)
                if geo_object is not None:
                    cursor.insertRow(
                        (geo_object, csv_record['id'],
                         csv_record['stewardshipsite_id'], s_site['site'],
                         s_site['county'], s_site['kml'], csv_record['date'],
                         csv_record['title'], csv_record['description'],
                         csv_record['user_id'], u_info['email'],
                         u_info['f_name'], u_info['l_name']))
        del cursor

    #################################################################
    #
    #           trails
    #
    #################################################################
    csv_records = read_csv('trails')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("name", "TEXT"),
                 ("user_id", "SHORT"), ("email", "TEXT"), ("f_name", "TEXT"),
                 ("l_name", "TEXT"))
    fcs = create_feature_classes('trails', fc_fields)

    for fc in fcs:
        if arcpy.Describe(fc).shapeType == 'Polyline':
            with arcpy.da.InsertCursor(
                    fc,
                ['SHAPE@'] + list(tuple(item[0]
                                        for item in fc_fields))) as cursor:
                for i, csv_record in csv_records.iterrows():
                    s_site = get_stewardship_site_info(
                        stewardship_site_csv, csv_record['stewardshipsite_id'])
                    u_info = get_user_info(users_csv, csv_record['user_id'])
                    geo_object = get_geo_line(csv_record['coordinates'])
                    if geo_object is not None:
                        cursor.insertRow(
                            (geo_object, csv_record['id'],
                             csv_record['stewardshipsite_id'], s_site['site'],
                             s_site['county'], s_site['kml'],
                             csv_record['name'], csv_record['user_id'],
                             u_info['email'], u_info['f_name'],
                             u_info['l_name']))
            del cursor

    #################################################################
    #
    #           weed
    #
    #################################################################
    csv_records = read_csv('weed')
    fc_fields = (("id_2", "SHORT"), ("site_id", "SHORT"), ("site", "TEXT"),
                 ("county", "TEXT"), ("kml", "TEXT"), ("date", "TEXT"),
                 ("title", "TEXT"), ("descr", "TEXT"), ("user_id", "SHORT"),
                 ("email", "TEXT"), ("f_name", "TEXT"), ("l_name", "TEXT"))
    fcs = create_feature_classes('weed', fc_fields)

    for fc in fcs:
        with arcpy.da.InsertCursor(
                fc,
            ['SHAPE@'] + list(tuple(item[0] for item in fc_fields))) as cursor:
            for i, csv_record in csv_records.iterrows():
                s_site = get_stewardship_site_info(
                    stewardship_site_csv, csv_record['stewardshipsite_id'])
                u_info = get_user_info(users_csv, csv_record['user_id'])
                geo_object = get_geo_object(csv_record['coordinates'],
                                            arcpy.Describe(fc).shapeType)
                if geo_object is not None:
                    cursor.insertRow(
                        (geo_object, csv_record['id'],
                         csv_record['stewardshipsite_id'], s_site['site'],
                         s_site['county'], s_site['kml'], csv_record['date'],
                         csv_record['title'], csv_record['description'],
                         csv_record['user_id'], u_info['email'],
                         u_info['f_name'], u_info['l_name']))
        del cursor

    # Make another pass to add additional fields and delete empty shape files
    fcs = arcpy.ListFeatureClasses()
    for fc in fcs:
        if arcpy.management.GetCount(fc)[0] == "0":
            arcpy.Delete_management(fc)
        else:
            arcpy.AddField_management(fc, 'centroid', 'TEXT')
            arcpy.CalculateField_management(fc, 'centroid', '!SHAPE.centroid!',
                                            'PYTHON')
            if arcpy.Describe(fc).shapeType == 'Polyline':
                arcpy.AddField_management(fc, 'length', 'LONG')
                arcpy.CalculateField_management(
                    fc, 'length', 'int(float(!SHAPE.length@feet!))', 'PYTHON')
            if arcpy.Describe(fc).shapeType == 'Polygon':
                arcpy.AddField_management(fc, 'area', 'LONG')
                arcpy.AddField_management(fc, 'p_length', 'LONG')
                arcpy.CalculateField_management(
                    fc, 'area', 'int(float(!SHAPE.area@squarefeet!))',
                    'PYTHON')
                arcpy.CalculateField_management(
                    fc, 'p_length', 'int(float(!SHAPE.length@feet!))',
                    'PYTHON')
Example #8
                fieldinfo.addField(field.name, field.name, "HIDDEN", "")

        compView = "compView"
        if arcpy.Exists(compView):
            arcpy.Delete_management(compView)

##        if onlyMajorComp == "true":
##            # The created component_view layer will have fields as set in fieldinfo object
##            majCompFlagYes = arcpy.AddFieldDelimiters(compTablePath,majorField) + " = 'Yes'"
##            arcpy.MakeTableView_management(compTablePath, compView, majCompFlagYes, "", fieldinfo)
##
##        else:
        arcpy.MakeTableView_management(compTablePath, compView, "", "",
                                       fieldinfo)
        """ ------------------------------------ Prepare MUPOLYGON Layer --------------------------"""
        fcList = arcpy.ListFeatureClasses("MUPOLYGON", "Polygon")

        if not len(fcList):
            raise ExitError, "\nMUPOLYGON was not found in " + os.path.basename(
                ssurgoFGDB)

        muPolygonPath = arcpy.env.workspace + os.sep + fcList[0]
        muPolyMUKEY = FindField(fcList[0], "mukey")

        if not muPolyMUKEY:
            raise ExitError, "\nMUPOLYGON feature class is missing MUKEY field"

        # Create a feature layer from the MUPOLYGON
        muPolyLayer = "muPolyLayer"
        if arcpy.Exists(muPolyLayer):
            arcpy.Delete_management(muPolyLayer)
Example #9
def listfolder(path):
    arcpy.env.workspace = path

    featureclass = arcpy.ListFeatureClasses()
    raster = arcpy.ListRasters()
    cadlist = arcpy.ListDatasets("*.dwg")
    workspace = arcpy.ListWorkspaces()
    mxd = arcpy.ListFiles("*.mxd")

    print(len(featureclass))
    csvfile.writerow(["The shapefiles in this folder are Listed Below:"])
    for fc in featureclass:
        desc = arcpy.Describe(fc)
        try:
            csvfile.writerow([
                desc.name, "Shapefile", arcpy.env.workspace + "\\" + fc,
                desc.featureType + " " + desc.shapeType,
                desc.spatialReference.name,
                arcpy.GetCount_management(fc)
            ])
        except Exception:
            print(fc + " could not be opened")
            continue

    csvfile.writerow([""])
    csvfile.writerow(["The CAD datasets within this folder are listed below:"])
    print(len(cadlist))
    for cad in cadlist:
        try:
            desc = arcpy.Describe(cad)
            csvfile.writerow([
                desc.name, "CAD File", arcpy.env.workspace + "\\" + cad,
                desc.spatialReference.name
            ])
        except Exception:
            print("Could not open CAD data")
            continue

    csvfile.writerow([""])
    csvfile.writerow(["The rasters within this folder are listed below:"])
    for ras in raster:
        try:
            desc = arcpy.Describe(ras)
            csvfile.writerow([
                desc.name, desc.format, arcpy.env.workspace + "\\" + ras,
                desc.compressionType, desc.spatialReference.name
            ])
        except Exception:
            print(ras + " could not be opened")
            continue

    for maps in mxd:
        #use a distinct name so the mxd file list is not shadowed
        mxd_doc = arcpy.mapping.MapDocument(arcpy.env.workspace + "\\" + maps)
        csvfile.writerow(
            ["The Projections for " + maps + " dataframes are listed below"])
        for df in arcpy.mapping.ListDataFrames(mxd_doc):
            csvfile.writerow([df.name, df.spatialReference.name])

        csvfile.writerow(["A list of the layers in " + maps])
        for lyr in arcpy.mapping.ListLayers(mxd_doc):
            if lyr.supports("DATASOURCE"):
                csvfile.writerow([lyr.datasetName, lyr.dataSource])
        csvfile.writerow([""])

    csvfile.writerow([""])
    csvfile.writerow([""])
    for work in workspace:
        print(work)
        if work.endswith(".gdb"):
            #print(work)
            listgeodatabase(work)
            #call list file geodatabase function
        elif os.path.isdir(work):
            #print(work)
            listfolder(work)

    file.flush()
Example #10
import datetime
import time

import arcpy

#  temp workspace variable ===================
tempsp = r"D:\LiDAR_factor\tempStats.gdb"

#------PUT IN NAME OF LOGFILE!!!!!!  ********************************************************
"""scriptName = sys.argv[0]
logName = sys.argv[0].split("\\")[len(sys.argv[0].split("\\")) - 1][0:-3]
logfile = logpath + "\\" + logName + ".log"
outfile = open(logfile ,'w')

#------OPEN LOG FILE AND PUT IN NAME OF PYTHON SCRIPT!!!!!!********************************************************
outfile.write('\n' + "WORKSPACE: " + ws  + '\n' + scriptName + "----------------------------------------" '\n')
outfile.close()"""


#Loop through the list of feature classes
fcs = arcpy.ListFeatureClasses("", "")


for fc in fcs:

    #  important name variables ======================    
    
    #BookSec = fc[6:]
    BookSec = fc[6:11]
        
    # create a tuple of local time data
    timeYearMonDay = datetime.date.today()
    timeHour = time.localtime()[3]
    timeMin = time.localtime()[4]
    
    #Record the fc and the time processing starts in the log file
Example #11
        #for each feature class clip to the clip feature memClipFeature
        memClip = arcpy.Clip_analysis(inputFC, memClipFeature,
                                      "in_memory\\outClipFeatureClass")

        #need to copy from in_memory to folder to the desktop folder
        FCmem = arcpy.CopyFeatures_management(memClip, outClipFeatureClass)

        ### keep log of number of feature classes that are empty...
        if arcpy.management.GetCount(FCmem)[0] == "0":
            emptFC += 1
        arcpy.Delete_management(memClip)

        #counter used to determine if loop is completely finished and ready to move on to CAD conversion
        if fileCount == 0:
            arcpy.Delete_management(memClipFeature)
            dwgShapefiles = arcpy.ListFeatureClasses()
            outCAD = arcpy.ExportCAD_conversion(dwgShapefiles, "DWG_R2013",
                                                "{0}.dwg".format(x),
                                                "IGNORE_FILENAMES_IN_TABLES",
                                                "APPEND_TO_EXISTING_FILES", "")
            #### ZIPFILE BELOW ####
            shutil.make_archive(segFold, 'zip', folderPath)
# End process...
endtime = datetime.datetime.now()

# Total feature classes in clip that are empty
f.write('Total number of empty input feature classes: ' + str(emptFC) + '\n')
f.write('\n')

# Process Completed...
f.write('CAD extractions completed successfully in ' +
Example #12
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")

env.overwriteOutput = True

beginTime = time.clock()

rawFolder = "C:/Data/cci_connectivity/scratch/intersected_spp"

outFolder = "C:/Data/cci_connectivity/scratch/intersected_spp"

tempFolder = "C:/Data/cci_connectivity/scratch"

env.workspace = rawFolder + "/"

fcList = arcpy.ListFeatureClasses("*int_*")
i = 0

print fcList

for fc in fcList[0:1]:
    in_feature_class = outFolder + "/" + "_" + fc    #moved inside the loop, where fc is defined
    beginTime2 = time.clock()
    print "Aggregating nodes using a raster buffer: \nN.B. currently this is lat long only and so varies with latitude, \n so further work could involve a equal area or equidistant version."
    cellSize = "0.005"
    expansionFactor = "1"
    print "Cellsize is {0} and expansion is {1}".format(
        cellSize, expansionFactor)
    rast = tempFolder + "/" + "tempRast.tif"
    rastExp = tempFolder + "/" + "tempRastExp.tif"
Example #13
def run_job(esri_job):
    """Determines the data type and each dataset is sent to the worker to be processed."""
    status_writer.send_percent(0.0, "Initializing... 0.0%", 'esri_worker')
    job = esri_job

    # if job.path.startswith('http'):
    if job.service_connection:
        global_job(job)
        worker(job.service_connection, esri_service=True)
        return

    dsc = arcpy.Describe(job.path)
    # A single feature class or table.
    if dsc.dataType in ('DbaseTable', 'FeatureClass', 'ShapeFile', 'Shapefile',
                        'Table'):
        global_job(job, int(arcpy.GetCount_management(job.path).getOutput(0)))
        job.tables_to_keep()  # This will populate field mapping.
        worker(job.path)
        return

    # A folder (for shapefiles).
    elif dsc.dataType == 'Folder':
        arcpy.env.workspace = job.path
        tables = []
        tables_to_keep = job.tables_to_keep()
        tables_to_skip = job.tables_to_skip()
        if tables_to_keep:
            for t in tables_to_keep:
                [
                    tables.append(os.path.join(job.path, fc))
                    for fc in arcpy.ListFeatureClasses(t)
                ]
        else:
            [
                tables.append(os.path.join(job.path, fc))
                for fc in arcpy.ListFeatureClasses()
            ]

        if tables_to_skip:
            for t in tables_to_skip:
                [
                    tables.remove(os.path.join(job.path, fc))
                    for fc in arcpy.ListFeatureClasses(t)
                ]

    # A geodatabase (.mdb, .gdb, or .sde).
    elif dsc.dataType == 'Workspace':
        # Create a geodatabase entry with links to tables.
        gdb_links = []
        gdb_entry = {}
        gdb_properties = {}
        gdb_properties['id'] = job.location_id + os.path.splitext(dsc.name)[0]
        gdb_properties['name'] = dsc.name
        gdb_properties['path'] = dsc.catalogPath
        gdb_properties['_discoveryID'] = job.discovery_id
        gdb_properties['format'] = dsc.workspaceFactoryProgID
        if hasattr(dsc, 'domains'):
            if dsc.domains:
                gdb_properties['meta_has_domains'] = True
            else:
                gdb_properties['meta_has_domains'] = 'false'
        if dsc.release == '3,0,0':
            gdb_properties[
                'fs_arcgis_version'] = "10.0, 10.1, 10.2, 10.3, 10.4, 10.5 or ArcGIS Pro 1.0, 1.1, 1.2"
        elif dsc.release == '2,3,0':
            gdb_properties['fs_arcgis_version'] = "9.3, 9.3.1"
        else:
            gdb_properties['fs_arcgis_version'] = "9.2"
        if hasattr(dsc.connectionProperties, 'version'):
            cp = dsc.connectionProperties
            gdb_properties['fs_server'] = cp.server
            gdb_properties['fs_instance'] = cp.instance
            gdb_properties['fs_database'] = cp.database
            gdb_properties['fs_version'] = cp.version
        gdb_entry['location'] = job.location_id
        gdb_entry['action'] = job.action_type
        gdb_entry['entry'] = {'fields': gdb_properties}

        arcpy.env.workspace = job.path
        feature_datasets = arcpy.ListDatasets('*', 'Feature')
        tables = []
        tables_to_keep = job.tables_to_keep()
        tables_to_skip = job.tables_to_skip()
        if tables_to_keep:
            for t in tables_to_keep:
                [
                    tables.append(os.path.join(job.path, tbl))
                    for tbl in arcpy.ListTables(t)
                ]
                [
                    tables.append(os.path.join(job.path, fc))
                    for fc in arcpy.ListFeatureClasses(t)
                ]
                for fds in feature_datasets:
                    [
                        tables.append(os.path.join(job.path, fds, fc))
                        for fc in arcpy.ListFeatureClasses(wild_card=t,
                                                           feature_dataset=fds)
                    ]
        else:
            [
                tables.append(os.path.join(job.path, tbl))
                for tbl in arcpy.ListTables()
            ]
            [
                tables.append(os.path.join(job.path, fc))
                for fc in arcpy.ListFeatureClasses()
            ]
            for fds in feature_datasets:
                [
                    tables.append(os.path.join(job.path, fds, fc))
                    for fc in arcpy.ListFeatureClasses(feature_dataset=fds)
                ]

        if tables_to_skip:
            for t in tables_to_skip:
                [
                    tables.remove(os.path.join(job.path, tbl))
                    for tbl in arcpy.ListTables(t)
                ]
                [
                    tables.remove(os.path.join(job.path, fc))
                    for fc in arcpy.ListFeatureClasses(t)
                ]
                for fds in feature_datasets:
                    [
                        tables.remove(os.path.join(job.path, fds, fc))
                        for fc in arcpy.ListFeatureClasses(wild_card=t,
                                                           feature_dataset=fds)
                    ]

    # A geodatabase feature dataset, SDC data, or CAD dataset.
    elif dsc.dataType == 'FeatureDataset' or dsc.dataType == 'CadDrawingDataset':
        tables_to_keep = job.tables_to_keep()
        tables_to_skip = job.tables_to_skip()
        arcpy.env.workspace = job.path
        if tables_to_keep:
            tables = []
            for tbl in tables_to_keep:
                [
                    tables.append(os.path.join(job.path, fc))
                    for fc in arcpy.ListFeatureClasses(tbl)
                ]
                tables = list(set(tables))
        else:
            tables = [
                os.path.join(job.path, fc)
                for fc in arcpy.ListFeatureClasses()
            ]
        if tables_to_skip:
            for tbl in tables_to_skip:
                [
                    tables.remove(os.path.join(job.path, fc))
                    for fc in arcpy.ListFeatureClasses(tbl) if fc in tables
                ]

    # Not a recognized data type.
    else:
        sys.exit(1)

    if job.multiprocess:
        # Multiprocess larger databases and feature datasets.
        multiprocessing.log_to_stderr()
        logger = multiprocessing.get_logger()
        logger.setLevel(logging.INFO)
        pool = multiprocessing.Pool(initializer=global_job, initargs=(job, ))
        for i, _ in enumerate(pool.imap_unordered(worker, tables), 1):
            status_writer.send_percent(i / len(tables),
                                       "{0:%}".format(i / len(tables)),
                                       'esri_worker')
        # Synchronize the main process with the job processes to ensure proper cleanup.
        pool.close()
        pool.join()
    else:
        for i, tbl in enumerate(tables, 1):
            try:
                global_job(job)
                te = worker(tbl)
                if te:
                    gdb_links.append({'relation': 'contains', 'id': te['id']})
                status_writer.send_percent(
                    i / len(tables), "{0} {1:%}".format(tbl, i / len(tables)),
                    'esri_worker')
            except Exception:
                continue
    gdb_entry['entry']['links'] = gdb_links
    job.send_entry(gdb_entry)
    return
import arcpy
arcpy.env.workspace = "C:/ArcpyBook/data/CityOfSanAntonio.gdb"
fcList = arcpy.ListFeatureClasses("C*", "polygon")
for fc in fcList:
    print(fc)
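
# ListFeatureClasses can also be scoped to a feature dataset, as the
# longer examples above do; a minimal sketch against the same
# (hypothetical) geodatabase:
import arcpy

arcpy.env.workspace = "C:/ArcpyBook/data/CityOfSanAntonio.gdb"
for ds in arcpy.ListDatasets("*", "Feature"):
    for fc in arcpy.ListFeatureClasses("*", "Polygon", ds):
        print("{0}/{1}".format(ds, fc))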
Example #15
    def _CopyDataTypeProcess(self, type="FeatureClasses", ds="", fc=""):
        try:
            #Set workspaces
            arcpy.env.workspace = self.start_db
            wk2 = self.end_db
            result = {}
            if (self.calledFromApp):
                if isinstance(self.standaloneFeatures, dict):
                    for key, featClass in self.standaloneFeatures.items():
                        if arcpy.Exists(dataset=featClass):

                            fcName = os.path.basename(featClass)
                            if '.' in fcName:
                                fcSplit = fcName.split('.')
                                fcName = fcSplit[len(fcSplit) - 1]

                            #fcDes = arcpy.Describe(featClass)
                            #workspace =featClass.replace(featClassBase,"")
                            #fullName = arcpy.ParseTableName(name=featClassBase,workspace=fcDes.workspace)
                            #nameList = fullName.split(",")
                            #databaseName = str(nameList[0].encode('utf-8')).strip()
                            #ownerName = str(nameList[1].encode('utf-8')).strip()
                            #fcName = str(nameList[2].encode('utf-8')).strip()

                            fcRes = arcpy.FeatureClassToFeatureClass_conversion(
                                featClass, wk2, fcName)
                            result[key] = str(fcRes)

                            print "Completed copy on {0}".format(fcName)
                        else:
                            result[key] = featClass

                else:
                    for featClass in self.standaloneFeatures:
                        if featClass.upper().find(".SDE") != -1:
                            featName = featClass.split('.')[-1]
                        else:
                            featName = featClass.split('/')[-1]
                        if arcpy.Exists(dataset=featClass):
                            arcpy.FeatureClassToFeatureClass_conversion(
                                featClass, wk2, featName)
                        print "Completed copy on {0}".format(featName)
            else:

                # if ds passed value exist then this call came from a copy dataset child object request.
                if ds != "":
                    if arcpy.Exists(wk2 + os.sep + ds.split('.')[-1] + os.sep +
                                    fc.split('.')[-1]) == False:
                        if type == "FeatureClasses":
                            arcpy.FeatureClassToFeatureClass_conversion(
                                self.start_db + os.sep + ds + os.sep + fc,
                                wk2 + os.sep + ds.split('.')[-1],
                                fc.split('.')[-1])
                            #arcpy.Copy_management(self.start_db + os.sep + ds + os.sep + fc, wk2 + os.sep + ds.split('.')[-1] + os.sep + fc.split('.')[-1])
                            print "Completed copy on {0}".format(fc)
                else:
                    # This function was called independently
                    #Check GDB if not created already, create it now
                    if self._CheckCreateGDBProcess():
                        #Determine the object type and List out
                        if type == "Tables":
                            dataTypeList = arcpy.ListTables()
                        else:
                            dataTypeList = arcpy.ListFeatureClasses()

                        for dtl in dataTypeList:
                            name = arcpy.Describe(dtl)
                            new_data = name.name.split('.')[-1]

                            # Checks to see if user wants to copy all features or just the ones that match the supplied list.
                            if "*" in self.standaloneFeatures and len(
                                    self.standaloneFeatures) == 1:
                                #print "Reading: {0}".format(dtl)
                                if arcpy.Exists(wk2 + os.sep +
                                                new_data) == False:
                                    if type == "Tables":
                                        arcpy.TableToTable_conversion(
                                            dtl, wk2, new_data)
                                    else:
                                        arcpy.FeatureClassToFeatureClass_conversion(
                                            dtl, wk2, new_data)
                                    print "Completed copy on {0}".format(
                                        new_data)
                            else:
                                if new_data in self.standaloneFeatures:
                                    print "Reading here: {0}".format(dtl)
                                    if arcpy.Exists(wk2 + os.sep +
                                                    new_data) == False:
                                        if type == "Tables":
                                            arcpy.TableToTable_conversion(
                                                dtl, wk2, new_data)
                                        else:
                                            arcpy.FeatureClassToFeatureClass_conversion(
                                                dtl, wk2, new_data)
                                        print "Completed copy on {0}".format(
                                            new_data)
                                    else:
                                        print "Feature class {0} already exists in the end_db so skipping".format(
                                            new_data)
                        #Clear memory (dataTypeList is always bound in this branch;
                        #the original 'del dtl' raised NameError when the list was empty)
                        del dataTypeList
            return True
        except arcpy.ExecuteError:
            line, filename, synerror = trace()
            raise DataPrepError({
                "function": "CopyData",
                "line": line,
                "filename": filename,
                "synerror": synerror,
                "arcpyError": arcpy.GetMessages(2),
            })
        except:
            line, filename, synerror = trace()
            raise DataPrepError({
                "function": "CopyData",
                "line": line,
                "filename": filename,
                "synerror": synerror,
            })
#Make variable to isolate starting point shapefile name
loc_analysis_name = search_shp.split('\\')[-1]
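# (search_shp, search_dist and output_folder are defined earlier in the full
# script; e.g. search_shp = r'C:\data\sites.shp', search_dist = '1 Miles' -
# hypothetical values for illustration only.)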

#Make variable to create new folder
location_analysis_fld = os.path.join(
    output_folder, 'Location_Analysis_' + loc_analysis_name[:-4])

#Use os library to create a new folder to export our selected shapefiles
print('Creating output folder')
os.mkdir(location_analysis_fld)

#Set overwrite output to true so we can overwrite feature layers we create
arcpy.env.overwriteOutput = True

#Get list of the shapefiles that we want to select from
shp_list = arcpy.ListFeatureClasses()

for shp in shp_list:
    print('Selecting shps near', shp)
    #Create temporary feature layer
    arcpy.MakeFeatureLayer_management(shp, "flyr")

    #Select feature layer by distance to input point
    arcpy.SelectLayerByLocation_management("flyr", "WITHIN_A_DISTANCE",
                                           search_shp, search_dist)

    #Export the selected features to a shapefile in the output folder
    arcpy.FeatureClassToFeatureClass_conversion(
        "flyr", location_analysis_fld, shp[:-4] + '_' + loc_analysis_name)
Example #17
def pmpAnalysis(aoiBasin, stormType, durList):

    ###########################################################################
    ## Create PMP Point Feature Class from points within AOI basin and add fields
    def createPMPfc():

        arcpy.AddMessage(
            "\nCreating feature class: 'PMP_Points' in Scratch.gdb...")
        dm.MakeFeatureLayer(
            home + "\\Input\\Non_Storm_Data.gdb\\Vector_Grid",
            "vgLayer")  # make a feature layer of vector grid cells
        dm.SelectLayerByLocation(
            "vgLayer", "INTERSECT", aoiBasin
        )  # select the vector grid cells that intersect the aoiBasin polygon
        dm.MakeFeatureLayer(home + "\\Input\\Non_Storm_Data.gdb\\Grid_Points",
                            "gpLayer")  # make a feature layer of grid points
        dm.SelectLayerByLocation(
            "gpLayer", "HAVE_THEIR_CENTER_IN", "vgLayer"
        )  # select the grid points within the vector grid selection
        con.FeatureClassToFeatureClass(
            "gpLayer", env.scratchGDB,
            "PMP_Points")  # save feature layer as "PMP_Points" feature class
        arcpy.AddMessage("(" + str(dm.GetCount("gpLayer")) +
                         " grid points will be analyzed)\n")

        # Add PMP Fields
        for dur in durList:
            arcpy.AddMessage("\t...adding field: PMP_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "PMP_" + dur,
                        "DOUBLE")

        # Add STORM Fields (this string value identifies the driving storm by SPAS ID number)
        for dur in durList:
            arcpy.AddMessage("\t...adding field: STORM_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "STORM_" + dur,
                        "TEXT", "", "", 16)

        return

    ###########################################################################
    ##  Define getAOIarea() function:
    ##  getAOIarea() calculates the area of the AOI (basin outline) input shapefile/
    ##  feature class.  The basin outline shapefile must be projected.  The area
    ##  is square miles, converted from the basin layer's projected units (feet
    ##  or meters).  The aoiBasin feature class should only have a single feature
    ##  (the basin outline).  If there are multiple features, the area will be stored
    ##  for the final feature only.
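    ##  (For reference: 1 mile = 1,609.344 m, so 1 square mile = 2,589,988.11 sq m;
    ##  hence the factor 1 / 2,589,988.11 ≈ 0.000000386102 used below.)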

    def getAOIarea():
        sr = arcpy.Describe(
            aoiBasin
        ).SpatialReference  # Determine aoiBasin spatial reference system
        srname = sr.name
        srtype = sr.type
        srunitname = sr.linearUnitName  # Units
        arcpy.AddMessage("\nAOI basin spatial reference:  " + srname +
                         "\nUnit type: " + srunitname +
                         "\nSpatial reference type: " + srtype)

        aoiArea = 0.0
        rows = arcpy.SearchCursor(aoiBasin)
        for row in rows:
            feat = row.getValue("Shape")
            aoiArea += feat.area
        if srtype == 'Geographic':  # Must have a surface projection.  If one doesn't exist it projects a temporary file and uses that.
            arcpy.AddMessage(
                "\n***The basin shapefile's spatial reference 'Geographic' is not supported.  Projecting temporary shapefile for AOI.***"
            )
            arcpy.Project_management(
                aoiBasin, env.scratchGDB + "\\TempBasin", 102039
            )  #Projects AOI Basin (102039 = USA_Contiguous_Albers_Equal_Area_Conic_USGS_version)
            TempBasin = env.scratchGDB + "\\TempBasin"  # Path to temporary basin created in scratch geodatabase
            sr = arcpy.Describe(
                TempBasin
            ).SpatialReference  # Determine Spatial Reference of temporary basin
            aoiArea = 0.0
            rows = arcpy.SearchCursor(
                TempBasin)  # Assign area size in square meters
            for row in rows:
                feat = row.getValue("Shape")
                aoiArea += feat.area
            aoiArea = aoiArea * 0.000000386102  # Converts square meters to square miles
        elif srtype == 'Projected':  # If a projection exists, it re-projects a temporary file and uses that for data consistency.
            arcpy.AddMessage(
                "\n***The basin shapefile's spatial reference will be reprojected to USA_Contiguous_Albers_Equal_Area_Conic_USGS_version for data consistency.  Projecting temporary shapefile for AOI.***"
            )
            arcpy.Project_management(
                aoiBasin, env.scratchGDB + "\\TempBasin", 102039
            )  #Projects AOI Basin (102039 = USA_Contiguous_Albers_Equal_Area_Conic_USGS_version)
            TempBasin = env.scratchGDB + "\\TempBasin"  # Path to temporary basin created in scratch geodatabase
            sr = arcpy.Describe(
                TempBasin
            ).SpatialReference  # Determine Spatial Reference of temporary basin
            aoiArea = 0.0
            rows = arcpy.SearchCursor(
                TempBasin)  # Assign area size in square meters
            for row in rows:
                feat = row.getValue("Shape")
                aoiArea += feat.area
            aoiArea = aoiArea * 0.000000386102  # Converts square meters to square miles

        aoiArea = round(aoiArea, 3)
        arcpy.AddMessage("\nArea of interest: " + str(aoiArea) +
                         " square miles.")

        if arcpy.GetParameter(5) == False:
            aoiArea = arcpy.GetParameter(6)  # Enable a constant area size
        aoiArea = round(aoiArea, 1)
        arcpy.AddMessage("\n***Area used for PMP analysis: " + str(aoiArea) +
                         " sqmi***")
        return aoiArea

    ###########################################################################
    ##  Define dadLookup() function:
    ##  The dadLookup() function determines the DAD value for the current storm
    ##  and duration according to the basin area size.  The DAD depth is interpolated
    ##  linearly between the two nearest areal values within the DAD table.
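    ##  Interpolation used below: y = y1 + (x - x1) * (y2 - y1) / (x2 - x1), where x
    ##  is the basin area and (x1, y1), (x2, y2) are the bounding DAD table entries.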
    def dadLookup(
        stormLayer, duration, area
    ):  # dadLookup() accepts the current storm layer name (string), the current duration (string), and AOI area size (float)
        #arcpy.AddMessage("\t\tfunction dadLookup() called.")
        durField = "H_" + duration  # defines the name of the duration field (eg., "H_06" for 6-hour)
        dadTable = dadGDB + "\\" + stormLayer
        rows = arcpy.SearchCursor(dadTable)

        try:
            row = rows.next(
            )  # Sets DAD area x1 to the value in the first row of the DAD table.
            x1 = row.AREASQMI
            y1 = row.getValue(durField)
            xFlag = "FALSE"  # xFlag will remain false for basins that are larger than the largest DAD area.
        except RuntimeError:  # return if duration does not exist in DAD table
            return

        row = rows.next()
        i = 0
        while row:  # iterates through the DAD table - assigning the bounding values directly above and below the basin area size
            i += 1
            if row.AREASQMI < area:
                x1 = row.AREASQMI
                y1 = row.getValue(durField)
            else:
                xFlag = "TRUE"  # xFlag is switched to "TRUE" indicating area is within DAD range
                x2 = row.AREASQMI
                y2 = row.getValue(durField)
                break

            row = rows.next()
        del row, rows, i

        if xFlag == "FALSE":
            x2 = area  # If x2 is equal to the basin area, this means that the largest DAD area is smaller than the basin and the resulting DAD value must be extrapolated.
            arcpy.AddMessage(
                "\t\tThe basin area size: " + str(area) +
                " sqmi is greater than the largest DAD area: " + str(x1) +
                " sqmi.\n\t\tDAD value is estimated by extrapolation.")
            y = x1 / x2 * y1  # y (the DAD depth) is estimated by extrapolating the DAD area to the basin area size.
            return y  # The extrapolated DAD depth (in inches) is returned.

        # arcpy.AddMessage("\nArea = " + str(area) + "\nx1 = " + str(x1) + "\nx2 = " + str(x2) + "\ny1 = " + str(y1) + "\ny2 = " + str(y2))

        x = area  # If the basin area size is within the DAD table area range, the DAD depth is interpolated
        deltax = x2 - x1  # to determine the DAD value (y) at area (x) based on next lower (x1) and next higher (x2) areas.
        deltay = y2 - y1
        diffx = x - x1

        y = y1 + diffx * deltay / deltax

        if x < x1:
            arcpy.AddMessage(
                "\t\tThe basin area size: " + str(area) +
                " sqmi is less than the smallest DAD table area: " + str(x1) +
                " sqmi.\n\t\tDAD value is estimated by extrapolation.")

        return y  # The interpolated DAD depth (in inches) is returned.

    ###########################################################################
    ##  Define updatePMP() function:
    ##  This function updates the 'PMP_XX_' and 'STORM_XX' fields of the PMP_Points
    ##  feature class with the largest value from all analyzed storms stored in the
    ##  pmpValues list.
    def updatePMP(
        pmpValues, stormID, duration
    ):  # Accepts three arguments: pmpValues - largest adjusted rainfall for current duration (float list); stormID - driver storm ID for each PMP value (text list); and duration (string)
        pmpfield = "PMP_" + duration
        stormfield = "STORM_" + duration
        gridRows = arcpy.UpdateCursor(
            env.scratchGDB +
            "\\PMP_Points")  # iterates through PMP_Points rows
        i = 0
        for row in gridRows:
            row.setValue(
                pmpfield, pmpValues[i]
            )  # Sets the PMP field value equal to the Max Adj. Rainfall value (if larger than existing value).
            row.setValue(
                stormfield, stormID[i]
            )  # Sets the storm ID field to indicate the driving storm event
            gridRows.updateRow(row)
            i += 1
        del row, gridRows, pmpfield, stormfield
        arcpy.AddMessage("\n\t" + duration +
                         "-hour PMP values update complete. \n")
        return

    ###########################################################################
    ##  The outputPMP() function produces raster GRID files for each of the PMP durations.
    ##  Also, a space-delimited PMP_Distribution.txt file is created in the 'Text_Output' folder.
    def outputPMP(type, area, outPath):
        desc = arcpy.Describe(basin)
        basinName = desc.baseName
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Location of 'PMP_Points' feature class which will provide data for output

        outType = type[:1]
        outArea = str(int(round(area, 0))) + "sqmi"
        outFC = outType + "_" + outArea  #I don't think I need this.....
        arcpy.AddMessage("\nCopying PMP_Points feature class to " + outFC +
                         "...")  #outFC might be replaced with outpath...
        dm.Merge(
            pmpPoints, outPath
        )  # merge the scratch feature layer(s) of vector grid cells into the outputs

        arcpy.AddMessage("\nCreating Basin Summary Table...")
        tableName = type + "_PMP_Basin_Average" + "_" + outArea
        tablePath = env.scratchGDB + "\\" + tableName
        dm.CreateTable(env.scratchGDB, tableName)  # Create blank table
        cursor = arcpy.da.InsertCursor(
            tablePath,
            "*")  # Create Insert cursor and add a blank row to the table
        cursor.insertRow([0])
        del cursor

        dm.AddField(tablePath, "STORM_TYPE", "TEXT", "", "", 10,
                    "Storm Type")  # Create "Storm Type" field
        dm.CalculateField(tablePath, "STORM_TYPE", "'" + type + "'",
                          "PYTHON_9.3")  # populate storm type field

        i = 0
        for field in arcpy.ListFields(
                pmpPoints, "PMP_*"
        ):  # Add fields for each PMP duration and calculate the basin average
            fieldName = field.name
            fieldAve = basinAve(
                basin, fieldName
            )  # Calls the basinAve() function - returns the average (weighted or not)
            dm.AddField(tablePath, fieldName, "DOUBLE", "",
                        2)  # Add duration field
            dm.CalculateField(tablePath, fieldName, fieldAve,
                              "PYTHON_9.3")  # Assigns the basin average

            i += 1
        arcpy.AddMessage("\nSummary table complete.")

        basAveTables.append(tablePath)

        return

    ###########################################################################
##  basinAve() returns the basin-average PMP value for a given duration field.
##  If the weighted-average option is checked in the tool parameters, the script
##  weights each grid point value by the proportion of its cell area inside the basin.
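##  Weighted average = SUM(PMP_i * a_i) / SUM(a_i), where a_i is the clipped
##  (inside-basin) area of grid cell i.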
    def basinAve(aoiBasin, pmpField):
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # Path of 'PMP_Points' scratch feature class
        if weightedAve:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(weighted)...")
            vectorGridClip = env.scratchGDB + "\\VectorGridClip"  # Path of 'PMP_Points' scratch feature class
            sumstats = env.scratchGDB + "\\SummaryStats"

            dm.MakeFeatureLayer(
                home + "\\Input\Non_Storm_Data.gdb\\Vector_Grid",
                "vgLayer")  # make a feature layer of vector grid cells
            dm.SelectLayerByLocation(
                "vgLayer", "INTERSECT", aoiBasin
            )  # select the vector grid cells that intersect the aoiBasin polygon

            an.Clip("vgLayer", aoiBasin,
                    vectorGridClip)  # clips aoi vector grid to basin
            dm.AddField(
                pmpPoints, "WEIGHT", "DOUBLE"
            )  # adds 'WEIGHT' field to PMP_Points scratch feature class
            dm.MakeFeatureLayer(
                vectorGridClip, "vgClipLayer"
            )  # make a feature layer of basin clipped vector grid cells
            dm.MakeFeatureLayer(
                pmpPoints, "pmpPointsLayer"
            )  # make a feature layer of PMP_Points feature class

            dm.AddJoin("pmpPointsLayer", "ID", "vgClipLayer",
                       "ID")  # joins PMP_Points and vectorGridBasin tables
            dm.CalculateField(
                "pmpPointsLayer", "WEIGHT", "!vectorGridClip.Shape_Area!",
                "PYTHON_9.3"
            )  # Calculates basin area proportion to use as weight for each grid cell.
            dm.RemoveJoin("pmpPointsLayer", "vectorGridClip")

            an.Statistics(pmpPoints, sumstats, [["WEIGHT", "SUM"]], "")
            stats = arcpy.SearchCursor(sumstats)
            pmpWgtAve = pmpField + "_WgtAve"

            for row in stats:
                calc = row.getValue("SUM_WEIGHT")
                express = "(!WEIGHT!/{})* !{}!".format(calc, pmpField)
                i = 0
                for field in arcpy.ListFields(pmpPoints, pmpField):
                    dm.AddField(pmpPoints, pmpWgtAve, "DOUBLE", 2)
                    dm.CalculateField(pmpPoints, pmpWgtAve, express,
                                      "PYTHON_9.3")
                    i += 1
                del stats, row

            an.Statistics(pmpPoints, sumstats, [[pmpWgtAve, "SUM"]], "")
            sumwgtave = "SUM_" + pmpWgtAve
            with arcpy.da.SearchCursor(sumstats, sumwgtave) as stats:
                for row in stats:
                    wgtAve = row[0]
                    return round(wgtAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints,(pmpField, 'WEIGHT'))                                 # Assign pmpPoints values and weights to Numpy array (na)
##            wgtAve = numpy.average(na[pmpField], weights=na['WEIGHT'])                                         # Calculate weighted average with Numpy average
##            del na
##            return round(wgtAve, 2)

        else:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField +
                             "(not weighted)...")
            sumstats = env.scratchGDB + "\\SummaryStats"
            an.Statistics(pmpPoints, sumstats, [[pmpField, "MEAN"]], "")
            mean = "MEAN_" + pmpField
            with arcpy.da.SearchCursor(sumstats, mean) as stats:
                for row in stats:
                    fieldAve = row[0]
                    return round(fieldAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints, pmpField)                                            # Assign pmpPoints values to Numpy array (na)
##            fieldAve = numpy.average(na[pmpField])                                                             # Calculates aritmetic mean
##            del na
##            return round(fieldAve, 2)

###########################################################################
##  This portion of the code iterates through each storm feature class in the
##  'Storm_Adj_Factors' geodatabase (evaluating the feature class only within
##  the Local, Tropical, or general feature dataset).  For each duration,
##  at each grid point within the aoi basin, the transpositionality is
##  confirmed.  Then the DAD precip depth is retrieved and applied to the
##  total adjustment factor to yield the total adjusted rainfall.  This
##  value is then sent to the updatePMP() function to update the 'PMP_Points'
##  feature class.
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##

    desc = arcpy.Describe(
        basin)  # Check to ensure AOI input shape is a Polygon. If not - exit.
    basinShape = desc.shapeType
    if desc.shapeType == "Polygon":
        arcpy.AddMessage("\nBasin shape type: " + desc.shapeType)
    else:
        arcpy.AddMessage("\nBasin shape type: " + desc.shapeType)
        arcpy.AddMessage("\nError: Input shapefile must be a polygon!\n")
        sys.exit()

    createPMPfc(
    )  # Call the createPMPfc() function to create the PMP_Points feature class.

    env.workspace = adjFactGDB  # the workspace environment is set to the 'Storm_Adj_Factors' file geodatabase

    aoiSQMI = round(
        getAOIarea(), 2
    )  # Calls the getAOIarea() function to assign area of AOI shapefile to 'aoiSQMI'

    for dur in durList:
        stormList = arcpy.ListFeatureClasses(
            "", "Point", stormType
        )  # List all the total adjustment factor feature classes within the storm type feature dataset.

        arcpy.AddMessage(
            "\n*************************************************************\nEvaluating "
            + dur + "-hour duration...")

        pmpList = []
        driverList = []
        gridRows = arcpy.SearchCursor(env.scratchGDB + "\\PMP_Points")
        try:
            for row in gridRows:
                pmpList.append(
                    0.0
                )  # creates pmpList of empty float values for each grid point to store final PMP values
                driverList.append(
                    "STORM"
                )  # creates driverList of empty text values for each grid point to store final Driver Storm IDs
            del row, gridRows
        except UnboundLocalError:
            arcpy.AddMessage(
                "\n***Error: No data present within basin/AOI area.***\n")
            sys.exit()

        for storm in stormList:
            arcpy.AddMessage("\n\tEvaluating storm: " + storm + "...")
            dm.MakeFeatureLayer(
                storm,
                "stormLayer")  # creates a feature layer for the current storm
            dm.SelectLayerByLocation(
                "stormLayer", "HAVE_THEIR_CENTER_IN", "vgLayer"
            )  # examines only the grid points that lie within the AOI
            gridRows = arcpy.SearchCursor("stormLayer")
            pmpField = "PMP_" + dur
            i = 0
            try:
                dadPrecip = round(dadLookup(storm, dur, aoiSQMI), 3)
                arcpy.AddMessage("\t\t" + dur + "-hour DAD value:  " +
                                 str(dadPrecip) + chr(34))
            except TypeError:  # If no duration exists in the DAD table, move to the next storm
                arcpy.AddMessage("\t***Duration '" + str(dur) +
                                 "-hour' is not present for " + str(storm) +
                                 ".***\n")
                continue
            arcpy.AddMessage(
                "\t\tComparing " + storm +
                " adjusted rainfall values against current driver values...\n")
            for row in gridRows:
                if row.TRANS == 1:  # Only continue if grid point is transpositionable ('1' is transpositionable, '0' is not).
                    try:  # get total adj. factor if duration exists
                        adjRain = round(dadPrecip * row.TAF, 1)
                        if adjRain > pmpList[i]:
                            pmpList[i] = adjRain
                            driverList[i] = storm
                    except RuntimeError:
                        arcpy.AddMessage(
                            "\t\t   *Warning*  Total Adjusted Rainfall value failed to set for row "
                            + str(row.CNT))
                        break
                    del adjRain
                i += 1
            del row
        del storm, stormList, gridRows, dadPrecip
        updatePMP(pmpList, driverList,
                  dur)  # calls function to update "PMP Points" feature class
    del dur, pmpList

    arcpy.AddMessage(
        "\n'PMP_Points' Feature Class 'PMP_XX' fields update complete for all '"
        + stormType + "' storms.")

    outputPMP(stormType, aoiSQMI, outPath)  # calls outputPMP() function

    del aoiSQMI
    return
Example #18
import arcpy
from arcpy import env

parentGeodatabase = arcpy.GetParameterAsText(0)
outGeodb = arcpy.GetParameterAsText(1)
replicaName = arcpy.GetParameterAsText(2)
query = arcpy.GetParameterAsText(3)

env.workspace = parentGeodatabase
allFeatureClasses = arcpy.ListTables()
dataSets = arcpy.ListDatasets()
for dataset in dataSets:
    featureClasses = arcpy.ListFeatureClasses('*', '', dataset)
    print dataset
    arcpy.AddMessage(dataset)
    for featureClass in featureClasses:
        try:
            print featureClass
            featureNames = featureClass.split(".")
            pureName = featureNames[-1]
            arcpy.AddMessage("    " + pureName)
            arcpy.MakeFeatureLayer_management(featureClass, pureName + "_New",
                                              query, dataset)
            allFeatureClasses.append(pureName + "_New")
        except Exception, e:
            arcpy.AddError(e.message)
            # arcpy.MakeFeatureLayer_management(featureClass, featureClass + "_New", workspace=dataset)
            print('query did not apply to this layer ' + featureClass + " " +
                  arcpy.GetMessages())
            pass
if len(allFeatureClasses) == 0:
Example #19
def create_layer_file(input_items,
                      meta_folder,
                      voyager_server,
                      hdrs,
                      show_progress=False):
    """Creates a layer for input items in the appropriate meta folders."""
    created = 0
    skipped = 0
    errors = 0
    global processed_count

    for input_item in input_items:
        try:
            lyr = None
            id = input_item[0]
            path = input_item[1]
            name = input_item[2]
            location = input_item[3]
            layer_folder = os.path.join(meta_folder, id[0], id[1:4])
            lyr_mxd = arcpy.mapping.MapDocument(mxd_path)
            dsc = arcpy.Describe(path)

            # Create layer folder if it does not exist.
            if not os.path.exists(layer_folder):
                os.makedirs(layer_folder)

            if not os.path.exists(
                    os.path.join(layer_folder, '{0}.layer.lyr'.format(id))):
                # os.makedirs(layer_folder)
                try:
                    if dsc.dataType in ('FeatureClass', 'Shapefile',
                                        'ShapeFile'):
                        feature_layer = arcpy.MakeFeatureLayer_management(
                            path, os.path.basename(path))
                        lyr = arcpy.SaveToLayerFile_management(
                            feature_layer,
                            os.path.join(layer_folder,
                                         '{0}.layer.lyr'.format(id)))
                    elif dsc.dataType == 'RasterDataset':
                        raster_layer = arcpy.MakeRasterLayer_management(
                            path,
                            os.path.splitext(os.path.basename(path))[0])
                        lyr = arcpy.SaveToLayerFile_management(
                            raster_layer,
                            os.path.join(layer_folder,
                                         '{0}.layer.lyr'.format(id)))
                    elif dsc.dataType in ('CadDrawingDataset',
                                          'FeatureDataset'):
                        arcpy.env.workspace = path
                        lyr_mxd = arcpy.mapping.MapDocument(mxd_path)
                        data_frame = arcpy.mapping.ListDataFrames(lyr_mxd)[0]
                        group_layer = arcpy.mapping.ListLayers(
                            lyr_mxd, 'Group Layer', data_frame)[0]
                        for fc in arcpy.ListFeatureClasses():
                            dataset_name = os.path.splitext(
                                os.path.basename(path))[0]
                            l = arcpy.MakeFeatureLayer_management(
                                fc, '{0}_{1}'.format(dataset_name,
                                                     os.path.basename(fc)))
                            arcpy.mapping.AddLayerToGroup(
                                data_frame, group_layer, l.getOutput(0))
                        arcpy.ResetEnvironments()
                        group_layer.saveACopy(
                            os.path.join(layer_folder,
                                         '{0}.layer.lyr'.format(id)))
                        lyr = '{0}.layer.lyr'.format(id)
                    elif dsc.catalogPath.lower().endswith(
                            '.tab') or dsc.catalogPath.lower().endswith(
                                '.mif'):
                        arcpy.ImportToolbox(
                            r"C:\Program Files (x86)\DataEast\TAB Reader\Toolbox\TAB Reader.tbx"
                        )
                        lyr = arcpy.GPTabsToArcGis_TR(
                            dsc.catalogPath, False, '', True, True,
                            os.path.join(layer_folder,
                                         '{0}.layer.lyr'.format(id)))
                    else:
                        skipped += 1
                        status_writer.send_status(
                            _('Invalid input type: {0}').format(dsc.name))
                        skipped_reasons[name] = _(
                            'Invalid input type: {0}').format(dsc.dataType)
                        continue
                except arcpy.ExecuteError:
                    errors += 1
                    status_writer.send_status(arcpy.GetMessages(2))
                    errors_reasons[name] = arcpy.GetMessages(2)
                    continue
                except RuntimeError as re:
                    errors += 1
                    status_writer.send_status(re.message)
                    errors_reasons[name] = re.message
                    continue
                except AssertionError as ae:
                    status_writer.send_status(
                        _('FAIL: {0}. MXD - {1}').format(repr(ae), mxd_path))
            else:
                lyr = os.path.join(layer_folder, '{0}.layer.lyr'.format(id))
            created += 1

            # Update the index.
            if lyr:
                try:
                    update_index(path, lyr, id, name, location, voyager_server,
                                 hdrs)
                except (IndexError, ImportError) as ex:
                    status_writer.send_state(status.STAT_FAILED, ex)
                processed_count += 1
                status_writer.send_percent(
                    processed_count / float(result_count),
                    _('Created: {0}').format('{0}.layer.lyr'.format(id)),
                    'create_layer_file')
        except IOError as io_err:
            processed_count += 1
            status_writer.send_percent(processed_count / float(result_count),
                                       _('Skipped: {0}').format(input_item),
                                       'create_layer_file')
            status_writer.send_status(_('FAIL: {0}').format(repr(io_err)))
            errors_reasons[input_item] = repr(io_err)
            errors += 1
    return created, errors, skipped
try:
    # add qualifier string to FC list name
    arcpy.AddMessage("Getting database qualifier string ...")
    featureDatasetName = os.path.basename(inputUTDSFeatureDataset)
    if len(os.path.basename(inputUTDSFeatureDataset)) > 4:
        qualifierString = os.path.basename(inputUTDSFeatureDataset)[:-4]
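        # e.g. basename "MyDB.DBO.UTDS"[:-4] -> "MyDB.DBO." (hypothetical name;
        # this assumes the dataset name itself is the 4-character "UTDS")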
    if debug == True: arcpy.AddMessage("qualifier string: " + qualifierString)
    for i in featureClassesToMerge:
        fqClassesToMerge.append(str(qualifierString + i))
    if debug == True:
        arcpy.AddMessage("fqClassesToMerge: " + str(fqClassesToMerge))

    # get a list of feature classes in the UTDS feature dataset
    workspace = os.path.dirname(inputUTDSFeatureDataset)
    arcpy.env.workspace = workspace
    utdsFeatureClasses = arcpy.ListFeatureClasses(
        "*", "Line", os.path.basename(inputUTDSFeatureDataset))
    if debug == True:
        arcpy.AddMessage("utdsFeatureClasses: " + str(utdsFeatureClasses))

    # now go through the list of all of them and see which names match our target list, if so, add them to a new list
    arcpy.AddMessage("Building list of input features ...")
    for fc in utdsFeatureClasses:
        if fc in fqClassesToMerge:
            newList.append(str(os.path.join(workspace, featureDatasetName,
                                            fc)))
    if debug == True: arcpy.AddMessage("newList: " + str(newList))

    # output feature class name
    target = os.path.join(inputMAOTWorkspace, "CombinedObstacleFeature")
    if debug == True: arcpy.AddMessage("target: " + str(target))
Example #21
import arcpy

arcpy.env.workspace = r'Database Connections\Dataowner@[email protected]'
arcpy.Rename_management('W1902','W1709')
fclist = arcpy.ListFeatureClasses("","",'W1709')
for fc in fclist:
    print fc
    # lstrip() strips a *set of characters*, not a prefix, and would mangle names
    # beginning with C, L, D, V, O or W; slice the prefix off instead.
    fcstripped = fc[len('CL_DVC.OWD.'):] if fc.startswith('CL_DVC.OWD.') else fc
    print fcstripped
    name = "CL_DVC.OWD.W1709_" + fcstripped
    print name
    arcpy.Rename_management(fc, name)
Example #22
# Import ArcPy module to use built-in functions to achieve the program objective
import arcpy
# From ArcPy, import the environment/workspace
from arcpy import env
# Import os for the os.path.exists() check below
import os

# Ask the user to input a file path to set as a workspace
env.workspace = raw_input("\nPlease enter your directory [*.mdb or *.gdb]: ")
# Assign the workspace to a new variable
filePath = env.workspace

x = 0
while x < 1:  # Simple validation loop: repeat until a valid path is supplied
    if os.path.exists(filePath):  # If the file path exists, continue..
        x = 1  # ..and leave the validation loop
    else:  # ..otherwise re-prompt (the original looped forever on a bad path)
        env.workspace = raw_input("\nPath not found. Please re-enter [*.mdb or *.gdb]: ")
        filePath = env.workspace

        fCList = arcpy.ListFeatureClasses("*", "All")  # List feature classes
        for fC in fCList:  # For files in the feature classes list..
            desc = arcpy.Describe(
                fC)  # ..describe them for multiple purposes for later use
            spatialRef = desc.spatialReference  # Describe the spatial reference system of the files

            if spatialRef.Name == 'Unknown':  # If the spatial reference system is unknown..
                # ..report it, then skip to the next feature class (the original
                # print statement sat after 'continue' and could never execute)
                print "\n" + fC, "has an 'Unknown' spatial reference\n"
                continue

            else:
                definedfC = fC + '_UTM_1983'  # Name of output file
                prjFile = 'Coordinate Systems/Projected Coordinate Systems/UTM/NAD 1983/NAD 1983 UTM Zone 11N.prj'  # Default path to .prj file in ArcMap
                # The example is truncated here; given the variables above, the
                # call was presumably:
                arcpy.Project_management(fC, definedfC, prjFile)
Example #23
ceAFiltrer = [u'CFSd_Pgn', u'CFSd_Pln', u'Emprise_Pgn', u'FUS_Pgn', u'FUS_Pln', u'Topo_Pgn', u'Topo_Pln']

# Create the temporary file geodatabase
if arcpy.Exists(output_gdb_path):
    print u"Deleting the previous temporary geodatabase..."
    arcpy.Delete_management(output_gdb_path)
    print u"Previous temporary geodatabase deleted."
print u"Creating the temporary geodatabase..."
arcpy.CreateFileGDB_management(output_gdb_folder, output_gdb)
print u"Temporary geodatabase created!\n"

# Set the project Gdb as the default workspace.
# This makes it easy to enumerate the feature classes (CE).
arcpy.env.workspace = input_gdb_path
# Build the list of input feature classes
fcList = arcpy.ListFeatureClasses()

# Walk the gdb looking for the expected feature classes,
# so that a missing one does not raise an error.
# (ceAExporter is defined earlier in the full script, which is truncated here.)
for fc in fcList:
    if fc in ceAExporter:
        output_fc = os.path.join(output_gdb_path, fc)
        print u'Copying ' + fc + '...'
        arcpy.Copy_management(fc, output_fc)
        print fc + u' has been copied.'
print u"Everything has been copied!\n"
del fc, fcList

# Set the temporary Gdb as the default workspace.
# We could perhaps use ceAExporter here instead...
# -> check whether a missing feature class raises an error
def fcs_in_workspace(inputFolder):
    # Create the output folder if it does not exist
    if not os.path.exists(outputFolder):
        os.makedirs(outputFolder)
    # Loop for root, subFolders, files in inputFolder
    suffix = ".shp"
    for root, subFolders, files in os.walk(inputFolder):
        # Loop for fileName in files
        for fileName in files:
            if fileName.endswith(suffix):
                # define environment workspace
                arcpy.env.workspace = root
                # List feature classes
                fclist = arcpy.ListFeatureClasses()
                # Loop for fc in fclist
                for fc in fclist:
                    # Describe fc name
                    # strip(".shp") removes a character set, not the extension;
                    # use os.path.splitext() to drop the suffix safely
                    name = os.path.splitext(arcpy.Describe(root + "\\" + fc).name)[0]
                    if name == "Income_EA":
                        duplicate(name, fc, outputFolder, 5)
                    elif name == "Labour_Occupation_Education_EA":
                        duplicate(name, fc, outputFolder, 1)
                    elif name == "Language_Immigration_Citizenships_EA":
                        duplicate(name, fc, outputFolder, 3)
                    elif name == "Marital_Families_Households_EA":
                        duplicate(name, fc, outputFolder, 3)

                    elif name == "Income_CT":
                        duplicate(name, fc, outputFolder, 7)
                    elif name == "Labour_Occupation_Education_CT":
                        duplicate(name, fc, outputFolder, 1)
                    elif name == "Language_Immigration_Citizenships_CT":
                        duplicate(name, fc, outputFolder, 3)
                    elif name == "Marital_Families_Households_CT":
                        duplicate(name, fc, outputFolder, 3)

    # Change environment workspace within the loop
    arcpy.env.workspace = outputFolder
    # List feature classes
    fclist = arcpy.ListFeatureClasses()
    # Loop for fc in fclist
    for fc in fclist:
        # Define path parameter for delete_rename function
        path = outputFolder + "\\" + fc
        # As above, use splitext rather than strip(".shp") to drop the extension
        name = os.path.splitext(arcpy.Describe(path).name)[0]
        # Pre-declare keepFields & newName with empty values so that each branch
        # below only has to fill them in before data_process() is called once at the end
        keepFields = []
        newName = ""

        # Use the duplicated_name() helper to build the names tested in the conditions below
        if name == duplicated_name("Income_EA", 0):
            # Define keepField & newName parameters for delete_rename function
            keepFields = ["FID", "Shape", "EAUID", "GEOGRAPHY", "AVERAGE31"]
            newName = "Household_Average_Income_Income_1996_EA.shp"
        elif name == duplicated_name("Income_EA", 1):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "RENTED", "TOTAL_NU4"
            ]
            newName = "Dwellings_Rental_Income_1996_EA.shp"
        elif name == duplicated_name("Income_EA", 2):
            keepFields = ["FID", "Shape", "EAUID", "GEOGRAPHY", "AVERAGE_VA"]
            newName = "Dwellings_Average_Value_Income_1996_EA.shp"
        elif name == duplicated_name("Income_EA", 3):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "MAJOR_REPA", "TOTAL_NU4"
            ]
            newName = "Dwellings_Major_Repair_Income_1996_EA.shp"
        elif name == duplicated_name("Income_EA", 4):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "PERIOD_OF_",
                "PERIOD_OF1", "TOTAL_NU4"
            ]
            newName = "Dwellings_1960Constructions_Income_1991_EA.shp"
        elif name == duplicated_name("Labour_Occupation_Education_EA", 0):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "UNEMPLOYED",
                "IN_THE_LAB"
            ]
            newName = "Labour_Unemployment_Labour_1996_EA.shp"
        elif name == duplicated_name("Language_Immigration_Citizenships_EA",
                                     0):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "1991_1996_",
                "TOTAL_POPU"
            ]
            newName = "Immigration_Language_1996_EA.shp"
        elif name == duplicated_name("Language_Immigration_Citizenships_EA",
                                     1):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "NEITHER_EN", "TOTAL_PO3"
            ]
            newName = "Neither_Language_Language_1996_EA.shp"
        elif name == duplicated_name("Language_Immigration_Citizenships_EA",
                                     2):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "CANADIAN_C",
                "TOTAL_POPU"
            ]
            newName = "Canadian_Citizen_Language_1996_EA.shp"
        elif name == duplicated_name("Marital_Families_Households_EA", 0):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "TOTAL_LONE",
                "TOTAL_NUMB"
            ]
            newName = "Lone_Parent_Marital_1996_EA.shp"
        elif name == duplicated_name("Marital_Families_Households_EA", 1):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "LIVING_AL1", "TOTAL_NU3"
            ]
            newName = "Living_Alone_Marital_1996_EA.shp"
        elif name == duplicated_name("Marital_Families_Households_EA", 2):
            keepFields = [
                "FID", "Shape", "EAUID", "GEOGRAPHY", "MOVABLE_DW", "TOTAL_NU4"
            ]
            newName = "Dwellings_Mobile_Martial_1996_EA.shp"

        elif name == duplicated_name("Income_CT", 0):
            keepFields = ["FID", "Shape", "GEOGRAPHY", "AVERAGE_HO"]
            newName = "Household_Average_Income_Income_1996_CT.shp"
        elif name == duplicated_name("Income_CT", 1):
            keepFields = ["FID", "Shape", "GEOGRAPHY", "RENTED", "TOTAL_NU4"]
            newName = "Dwellings_Rental_Income_1996_CT.shp"
        elif name == duplicated_name("Income_CT", 2):
            keepFields = ["FID", "Shape", "GEOGRAPHY", "INCIDENC2"]
            newName = "Low_Income_Income_1996_CT.shp"
        elif name == duplicated_name("Income_CT", 3):
            keepFields = ["FID", "Shape", "GEOGRAPHY", "GOVERNMENT"]
            newName = "Government_Transfers_Income_1996_CT.shp"
        elif name == duplicated_name("Income_CT", 4):
            keepFields = ["FID", "Shape", "GEOGRAPHY", "AVERAGE_VA"]
            newName = "Dwellings_Average_Value_Income_1996_CT.shp"
        elif name == duplicated_name("Income_CT", 5):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "MAJOR_REPA", "TOTAL_NU4"
            ]
            newName = "Dwellings_Major_Repair_Income_1996_CT.shp"
        elif name == duplicated_name("Income_CT", 6):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "PERIOD_OF_", "PERIOD_OF1",
                "TOTAL_NU4"
            ]
            newName = "Dwellings_1960Constructions_Income_1996_CT.shp"
        elif name == duplicated_name("Labour_Occupation_Education_CT", 0):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "UNEMPLOYED", "IN_THE_LAB"
            ]
            newName = "Labour_Unemployment_Labour_1996_CT.shp"
        elif name == duplicated_name("Language_Immigration_Citizenships_CT",
                                     0):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "1991_1996_", "TOTAL_POPU"
            ]
            newName = "Immigration_Language_1996_CT.shp"
        elif name == duplicated_name("Language_Immigration_Citizenships_CT",
                                     1):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "NEITHER_EN", "TOTAL_PO3"
            ]
            newName = "Neither_Language_Language_1996_CT.shp"
        elif name == duplicated_name("Language_Immigration_Citizenships_CT",
                                     2):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "CANADIAN_C", "TOTAL_POPU"
            ]
            newName = "Canadian_Citizen_Language_1996_CT.shp"
        elif name == duplicated_name("Marital_Families_Households_CT", 0):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "TOTAL_LONE", "TOTAL_NUMB"
            ]
            newName = "Lone_Parent_Marital_1996_CT.shp"
        elif name == duplicated_name("Marital_Families_Households_CT", 1):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "LIVING_AL1", "TOTAL_NU3"
            ]
            newName = "Living_Alone_Marital_1996_CT.shp"
        elif name == duplicated_name("Marital_Families_Households_CT", 2):
            keepFields = [
                "FID", "Shape", "GEOGRAPHY", "MOVABLE_DW", "TOTAL_NU4"
            ]
            newName = "Dwellings_Mobile_Marital_1996_CT.shp"
        else:
            continue

        # Call data_process() once here rather than in every branch above
        data_process(path, keepFields, newName)
    for fld in fields:
        fdict[fld.name] = fld.type
        #print fld.name, fld.type
    return fdict


addMsgAndPrint('  ' + versionString)
if len(sys.argv) != 3:
    addMsgAndPrint(usage)
    sys.exit()

gdb = sys.argv[1]
keylines1 = open(sys.argv[2], 'r').readlines()
arcpy.env.workspace = gdb + '/GeologicMap'  # list the feature classes inside the GeologicMap feature dataset (a bare 'GeologicMap' would resolve against the cwd, not the gdb)
featureClasses = arcpy.ListFeatureClasses()
arcpy.env.workspace = gdb

#Personal geodatabases are not supported in Pro and are being phased out entirely by Esri
if gdb.find('.mdb') > 0:
    addMsgAndPrint(
        'Personal geodatabases (*.mdb) are not supported in Arc Pro')

# remove empty lines from keylines1
keylines = []
for lin in keylines1:
    lin = lin.strip()
    if len(lin) > 1 and lin[0:1] != '#':
        keylines.append(lin)

n = 0
def CreateFishnetsForFeats(in_polys, out_loc, cell_x=0, cell_y=0):
    '''
    in_polys: input polygon feature class
    out_loc: folder location for new file gdb containing fishnet feature class
    cell_x: cell width
    cell_y: cell height
    '''

    # Set output directory (note: hard-coded here; the out_loc parameter is unused)
    output_dir = os.getcwd() + "/remote/01_data/02_out/samples/"
    arcpy.env.overwriteOutput = True
    # spatial reference
    arcpy.env.outputCoordinateSystem = arcpy.Describe(
        in_polys).spatialReference
    # Loop thru rows of input polygons
    with arcpy.da.SearchCursor(in_polys, ['SHAPE@', 'OID@', 'ID']) as rows:  # 'polys' was undefined; use the in_polys parameter
        for row in rows:
            ext = row[0].extent
            st = '%f %f' % (ext.XMin, ext.YMin)
            orien = '%f %f' % (ext.XMin, ext.YMax)
            if cell_y == 0:
                n_rows = 1
                cell_y = ext.height
            else:
                n_rows = int((ext.height - (ext.height % cell_y)) / cell_y) + 1
            if cell_x == 0:
                n_cols = 1
                cell_x = ext.width
            else:
                n_cols = int((ext.width - (ext.width % cell_x)) / cell_x) + 1
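            # n_rows/n_cols = floor(extent size / cell size) + 1, so the fishnet
            # always covers the full polygon extent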

            # create fishnet
            out = os.path.join(output_dir, 'fish_{0}'.format(row[2]))
            arcpy.CreateFishnet_management(out,
                                           st,
                                           orien,
                                           cell_x,
                                           cell_y,
                                           n_rows,
                                           n_cols,
                                           labels='LABELS')
            where = '"ID"' + '=' + '\'' + str(row[2]) + '\''
            hti_selected = arcpy.SelectLayerByAttribute_management(
                in_polys, "NEW_SELECTION", where)
            selected_samples = arcpy.SelectLayerByLocation_management(
                output_dir + 'fish_{0}'.format(row[2]) + "_label.shp",
                'COMPLETELY_WITHIN', hti_selected)
            arcpy.FeatureClassToFeatureClass_conversion(
                selected_samples, output_dir,
                'samples_{0}'.format(row[2]) + ".shp")
            arcpy.Delete_management(selected_samples)

    # set workspace to output_dir
    arcpy.env.workspace = output_dir
    samples = arcpy.ListFeatureClasses('samples_*')
    targ = samples[0]
    for i, sample in enumerate(samples):
        # add field for original polygon ID
        #fid0 = sample.split('_')[1]
        #fid = fid0.split('.')[0]
        #arcpy.AddField_management(sample, 'HT_ID', 'TEXT')
        #with arcpy.da.UpdateCursor(sample, ['HT_ID']) as rows:
        #    for row in rows:
        #        row[0] = fid
        #        rows.updateRow(row)
        # append fishnets into one feature class
        if i > 0:
            arcpy.Append_management([sample], targ, 'NO_TEST')

            arcpy.Delete_management(sample)
            print('Appended: {0}'.format(sample))
    # deleting unused files
    fish = arcpy.ListFeatureClasses('fish_*')
    for f in fish:  # delete one at a time; Delete_management expects a single dataset
        arcpy.Delete_management(f)
    # rename file
    append_shp = arcpy.ListFeatureClasses('samples_*')
    arcpy.Rename_management(append_shp[0], "samples_hti.shp")
    # adding sample ids
    arcpy.AddField_management(in_table=output_dir + 'samples_hti.shp',
                              field_name='sid',
                              field_type='LONG')
    arcpy.CalculateField_management(output_dir + 'samples_hti.shp', 'sid',
                                    "!FID!", "PYTHON")
    arcpy.DeleteField_management(output_dir + 'samples_hti.shp', ['Id'])
    print('Done')

    return
Example #27
def demand(planning_area, inputs, output):
    """Function that automates the Demand component of the MPT.

    Required parameters:
    'planning_area' - the path to a boundary layer for the area of interest.
    'inputs' - the path to a geodatabase with input layers (ex. active businesses, hospitals, etc.).
    'output' - the path to a geodatabase for outputs to be stored.

    bike distance groups = "0 5280 5;5280 10560 4;10560 15840 3;15840 21120 2;21120 1000000 1"
    ped distance groups = "0 1320 5;1320 2640 4;2640 3960 3;3960 5280 2;5280 1000000 1"

    TODO: Elaborate on the full script function.
    """
    # Saves the start time so script duration can be calculated.
    start_time = time.time()

    # Checkout Spatial Analyst
    arcpy.CheckOutExtension("Spatial")

    # Set workspace, extent, mask and spatial reference according to 'inputs' and 'planning_area' provided.
    env.workspace = inputs
    env.extent = planning_area
    env.mask = planning_area
    sr = arcpy.Describe(planning_area).spatialReference
    env.outputCoordinateSystem = sr

    # Sets the cellsize. This can easily be a parameter if we decided to vary our cell size in the future.
    # This has to be set or the output changes slightly.
    env.cellSize = 30
    print("Environment Set Up")

    # Get a list of all point and polyline features in the 'inputs' database
    ed_features = arcpy.ListFeatureClasses(feature_type="POINT")
    for i in arcpy.ListFeatureClasses(feature_type="POLYLINE"):
        ed_features.append(i)
    print(ed_features)

    # Run Euclidean Distance for each point feature in 'inputs' database and Reclassify 1-5.
    for fc in ed_features:
        ed_layer = arcpy.gp.EucDistance_sa(os.path.join(inputs, fc),
                                           os.path.join(output, "rED_" + fc),
                                           "")
        # The values can be changed to create Bike or Ped Demand layer.
        # TODO: Make distance groups a parameter of the function.
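        # Remap string format: "min max newValue;..." - e.g. "0 1320 5" maps
        # distances of 0-1320 ft (a quarter mile) to suitability score 5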
        arcpy.gp.Reclassify_sa(
            ed_layer, "VALUE",
            "0 1320 5;1320 2640 4;2640 3960 3;3960 5280 2;5280 1000000 1",
            os.path.join(output, "rWS_" + fc), "DATA")
        # This clears the memory so you don't run out.
        del ed_layer

    # Create rasters for "Pop_Density", "Employ_Density", and "Active_Commute" found in BG_2017
    # Note: In BG_2017 Employ_Density field name is actually "Employ_D"
    block_group = os.path.join(inputs, 'BG_2017')
    print("Starting polygon to raster conversions")
    r_pop_den = arcpy.PolygonToRaster_conversion(
        block_group, "Pop_Density", os.path.join(output, 'r_pop_den'),
        "CELL_CENTER", "NONE")
    r_commute = arcpy.PolygonToRaster_conversion(
        block_group, "Active_Commute", os.path.join(output, 'r_commute'),
        "CELL_CENTER", "NONE")
    r_employ_den = arcpy.PolygonToRaster_conversion(
        block_group, "Employ_D", os.path.join(output, 'r_employ_den'),
        "CELL_CENTER", "NONE")

    # Reclassify Pop, Emp, Commute using Jenks
    print("Starting Slices")
    arcpy.gp.Slice_sa(r_pop_den, os.path.join(output, 'rWS_PopDensity'), "5",
                      "NATURAL_BREAKS", "1")
    arcpy.gp.Slice_sa(r_employ_den, os.path.join(output, 'rWS_EmpDensity'),
                      "5", "NATURAL_BREAKS", "1")
    arcpy.gp.Slice_sa(r_commute, os.path.join(output, 'rWS_Commute'), "5",
                      "NATURAL_BREAKS", "1")
    print("Starting LC Reclass")
    # Reclassify Land Cover
    arcpy.gp.Reclassify_sa(
        os.path.join(inputs, 'ActiveLC'), "NLCD_Land_Cover_Class",
        "'Developed, Low Intensity' 3;'Developed, Medium Intensity' 4;'Developed, High Intensity' "
        "5;NODATA 1", os.path.join(output, 'rWS_LandCover'), "DATA")

    # Build weighted sum table
    env.workspace = output
    ws_table = [[raster, "VALUE", 1] for raster in arcpy.ListRasters("rWS_*")]

    # Create weighted sum (WSTable wraps the [raster, field, weight] rows)
    out_ws = arcpy.sa.WeightedSum(arcpy.sa.WSTable(ws_table))
    out_ws.save(os.path.join(output, "Weighted_Sum"))

    # Reclassify Weighted Sum
    ws_slice = arcpy.gp.Slice_sa(out_ws, os.path.join(output, "ws_slice"), "5",
                                 "NATURAL_BREAKS", "1")

    # Zonal Statistics using Blocks
    zonal_stats = arcpy.gp.ZonalStatistics_sa(
        os.path.join(inputs, "Blocks"), "OBJECTID", ws_slice,
        os.path.join(output, "ZonalSt_Demand"), "MEAN", "DATA")

    # Reclassify Zonal Statistics
    final_demand = arcpy.gp.Slice_sa(zonal_stats,
                                     os.path.join(output, "FinalDemand"), "5",
                                     "NATURAL_BREAKS", "1")

    # Convert FinalDemand to Polygon
    arcpy.RasterToPolygon_conversion(final_demand,
                                     os.path.join(output, "Demand_Polygons"),
                                     "SIMPLIFY", "Value", "")

    print("--- %s seconds ---" % (time.time() - start_time))
Example #28

print_arcpy_message("#  #  #  #  #  S T A R T  #  #  #  #  #", status=1)

# # # # # # # # Inputs # # # # # # # #

############################### TEST ##############################
# path = r'C:\Users\medad\OneDrive\שולחן העבודה\Check_Bankal\Data\Test.gdb'
################################################################

# path = r"C:\Users\Administrator\Desktop\medad\python\Work\Mpy\Tazar_For_run\EditTazar89705\CadasterEdit_Tazar.gdb"

path = arcpy.GetParameterAsText(0)  # GDB work space

arcpy.env.workspace = path
fc_List = arcpy.ListFeatureClasses()

path_polys = path + '\\' + Get_fc_from_List(fc_List, 'PARCEL_ALL')
path_lines = path + '\\' + Get_fc_from_List(fc_List, 'PARCEL_ARC')
path_points = path + '\\' + Get_fc_from_List(fc_List, 'PARCEL_NODE')

print(path_polys)
print(path_lines)
print(path_points)
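
# Hedged guard sketch (not in the original script): fail fast if any of the
# three PARCEL layers was not found in the workspace.
for layer_path in (path_polys, path_lines, path_points):
    if not arcpy.Exists(layer_path):
        raise RuntimeError("Missing required layer: " + layer_path)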

############################### Prepare data ##############################

print_arcpy_message("#  #  Prepare data #  # ", status=1)

createFolder('C:\\temp')
path_folder = r'C:\temp'
def fcs_in_workspace(inputFolder, outputFolder):
    # Create the output folder if it does not exist
    if not os.path.exists(outputFolder):
        os.makedirs(outputFolder)
    # Loop for root, subFolders, files in inputFolder
    suffix = ".shp"
    for root, subFolders, files in os.walk(inputFolder):
        # Loop for fileName in files
        for fileName in files:
            if fileName.endswith(suffix):
                # define environment workspace
                arcpy.env.workspace = root
                # List feature classes
                fclist = arcpy.ListFeatureClasses()
                # Loop for fc in fclist
                for fc in fclist:
                    # Describe fc name
                    # str.strip(".shp") removes characters, not the suffix; use splitext instead
                    name = os.path.splitext(arcpy.Describe(root + "\\" + fc).name)[0]
                    if name == "Income_DA":
                        duplicate(name, fc, outputFolder, 4)
                    elif name == "Labour_DA":
                        duplicate(name, fc, outputFolder, 1)
                    elif name == "Language_DA":
                        duplicate(name, fc, outputFolder, 3)
                    elif name == "Marital_DA":
                        duplicate(name, fc, outputFolder, 6)

                    elif name == "Income_CT":
                        duplicate(name, fc, outputFolder, 4)
                    elif name == "Labour_CT":
                        duplicate(name, fc, outputFolder, 1)
                    elif name == "Language_CT":
                        duplicate(name, fc, outputFolder, 3)
                    elif name == "Marital_CT":
                        duplicate(name, fc, outputFolder, 6)

    # Point the workspace at the duplicated shapefiles in outputFolder
    arcpy.env.workspace = outputFolder
    # List feature classes
    fclist = arcpy.ListFeatureClasses()
    # Loop for fc in fclist
    for fc in fclist:
        # Build the full path for this duplicated shapefile
        path = outputFolder + "\\" + fc
        name = os.path.splitext(arcpy.Describe(path).name)[0]
        value_process(path)
        arcpy.AddField_management(path, "normalize", "DOUBLE", "", "", 50)
        # Pre-declare keepFields, the expressions, and newName so each branch
        # below only fills in its own values before the shared data_process call.
        keepFields = []
        expression0 = ""
        expression1 = ""
        newName = ""

        # duplicated_name() (defined earlier in the full script) maps a base
        # name and copy index to the duplicated shapefile's name.
        if name == duplicated_name("Income_DA", 0):
            # Define keepFields, expression0 & newName for the shared data_process call
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "AVERAGE_HO", "AVERAGE_H_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!AVERAGE_H_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Household_Average_Income_Income_2006_DA.shp"
        elif name == duplicated_name("Income_DA", 1):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "PREVALE14", "PREVALE14_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!PREVALE14_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Low_Income_Income_2006_DA.shp"
        elif name == duplicated_name("Income_DA", 2):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "GOVERNMENT", "GOVERNMEN_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!GOVERNMEN_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Government_Transfers_Income_2006_DA.shp"
        elif name == duplicated_name("Income_DA", 3):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "AVERAGE_VA", "AVERAGE_V_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!AVERAGE_V_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Average_Value_Income_2006_DA.shp"
        elif name == duplicated_name("Labour_DA", 0):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "UNEMPLOYED",  "UNEMPLOYE_", "IN_THE_LAB", "IN_THE_LA_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!UNEMPLOYE_! / !IN_THE_LA_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Labour_Unemployment_Labour_2006_DA.shp"
        elif name == duplicated_name("Language_DA", 0):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "2001_TO_20", "_2001_TO_2", "TOTAL_PO6", "TOTAL_PO6_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!_2001_TO_2! / !TOTAL_PO6_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Immigration_Language_2006_DA.shp"
        elif name == duplicated_name("Language_DA", 1):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "NEITHER_EN", "NEITHER_E_", "TOTAL_PO2", "TOTAL_PO2_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!NEITHER_E_! / !TOTAL_PO2_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Neither_Language_Language_2006_DA.shp"
        elif name == duplicated_name("Language_DA", 2):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "CANADIAN_C", "CANADIAN__", "TOTAL_PO5", "TOTAL_PO5_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!CANADIAN__! / !TOTAL_PO5_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Canadian_Citizen_Language_2006_DA.shp"
        elif name == duplicated_name("Marital_DA", 0):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "RENTED", "RENTED_", "TOTAL_NU6", "TOTAL_NU6_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!RENTED_! / !TOTAL_NU6_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Rental_Marital_2006_DA.shp"
        elif name == duplicated_name("Marital_DA", 1):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "TOTAL_LONE", "TOTAL_LON_", "TOTAL_NUMB", "TOTAL_NUM_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!TOTAL_LON_! / !TOTAL_NUM_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Lone_Parent_Marital_2006_DA.shp"
        elif name == duplicated_name("Marital_DA", 2):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "LIVING_AL1", "LIVING_AL_", "TOTAL_NU4", "TOTAL_NU4_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!LIVING_AL_! / !TOTAL_NU4_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Living_Alone_Marital_2006_DA.shp"
        elif name == duplicated_name("Marital_DA", 3):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "MAJOR_REPA", "MAJOR_REP_", "TOTAL_NU7", "TOTAL_NU7_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!MAJOR_REP_! / !TOTAL_NU7_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Major_Repair_Marital_2006_DA.shp"
        elif name == duplicated_name("Marital_DA", 4):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "MOVABLE_DW", "MOVABLE_D_", "TOTAL_NU9", "TOTAL_NU9_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!MOVABLE_D_! / !TOTAL_NU9_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Mobile_Marital_2006_DA.shp"
        elif name == duplicated_name("Marital_DA", 5):
            keepFields = ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID", "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "PERIOD_OF_", "PERIOD_O_", "PERIOD_OF1", "PERIOD_O1_", "TOTAL_NU8", "TOTAL_NU8_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "(!PERIOD_O_! + !PERIOD_O1_!) / !TOTAL_NU8_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_1960Constructions_Marital_2006_DA.shp"

        elif name == duplicated_name("Income_CT", 0):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "AVERAGE_HO", "AVERAGE_H_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!AVERAGE_H_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Household_Average_Income_Income_2006_CT.shp"
        elif name == duplicated_name("Income_CT", 1):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "PREVALE14", "PREVALE14_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!PREVALE14_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Low_Income_Income_2006_CT.shp"
        elif name == duplicated_name("Income_CT", 2):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "GOVERNMENT", "GOVERNMEN_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!GOVERNMEN_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Government_Transfers_Income_2006_CT.shp"
        elif name == duplicated_name("Income_CT", 3):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "AVERAGE_VA", "AVERAGE_V_", "POPULATION", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!AVERAGE_V_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Average_Value_Income_2006_CT.shp"
        elif name == duplicated_name("Labour_CT", 0):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "UNEMPLOYED", "UNEMPLOYE_", "IN_THE_LAB", "IN_THE_LA_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!UNEMPLOYE_! / !IN_THE_LA_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Labour_Unemployment_Labour_2006_CT.shp"
        elif name == duplicated_name("Language_CT", 0):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "2001_TO_20", "_2001_TO_2", "TOTAL_PO6", "TOTAL_PO6_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!_2001_TO_2! / !TOTAL_PO6_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Immigration_Language_2006_CT.shp"
        elif name == duplicated_name("Language_CT", 1):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "NEITHER_EN", "NEITHER_E_", "TOTAL_PO2", "TOTAL_PO2_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!NEITHER_E_! / !TOTAL_PO2_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Neither_Language_Language_2006_CT.shp"
        elif name == duplicated_name("Language_CT", 2):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "CANADIAN_C", "CANADIAN__", "TOTAL_PO5", "TOTAL_PO5_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!CANADIAN__! / !TOTAL_PO5_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Canadian_Citizen_Language_2006_CT.shp"
        elif name == duplicated_name("Marital_CT", 0):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "RENTED", "RENTED_", "TOTAL_NU6", "TOTAL_NU6_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!RENTED_! / !TOTAL_NU6_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Rental_Marital_2006_CT.shp"
        elif name == duplicated_name("Marital_CT", 1):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "TOTAL_LONE", "TOTAL_LON_", "TOTAL_NUMB", "TOTAL_NUM_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!TOTAL_LON_! / !TOTAL_NUM_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Lone_Parent_Marital_2006_CT.shp"
        elif name == duplicated_name("Marital_CT", 2):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "LIVING_AL1", "LIVING_AL_", "TOTAL_NU4", "TOTAL_NU4_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!LIVING_AL_! / !TOTAL_NU4_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Living_Alone_Marital_2006_CT.shp"
        elif name == duplicated_name("Marital_CT", 3):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "MAJOR_REPA", "MAJOR_REP_", "TOTAL_NU7", "TOTAL_NU7_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!MAJOR_REP_! / !TOTAL_NU7_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Major_Repair_Marital_2006_CT.shp"
        elif name == duplicated_name("Marital_CT", 4):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "MOVABLE_DW", "MOVABLE_D_", "TOTAL_NU9", "TOTAL_NU9_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "!MOVABLE_D_! / !TOTAL_NU9_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_Mobile_Marital_2006_CT.shp"
        elif name == duplicated_name("Marital_CT", 5):
            keepFields = ["FID", "Shape", "CTUID", "CMAUID", "PRUID", "CT_UID", "OID_", "GEOGRAPHY", "PERIOD_OF_", "PERIOD_O_", "PERIOD_OF1", "PERIOD_O1_", "TOTAL_NU8", "TOTAL_NU8_", "Tvariable", "TFvariable", "Z_score"]
            expression0 = "(!PERIOD_O_! + !PERIOD_O1_!)/ !TOTAL_NU8_!"
            arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
            expression1 = variable_calculate(path, "normalize")
            newName = "Dwellings_1960Constructions_Marital_2006_CT.shp"
        else:
            continue

        # Run the shared data_process step once here instead of in every branch above
        data_process(path, keepFields, expression0, expression1, newName)
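
# Hedged refactor sketch (illustrative only): every branch above sets the same
# three values, so the ladder could be driven by a name -> (keepFields,
# expression0, newName) table. Two representative rows are shown; the rest
# follow the same pattern. Assumes duplicated_name(), variable_calculate()
# and data_process() from the full script.
LAYER_CONFIG = {
    duplicated_name("Income_DA", 0): (
        ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID",
         "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "AVERAGE_HO", "AVERAGE_H_",
         "POPULATION", "Tvariable", "TFvariable", "Z_score"],
        "!AVERAGE_H_!",
        "Household_Average_Income_Income_2006_DA.shp"),
    duplicated_name("Labour_DA", 0): (
        ["FID", "Shape", "DAUID", "CSDUID", "CCSUID", "CDUID", "ERUID", "PRUID",
         "CTUID", "CMAUID", "OID_", "GEOGRAPHY", "UNEMPLOYED", "UNEMPLOYE_",
         "IN_THE_LAB", "IN_THE_LA_", "Tvariable", "TFvariable", "Z_score"],
        "!UNEMPLOYE_! / !IN_THE_LA_!",
        "Labour_Unemployment_Labour_2006_DA.shp"),
}

def process_layer(path, name):
    # Table-driven equivalent of one pass through the if/elif ladder.
    if name not in LAYER_CONFIG:
        return
    keepFields, expression0, newName = LAYER_CONFIG[name]
    arcpy.CalculateField_management(path, "normalize", expression0, "PYTHON_9.3")
    expression1 = variable_calculate(path, "normalize")
    data_process(path, keepFields, expression0, expression1, newName)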
Example #30
import os

import arcpy
from arcpy import env

env.workspace = r"H:\PythonGIS\youngsang\data\data\geoportal.gdb"
env.overwriteOutput = True
outDir = r"H:\PythonGIS\youngsang\data\data"
#arcpy.CreateFileGDB_management(outDir, "out.gdb")
outDir1 = os.path.join(outDir, "out.gdb")

# Clip every point feature class to the 'polygon' boundary layer
fcList = arcpy.ListFeatureClasses(feature_type="Point")
print(fcList)
clipFc = "polygon"
for item in fcList:
    print(item + " is now processing")
    arcpy.Clip_analysis(item, clipFc, os.path.join(outDir1, item + "_clip"))
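
# Hedged follow-up sketch (not in the original): list out.gdb afterwards to
# confirm every clipped copy was written.
env.workspace = outDir1
for fc in arcpy.ListFeatureClasses("*_clip"):
    print(fc + " exists in out.gdb")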