Example #1
arcpy.SetProgressorLabel(
    "Calculating dis, please wait several minutes...")
dis = (PC1 * PC1 * 0.166 + PC2 * PC2 * 0.166 + PC3 * PC3 * 0.166 +
       PC4 * PC4 * 0.166 + PC5 * PC5 * 0.166 + PC6 * PC6 * 0.166)**0.5
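# Note: 0.166 is approximately 1/6, so the six principal components are weighted
# equally and dis is the Euclidean norm of the PC vector scaled by sqrt(1/6).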
dis.save(outPut + "/dis")
myremap = RemapRange([[0, 0.1, 1], [0.1, 100, 2]])
arcpy.SetProgressorLabel(
    "Reclassifying dis, please wait several minutes...")
redis = Reclassify(dis, "Value", myremap)
arcpy.SetProgressorLabel(
    "Saving redis, please wait several minutes...")
redis.save(outPut + "/redis")
#------------------------------To calculate suitable soil regions--------------------------------------------#
table = outPut + "/table"
print(table)
arcpy.CopyRows_management(input, table)
arcpy.CopyRows_management("HWSD_SMU", outPut + "/HWSD_SMU")
arcpy.CopyRows_management("HWSD_DATA", outPut + "/HWSD_DATA")

table_listfield = arcpy.ListFields(table)
fieldName = []
for tt in table_listfield:
    fieldName.append(tt.name)
if 'SU_CODE' not in fieldName:
    arcpy.JoinField_management(table, "hwsd", outPut + "/HWSD_SMU",
                               "MU_GLOBAL", "SU_CODE;MU_GLOBAL")
    arcpy.JoinField_management(
        table, "MU_GLOBAL", outPut + "/HWSD_DATA", "MU_GLOBAL",
        "T_GRAVEL;T_SAND;T_SILT;T_CLAY;T_USDA_TEX_CLASS;T_REF_BULK_DENSI;T_BULK_DENSITY;T_OC;T_PH_H2O;T_CEC_CLAY;T_CEC_SOIL;T_BS;T_TEB;T_CACO3;T_CASO4;T_ESP;T_ECE"
    )
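
# A minimal sanity check (an illustrative addition, not part of the original
# script) that the two joins above actually brought the HWSD attributes across:
joined_fields = [f.name for f in arcpy.ListFields(table)]
assert 'SU_CODE' in joined_fields and 'T_PH_H2O' in joined_fields
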
def DrawRadialFlows():

    # Get the value of the input parameter
    inTable = arcpy.GetParameterAsText(0)
    startX_field = arcpy.GetParameterAsText(1)
    startY_field = arcpy.GetParameterAsText(2)
    endX_field = arcpy.GetParameterAsText(3)
    endY_field = arcpy.GetParameterAsText(4)
    id_field = arcpy.GetParameterAsText(5)
    lineType_str = arcpy.GetParameterAsText(6)
    spRef = arcpy.GetParameterAsText(7)
    joinFields = arcpy.GetParameterAsText(8)
    joinFields = joinFields.split(";")
    isChecked_AddNodes = arcpy.GetParameter(9)
    outFlowsLyrName = arcpy.GetParameterAsText(10)
    outNodesLyrName = arcpy.GetParameterAsText(11)

    if inTable and inTable != "#":

        try:
            # create empty list to append all output layers
            outList = []

            if isChecked_AddNodes and outNodesLyrName != '':

                # Make XY Event Layer (temporary)
                # Local variable:
                nodesXY = r"in_memory\nodes_lyr"
                arcpy.AddMessage('Creating Nodes at Flow Destinations ...')
                arcpy.SetProgressorLabel(
                    'Creating Nodes at Flow Destinations ...')
                arcpy.MakeXYEventLayer_management(table=inTable,
                                                  in_x_field=endX_field,
                                                  in_y_field=endY_field,
                                                  out_layer=nodesXY)

                # Copy XY Event Layer to Feature Class
                nodesOutputFC = os.path.join(arcpy.env.scratchGDB,
                                             outNodesLyrName)
                arcpy.CopyFeatures_management(in_features=nodesXY,
                                              out_feature_class=nodesOutputFC)
                outList.append(nodesOutputFC)

            # XY To Line
            flowsOutputFC = os.path.join(arcpy.env.scratchGDB, outFlowsLyrName)
            arcpy.AddMessage('Saving Flow Lines to: ' + flowsOutputFC)
            arcpy.SetProgressorLabel('Creating Radial Flow Lines ...')
            if id_field:
                arcpy.XYToLine_management(in_table=inTable,
                                          out_featureclass=flowsOutputFC,
                                          startx_field=startX_field,
                                          starty_field=startY_field,
                                          endx_field=endX_field,
                                          endy_field=endY_field,
                                          line_type=lineType_str,
                                          id_field=id_field,
                                          spatial_reference=spRef)

                if joinFields[0] != '':
                    ### IF any Join Fields are specified, then Copy Rows first and Join that to the input table ###
                    arcpy.AddMessage('Creating Temporary Join Table ...')
                    arcpy.SetProgressorLabel(
                        'Creating Temporary Join Table ...')
                    ### Copy Rows from Input Table to make sure it has an OID ###
                    outTable = r"in_memory\tempTable"
                    arcpy.CopyRows_management(inTable, outTable)

                    ### JOIN ###
                    arcpy.AddMessage('Joining Selected Fields ...')
                    arcpy.SetProgressorLabel('Joining Selected Fields ...')
                    arcpy.JoinField_management(in_data=flowsOutputFC,
                                               in_field=id_field,
                                               join_table=outTable,
                                               join_field=id_field,
                                               fields=joinFields)
                else:
                    arcpy.AddWarning(
                        "WARNING: No join fields have been selected. Only the ID field will be copied to the output feature class!"
                    )

            else:
                arcpy.XYToLine_management(in_table=inTable,
                                          out_featureclass=flowsOutputFC,
                                          startx_field=startX_field,
                                          starty_field=startY_field,
                                          endx_field=endX_field,
                                          endy_field=endY_field,
                                          line_type=lineType_str,
                                          spatial_reference=spRef)

            outList.append(flowsOutputFC)

            # Send string of (derived) output parameters back to the tool
            results = ";".join(outList)
            arcpy.SetParameterAsText(12, results)
            arcpy.ResetProgressor()

        except Exception:
            e = sys.exc_info()[1]
            arcpy.AddError('An error occurred: {}'.format(e.args[0]))
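
# A standalone sketch of the same XY To Line call used above; the paths and
# field names here are hypothetical placeholders, not values from this tool:
#
#   import arcpy, os
#   arcpy.XYToLine_management(in_table=r"C:\data\flows.csv",
#                             out_featureclass=os.path.join(arcpy.env.scratchGDB,
#                                                           "flows"),
#                             startx_field="origin_x", starty_field="origin_y",
#                             endx_field="dest_x", endy_field="dest_y",
#                             line_type="GEODESIC", id_field="flow_id",
#                             spatial_reference=arcpy.SpatialReference(4326))
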
def tableTo2PointLine(inputTable, inputStartCoordinateFormat, inputStartXField,
                      inputStartYField, inputEndCoordinateFormat,
                      inputEndXField, inputEndYField, outputLineFeatures,
                      inputLineType, inputSpatialReference):
    '''
    Creates line features from a start point coordinate and an endpoint coordinate.

    inputTable - Input Table
    inputStartCoordinateFormat - Start Point Format (from Value List)
    inputStartXField - Start X Field (longitude, UTM, MGRS, USNG, GARS, GEOREF)(from Input Table)
    inputStartYField - Start Y Field (latitude)(from Input Table)
    inputEndCoordinateFormat - End Point Format (from Value List)
    inputEndXField - End X Field (longitude, UTM, MGRS, USNG, GARS, GEOREF)(from Input Table)
    inputEndYField - End Y Field (latitude) (from Input Table)
    outputLineFeatures - Output Line
    inputLineType - Line Type (from Value List)
    inputSpatialReference - Spatial Reference, default is GCS_WGS_1984

    returns line feature class

    inputStartCoordinateFormat and inputEndCoordinateFormat must be one of the following:
    * DD_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DD_2: Longitude and latitude values are in two separate fields.
    * DDM_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DDM_2: Longitude and latitude values are in two separate fields.
    * DMS_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DMS_2: Longitude and latitude values are in two separate fields.
    * GARS: Global Area Reference System. Based on latitude and longitude, it divides and subdivides the world into cells.
    * GEOREF: World Geographic Reference System. A grid-based system that divides the world into 15-degree quadrangles and then subdivides into smaller quadrangles.
    * UTM_ZONES: The letter N or S after the UTM zone number designates only the hemisphere (north or south), not a latitude band.
    * UTM_BANDS: The letter after the UTM zone number designates one of the 20 latitude bands. N or S does not designate a hemisphere.
    * USNG: United States National Grid. Almost exactly the same as MGRS but uses North American Datum 1983 (NAD83) as its datum.
    * MGRS: Military Grid Reference System. Follows the UTM coordinates and divides the world into 6-degree longitude zones and 20 latitude bands, but MGRS then further subdivides the grid zones into smaller 100,000-meter grids. These 100,000-meter grids are then divided into 10,000-meter, 1,000-meter, 100-meter, 10-meter, and 1-meter grids.
    
    inputLineType must be one of the following:
    * GEODESIC: a line that represents the shortest distance between the two points on the surface of the earth's ellipsoid.
    * GREAT_CIRCLE: a line on a spheroid defined by the intersection of a plane passing through the center of the spheroid.
    * RHUMB_LINE: a line of constant bearing (a loxodrome).
    * NORMAL_SECTION: a line defined by the intersection of the ellipsoid surface with a plane through the normal at the start point.

    '''
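
    # Illustrative values (hypothetical, not from any real table): with
    # inputStartCoordinateFormat='DD_2' the table supplies two numeric fields,
    # e.g. lon = -93.25 and lat = 45.01; with 'DD_1' a single field holds both
    # values in one string, e.g. "45.01N 93.25W".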
    try:
        # get/set environment
        env.overwriteOutput = True

        deleteme = []
        scratch = '%scratchGDB%'

        joinFieldName = "JoinID"
        startXFieldName = "startX"
        startYFieldName = "startY"
        endXFieldName = "endX"
        endYFieldName = "endY"

        if env.scratchWorkspace:
            scratch = env.scratchWorkspace

        inputSpatialReference = _checkSpatialRef(inputSpatialReference)

        copyRows = os.path.join(scratch, "copyRows")
        arcpy.CopyRows_management(inputTable, copyRows)
        originalTableFieldNames = _tableFieldNames(inputTable,
                                                   joinExcludeFields)
        addUniqueRowID(copyRows, joinFieldName)

        #Convert Start Point
        arcpy.AddMessage("Formatting start point...")
        startCCN = os.path.join(scratch, "startCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows, startCCN,
                                                   inputStartXField,
                                                   inputStartYField,
                                                   inputStartCoordinateFormat,
                                                   "DD_NUMERIC", joinFieldName)
        arcpy.AddField_management(startCCN, startXFieldName, "DOUBLE")
        arcpy.CalculateField_management(startCCN, startXFieldName, "!DDLon!",
                                        "PYTHON_9.3")
        arcpy.AddField_management(startCCN, startYFieldName, "DOUBLE")
        arcpy.CalculateField_management(startCCN, startYFieldName, "!DDLat!",
                                        "PYTHON_9.3")
        arcpy.JoinField_management(copyRows, joinFieldName, startCCN,
                                   joinFieldName,
                                   [startXFieldName, startYFieldName])

        #Convert End Point
        arcpy.AddMessage("Formatting end point...")
        endCCN = os.path.join(scratch, "endCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows, endCCN,
                                                   inputEndXField,
                                                   inputEndYField,
                                                   inputEndCoordinateFormat,
                                                   "DD_NUMERIC", joinFieldName)
        arcpy.AddField_management(endCCN, endXFieldName, "DOUBLE")
        arcpy.CalculateField_management(endCCN, endXFieldName, "!DDLon!",
                                        "PYTHON_9.3")
        arcpy.AddField_management(endCCN, endYFieldName, "DOUBLE")
        arcpy.CalculateField_management(endCCN, endYFieldName, "!DDLat!",
                                        "PYTHON_9.3")
        arcpy.JoinField_management(copyRows, joinFieldName, endCCN,
                                   joinFieldName,
                                   [endXFieldName, endYFieldName])

        #XY TO LINE
        arcpy.AddMessage(
            "Connecting start point to end point as {0}...".format(
                inputLineType))
        arcpy.XYToLine_management(copyRows, outputLineFeatures,
                                  startXFieldName, startYFieldName,
                                  endXFieldName, endYFieldName, inputLineType,
                                  joinFieldName, inputSpatialReference)

        #Join original table fields to output
        arcpy.AddMessage(
            "Joining fields from input table to output line features...")
        arcpy.JoinField_management(outputLineFeatures, joinFieldName, copyRows,
                                   joinFieldName, originalTableFieldNames)

        arcpy.DeleteField_management(outputLineFeatures, [
            joinFieldName, startXFieldName, startYFieldName, endXFieldName,
            endYFieldName
        ])

        return outputLineFeatures

    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(
            sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)

    finally:
        if len(deleteme) > 0:
            # cleanup intermediate datasets
            if debug:
                arcpy.AddMessage("Removing intermediate datasets...")
            for i in deleteme:
                if debug: arcpy.AddMessage("Removing: " + str(i))
                arcpy.Delete_management(i)
            if debug: arcpy.AddMessage("Done")
            if b_createPools:
                if not createPool(maxElevConverted, storageCSV):
                    pass
            maxElevConverted = maxElevConverted - increment
        del i

    except:
        print_exception()
        sys.exit()

    if arcpy.Exists(tempDEM):
        arcpy.Delete_management(tempDEM)

    #------------------------------------------------------------------------ Convert StorageCSV to FGDB Table and populate fields.
    arcpy.CopyRows_management(storageCSV, storageTable, "")
    arcpy.AddField_management(storageTable, "ELEV_FEET", "DOUBLE", "5", "1",
                              "", "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.AddField_management(storageTable, "POOL_ACRES", "DOUBLE", "", "", "",
                              "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.AddField_management(storageTable, "POOL_SQFT", "DOUBLE", "", "", "",
                              "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.AddField_management(storageTable, "ACRE_FOOT", "DOUBLE", "", "", "",
                              "", "NULLABLE", "NON_REQUIRED", "")

    elevFeetCalc = "round(!Plane_Height! *" + str(conversionFactor) + ",1)"
    poolAcresCalc = "round(!Area_2D! /" + str(acreConversion) + ",1)"
    poolSqftCalc = "round(!Area_2D! /" + str(ftConversion) + ",1)"
    acreFootCalc = "round(!Volume! /" + str(volConversion) + ",1)"
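
    # e.g. (illustrative, assuming conversionFactor = 3.280839896 for metres
    # to feet) a Plane_Height of 10 gives ELEV_FEET = round(10 * 3.280839896, 1)
    # = 32.8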

    arcpy.CalculateField_management(storageTable, "ELEV_FEET", elevFeetCalc,
                                    "PYTHON")
    expression = "round(!stationElev.MEAN! * " + str(Zfactor) + ",1)"
    arcpy.CalculateField_management(stationLyr, "stations.POINT_Z", expression,
                                    "PYTHON")
    arcpy.RemoveJoin_management(stationLyr, "stationElev")
    arcpy.DeleteField_management(stationTemp, "STATIONID; POINT_M")
    del expression

    # ---------------------------------------------------------------------- Create final output
    # Interpolate Line to 3d via Z factor
    arcpy.InterpolateShape_3d(DEM_aoi, lineTemp, outLine, "", Zfactor)

    # Copy Station Points
    arcpy.CopyFeatures_management(stationTemp, outPoints)

    # Copy output to tables folder
    arcpy.CopyRows_management(outPoints, pointsTable, "")
    #arcpy.CopyRows_management(stakeoutPoints, stakeoutTable, "")   # Redundant to the wascobStations script. Doesn't do anything here.

    # ------------------------------------------------------------------- Delete Temp Layers
    AddMsgAndPrint("\nDeleting temporary files...", 0)
    layersToRemove = (lineTemp, routes, stationTable, stationEvents,
                      stationTemp, stationLyr, stationBuffer, stationElev)

    x = 0
    for layer in layersToRemove:
        if arcpy.Exists(layer):
            if x == 0:
                x += 1
            try:
                arcpy.Delete_management(layer)
            except:
                pass
# Import arcpy module
import arcpy

# Script arguments
Jeopardy_CSV_File = arcpy.GetParameterAsText(0)
if Jeopardy_CSV_File == '#' or not Jeopardy_CSV_File:
    Jeopardy_CSV_File = "C:\\Users\\jvwhit\\Documents\\GitHub\\Python-For-ArcGIS-2017\\Advanced_PythonForArcGIS\\Data\\Answers_Adv\\CSV\\JeopardyContestants_LatLon.csv"  # provide a default value if unspecified

# Local variables:
JeopardyContestants_Table = "C:\\Users\\jvwhit\\Documents\\GitHub\\Python-For-ArcGIS-2017\\Advanced_PythonForArcGIS\\Data\\Answers_Adv\\Illinois.gdb\\JeopardyContestants_Table"
Jeopardy_Contestants = "Jeopardy Contestants"
Jeopardy_Contestants_Feature_Class_Output = "C:\\Users\\jvwhit\\Documents\\GitHub\\Python-For-ArcGIS-2017\\Advanced_PythonForArcGIS\\Data\\Answers_Adv\\Illinois.gdb\\JeopardyContestants"
Jeopardy_Contestants_Buffer_Output = "C:\\Users\\jvwhit\\Documents\\GitHub\\Python-For-ArcGIS-2017\\Advanced_PythonForArcGIS\\Data\\Answers_Adv\\Illinois.gdb\\JeopardyContestants_Buffer"

# Process: Copy Rows
arcpy.CopyRows_management(Jeopardy_CSV_File, JeopardyContestants_Table, "")

# Process: Make XY Event Layer
arcpy.MakeXYEventLayer_management(
    JeopardyContestants_Table, "lon", "lat", Jeopardy_Contestants,
    "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8.98315284119522E-09;0.001;0.001;IsHighPrecision",
    "")

# Process: Select
# keep only rows where both coordinates are present
arcpy.Select_analysis(Jeopardy_Contestants,
                      Jeopardy_Contestants_Feature_Class_Output,
                      "\"lat\" IS NOT NULL AND \"lon\" IS NOT NULL")

# Process: Buffer
arcpy.Buffer_analysis(Jeopardy_Contestants_Feature_Class_Output,
                      Jeopardy_Contestants_Buffer_Output, "5 Miles", "FULL",
                      "ROUND", "NONE", "")
Example #7
    def join_SFR_out2streams(self, use_arcpy=True):

        # get model info
        try:
            DX, DY, NLAY, NROW, NCOL, i = disutil.read_meta_data(self.DISfile)
        except:
            raise IOError("Cannot read MODFLOW DIS file {0}".format(
                self.DISfile))

        print "\naggregating flow information by cellnum..."
        indata = open(self.SFR_out).readlines()
        for line in indata[8:]:

            line = line.strip().split()

            # Kludge! terminates at the first blank line, so only stress
            # period 1 is read; support for transient output is still needed.
            if len(line) == 0:
                break

            r, c = int(line[1]), int(line[2])
            cellnum = (r - 1) * NCOL + c
            seg_rch = "{0} {1}; ".format(line[3], line[4])
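            # flow: mean of the two flow columns; the remaining values are read
            # from their fixed column positions in the SFR output line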
            flow = 0.5 * (float(line[5]) + float(line[7]))
            loss = float(line[6])
            overland = float(line[8])
            stage = float(line[11])
            depth = float(line[12])

            try:
                existingflow = self.flow_by_cellnum[cellnum]
                seg_rch_info = self.seg_rch_by_cellnum[cellnum]
            except KeyError:
                existingflow = 0
                seg_rch_info = 'segs  rchs: '

            # determine state
            if flow == 0:
                state = 'dry'
            elif loss > 0:
                state = 'losing'
            elif loss < 0:
                state = 'gaining'
            else:
                print('Stream reach in cell {} has flow, but no interaction with aquifer.'.format(
                    cellnum))
                # assumed label: the original left state unset here, silently
                # reusing the previous reach's value
                state = 'neutral'

            self.flow_by_cellnum[cellnum] = existingflow + flow
            self.seg_rch_by_cellnum[cellnum] = seg_rch_info + seg_rch
            self.loss_by_cellnum[cellnum] = loss
            self.state_by_cellnum[cellnum] = state
            self.overland_by_cellnum[cellnum] = overland
            self.stage_by_cellnum[cellnum] = stage
            self.depth_by_cellnum[cellnum] = depth

        # write to temporary output file
        ofp = open(os.path.join(self.outpath, 'temp.csv'), 'w')
        ofp.write(
            '{},row,column,seg_reach,flow,loss,overland,state,stage,depth\n'.
            format(self.node_num_attribute))
        for cn in self.flow_by_cellnum.keys():
            ofp.write('{0},{1},{2},"{3}",{4:.6e},{5},{6},{7},{8},{9}\n'.format(
                cn, 1, 1, self.seg_rch_by_cellnum[cn],
                self.flow_by_cellnum[cn], self.loss_by_cellnum[cn],
                self.overland_by_cellnum[cn], self.state_by_cellnum[cn],
                self.stage_by_cellnum[cn], self.depth_by_cellnum[cn]))
        ofp.close()

        outfile = os.path.join(self.outpath,
                               "{0}.shp".format(self.SFR_out[:-4]))
        if use_arcpy:
            try:
                import arcpy
                import SFR_arcpy
            except ImportError:
                print('module arcpy not found!')

            # make feature/table layers
            arcpy.env.workspace = self.outpath
            arcpy.env.overwriteOutput = True
            arcpy.CopyFeatures_management(
                self.streams_shp, self.streams_shp[:-4] + '_backup.shp')
            arcpy.MakeFeatureLayer_management(
                self.streams_shp[:-4] + '_backup.shp', "streams")
            arcpy.CopyRows_management(os.path.join(self.outpath, 'temp.csv'),
                                      os.path.join(self.outpath, 'temp.dbf'))

            # drop all fields except for cellnum from stream linework
            Fields = arcpy.ListFields("streams")
            Fields = [
                f.name for f in Fields
                if f.name not in ["FID", "Shape", self.node_num_attribute]
            ]
            if len(Fields) > 0:
                arcpy.DeleteField_management("streams", Fields)

            SFR_arcpy.general_join(outfile,
                                   "streams",
                                   self.node_num_attribute,
                                   "temp.dbf",
                                   self.node_num_attribute,
                                   keep_common=True)

        else:
            import sys
            sys.path.append('../../GIS_utils')
            try:
                import GISops
            except ImportError:
                print('GIS_utils.GISops not found!')
            GISops.join_csv2shp(self.streams_shp,
                                self.node_num_attribute,
                                os.path.join(self.outpath, 'temp.csv'),
                                self.node_num_attribute,
                                outfile,
                                how='inner')
def main(*argv):
    """ main driver of program """
    try:
        if os.path.split(sys.executable)[1] == 'ArcGISPro.exe':

            # Expected Parameters
            in_new = argv[0]
            in_old = argv[1]
            unique = argv[2]
            out_db = argv[3]
            # assumed default: t_flag is referenced below, but this branch
            # never set it in the original
            t_flag = 'fc'

        else:

            # Expected Parameters
            in_new = argv[0]
            in_old = argv[1]
            unique = argv[2]
            out_db = argv[3]
            t_flag = argv[4]

            if t_flag.lower() not in ['fc', 'fs', 'sdf']:
                raise Exception('Input Type Not In Accepted Options: fc | fs | sdf')

        #  Local Variables
        out_table    = os.path.join(out_db, "InformationTable")
        out_fc       = os.path.join(out_db, "changed_features")
        change_csv   = os.path.join(arcpy.env.scratchFolder, "changes.csv")
        change_table = os.path.join(out_db, "change_table")

        # Remove Existing Files
        for target in [out_fc, out_table]:
            if arcpy.Exists(target):
                arcpy.Delete_management(target)

        # Create Information Table (Overview of Differences)
        if t_flag != 'sdf':
            build_information_table(out_db, in_new, in_old)

            # Create SpatialDataFrame Objects
            old_sdf = arcgis.features.SpatialDataFrame.from_featureclass(in_old)
            new_sdf = arcgis.features.SpatialDataFrame.from_featureclass(in_new)
        else:
            old_sdf = in_old
            new_sdf = in_new

        # Remove Duplicate Values in old_sdf/new_sdf
        for sdf_set in [[old_sdf, in_old], [new_sdf, in_new]]:
            handle_duplicates(sdf_set, unique, t_flag)

        # Find Adds, Deletes and Matching Values
        merged = pd.merge(old_sdf, new_sdf, on=[unique], how='outer', indicator=True)
        adds    = merged.loc[merged['_merge'] == 'right_only']
        deletes = merged.loc[merged['_merge'] == 'left_only']
        if len(adds) > 0:
            q = new_sdf[unique].isin(adds[unique].tolist())
            new_sdf[q].to_featureclass(
                out_location=out_db,
                out_name="added_features",
                overwrite=True,
                skip_invalid=True
            )
        if len(deletes) > 0:
            q = old_sdf[unique].isin(deletes[unique].tolist())
            old_sdf[q].to_featureclass(
                out_location=out_db,
                out_name="deleted_features",
                overwrite=True,
                skip_invalid=True
            )

        # Assess Changed Features
        fields = [field for field in old_sdf.columns.tolist() if field in new_sdf.columns.tolist()]
        if 'SHAPE' in fields:
            fields.remove("SHAPE")

        old_uids = set(old_sdf[unique].unique().tolist())
        new_uids = set(new_sdf[unique].unique().tolist())
        common_uids = list(new_uids.intersection(old_uids))
        cq = new_sdf[unique].isin(common_uids)
        cq1 = old_sdf[unique].isin(common_uids)
        old_sdf = old_sdf[cq1].copy()
        new_sdf = new_sdf[cq].copy()

        new_sdf.index = new_sdf[unique]
        old_sdf.index = old_sdf[unique]
        old_sdf.sort_index(inplace=True)
        new_sdf.sort_index(inplace=True)
        ne_stacked = (old_sdf[fields] != new_sdf[fields]).stack()
        changed = ne_stacked[ne_stacked]
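        # ne_stacked is a Boolean Series indexed by (unique id, column name);
        # keeping only the True entries leaves one row per changed cell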

        changed.index.names = [unique, 'col']
        difference_locations = np.where(old_sdf[fields] != new_sdf[fields])
        changed_from = new_sdf[fields].values[difference_locations]
        changed_to = old_sdf[fields].values[difference_locations]
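        # note: as written, 'from_val' is filled from new_sdf and 'to_val' from
        # old_sdf, i.e. the names are reversed relative to an old-to-new reading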
        df_new = pd.DataFrame({'from_val': changed_from, 'to_val': changed_to}, index=changed.index)
        df_new.reset_index(level=['col'],inplace=True)
        q3 = df_new['from_val'].isnull() & df_new['to_val'].isnull()
        df_new[~q3].to_csv(change_csv)
        joined_sdf = arcgis.features.SpatialDataFrame.merge(new_sdf, df_new[~q3], right_index=True, left_index=True)

        q4 = joined_sdf['from_val'].isnull() & joined_sdf['to_val'].isnull()
        stripped_sdf = joined_sdf[~q4]
        stripped_sdf.drop('from_val', axis=1, inplace=True)
        stripped_sdf.drop('to_val', axis=1, inplace=True)
        stripped_sdf.drop('col', axis=1, inplace=True)
        stripped_sdf['Edit Count'] = stripped_sdf.groupby([unique]).size()
        stripped_sdf.drop_duplicates(subset=unique, keep='last', inplace=True)
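        # one row per unique id remains: 'Edit Count' is the number of changed
        # attribute cells for that feature, and only its last change row is kept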

        stripped_sdf.to_featureclass(
            out_location=out_db,
            out_name="changed_features",
            overwrite=True,
            skip_invalid=True
        )
        arcpy.CopyRows_management(change_csv, change_table)

        arcpy.AddMessage('Done.')

    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
        arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
        messages = f_e.args[0]
        arcpy.AddError("error in function: %s" % messages["function"])
        arcpy.AddError("error on line: %s" % messages["line"])
        arcpy.AddError("error in file name: %s" % messages["filename"])
        arcpy.AddError("with error message: %s" % messages["synerror"])
        arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
Example #9
sqlClause = "(year = 2013 AND hour IN (16,17)) AND day IN (7,14,21,28,35,42,49,56,63,70,77,84,91,98,105,112,119,126,133,140,147,154,161,168,175,182,189,196,203,210,217,224,231,238,245,252,259,266,273,280,287,294,301,308,315,322,329,336,343,350,357,364)"
whereClause = "(CycleEvents.edge_id = AucklandStravaMetro_Edges_NZTM.edge_id)"

# TODO: make Query Layer
print('make Query Layer')
listTables = []
listTables.append(tbl)
arcpy.MakeQueryTable_management(in_table=listTables,
                                out_table="auckland_edges_ride_data",
                                in_key_field_option="ADD_VIRTUAL_KEY_FIELD")

# copy Query Table locally
print('Copy Query Tables Locally to Cycle Events')
if arcpy.Exists("CycleEvents"):
    arcpy.Delete_management("CycleEvents")
arcpy.CopyRows_management("auckland_edges_ride_data", "CycleEvents")
flds = []
flds.append("edge_id")
arcpy.AddIndex_management(in_table="CycleEvents",
                          fields=flds,
                          index_name="EdgeIdIdx")

#many To One Join
#print "start Many To One Join"
#listTables = ["CycleEvents", fc]
#arcpy.MakeQueryTable_management(in_table = listTables, out_table = "temp1", in_key_field_option = "ADD_VIRTUAL_KEY_FIELD", where_clause = whereClause)
#print 'Copy Query Tables Locally to All Cycle Event Features'
#if arcpy.Exists("AllCycleEvents"):
#    print "deleting AllCycleEvents..."
#    arcpy.Delete_management("AllCycleEvents")
#print "copying AllCycleEvents..."
Example #10
#Output variable
patchTable = sys.argv[
    -2]  # The last input is a boolean in the tool that allows for resetting values


##--FUNCTIONS--
def msg(txt):
    print(txt)
    arcpy.AddMessage(txt)
    return


##--PROCESSES--
#Create a table from the CSV file
msg("Creating attribute table")
attributeTbl = arcpy.CopyRows_management(inputCSV, "in_memory/attributeTbl")

#Get field names for everything except the OID and patch ID fields (i.e. the first two fields)
fldNames = []
for f in arcpy.ListFields(attributeTbl)[2:]:
    fldNames.append(str(f.name))

#Calculate summary stats for each field
msg("Analysing values to compute normalizations")
statFlds = []
for f in fldNames:
    msg("...%s" % f)
    statFlds.append([f, "MIN"])
    statFlds.append([f, "MAX"])
    statFlds.append([f, "RANGE"])
sumTbl = arcpy.Statistics_analysis(attributeTbl, "in_memory/sumTbl", statFlds)
Example #11
def main(projPath, in_network, landuse, CrossingLow, CrossingHigh, AdjLow,
         AdjHigh, CanalLow, CanalHigh, RRLow, RRHigh, out_name):

    scratch = 'in_memory'

    arcpy.env.overwriteOutput = True

    # CrossingLow = 10
    # CrossingHigh = 100
    # AdjLow = 10
    # AdjHigh = 100
    # CanalLow = 50
    # CanalHigh = 200
    # RRLow = 30
    # RRHigh = 100

    # function to calculate slope-intercept equation based on user inputs
    def slopeInt(lowValue, highValue):
        x1 = lowValue
        y1 = 0.99
        x2 = highValue
        y2 = 0.01
        m = (y2 - y1) / (x2 - x1)  # calculate slope
        b = y1 - (m * x1)  # calculate y-intercept
        return [m, b]
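    # e.g. slopeInt(10, 100) returns m = (0.01 - 0.99) / (100 - 10) ≈ -0.0109
    # and b = 0.99 - m * 10 ≈ 1.099, so a score decays linearly from 0.99 at
    # the low threshold to 0.01 at the high threshold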

    if out_name.endswith('.shp'):
        out_network = os.path.join(os.path.dirname(in_network), out_name)
    else:
        out_network = os.path.join(os.path.dirname(in_network),
                                   out_name + ".shp")

    arcpy.CopyFeatures_management(in_network, out_network)

    # check for oPC_Score field and delete if already exists
    fields = [f.name for f in arcpy.ListFields(out_network)]
    if "oPC_Score" in fields:
        arcpy.DeleteField_management(out_network, "oPC_Score")

    # create segid array for joining output
    segid_np = arcpy.da.FeatureClassToNumPyArray(out_network, "ReachID")
    segid_array = np.asarray(segid_np, np.int64)

    # road crossing conflict
    if "iPC_RoadX" in fields:
        roadx_array = arcpy.da.FeatureClassToNumPyArray(
            out_network, "iPC_RoadX")
        roadx = np.asarray(roadx_array, np.float64)
        roadx_pc = np.empty_like(roadx)
        m = slopeInt(CrossingLow, CrossingHigh)[0]
        b = slopeInt(CrossingLow, CrossingHigh)[1]
        for i in range(len(roadx)):
            if roadx[i] >= 0 and roadx[i] <= CrossingLow:
                roadx_pc[i] = 0.99
            elif roadx[i] > CrossingLow and roadx[i] <= CrossingHigh:
                roadx_pc[i] = m * roadx[i] + b
            elif roadx[i] > CrossingHigh:
                roadx_pc[i] = 0.01
            else:
                roadx_pc[i] = 0.01

        del roadx_array, roadx, m, b
    else:
        roadx_pc = np.zeros_like(segid_array)

    # road adjacent conflict
    if "iPC_RoadAd" in fields:
        roadad_array = arcpy.da.FeatureClassToNumPyArray(
            out_network, "iPC_RoadAd")
        roadad = np.asarray(roadad_array, np.float64)
        #roadad_pc = np.zeros_like(roadad)
        roadad_pc = np.empty_like(roadad)
        m = slopeInt(AdjLow, AdjHigh)[0]
        b = slopeInt(AdjLow, AdjHigh)[1]
        for i in range(len(roadad)):
            if roadad[i] >= 0 and roadad[i] <= AdjLow:
                roadad_pc[i] = 0.99
            elif roadad[i] > AdjLow and roadad[i] <= AdjHigh:
                roadad_pc[i] = m * roadad[i] + b
            elif roadad[i] > AdjHigh:
                roadad_pc[i] = 0.01
            else:
                roadad_pc[i] = 0.01

        del roadad_array, roadad, m, b
    else:
        roadad_pc = np.zeros_like(segid_array)

    # canal conflict
    if "iPC_Canal" in fields:
        canal_array = arcpy.da.FeatureClassToNumPyArray(
            out_network, "iPC_Canal")
        canal = np.asarray(canal_array, np.float64)
        #canal_pc = np.zeros_like(canal)
        canal_pc = np.empty_like(canal)
        m = slopeInt(CanalLow, CanalHigh)[0]
        b = slopeInt(CanalLow, CanalHigh)[1]
        for i in range(len(canal)):
            if canal[i] >= 0 and canal[i] <= CanalLow:
                canal_pc[i] = 0.99
            elif canal[i] > CanalLow and canal[i] <= CanalHigh:
                canal_pc[i] = m * canal[i] + b
            elif canal[i] > CanalHigh:
                canal_pc[i] = 0.01
            else:
                canal_pc[i] = 0.01

        del canal_array, canal, m, b
    else:
        canal_pc = np.zeros_like(segid_array)

    # railroad conflict
    if "iPC_RR" in fields:
        rr_array = arcpy.da.FeatureClassToNumPyArray(out_network, "iPC_RR")
        rr = np.asarray(rr_array, np.float64)
        #rr_pc = np.zeros_like(rr)
        rr_pc = np.empty_like(rr)
        m = slopeInt(RRLow, RRHigh)[0]
        b = slopeInt(RRLow, RRHigh)[1]
        for i in range(len(rr)):
            if rr[i] >= 0 and rr[i] <= RRLow:
                rr_pc[i] = 0.99
            elif rr[i] > RRLow and rr[i] <= RRHigh:
                rr_pc[i] = m * rr[i] + b
            elif rr[i] > RRHigh:
                rr_pc[i] = 0.01
            else:
                rr_pc[i] = 0.01

        del rr_array, rr, m, b
    else:
        rr_pc = np.zeros_like(segid_array)

    # landuse conflict
    if "iPC_LU" in fields:
        lu_array = arcpy.da.FeatureClassToNumPyArray(out_network, "iPC_LU")
        lu = np.asarray(lu_array, np.float64)
        lu_pc = np.empty_like(lu)

        # for i in range(len(lu)):
        #     if lu[i] >= 2:
        #         lu_pc[i] = 0.75
        #     elif lu[i] >= 1.25 and lu[i] < 2:
        #         lu_pc[i] = 0.5
        #     elif lu[i] < 1.25:
        #         lu_pc[i] = 0.01
        #     else:
        #         lu_pc[i] = 0.01

        for i in range(len(lu)):
            if lu[i] >= 1.0:
                lu_pc[i] = 0.99
            elif lu[i] >= 0.66 and lu[i] < 1.0:
                lu_pc[i] = 0.75
            elif lu[i] >= 0.33 and lu[i] < 0.66:
                lu_pc[i] = 0.5
            elif lu[i] > 0 and lu[i] < 0.33:
                lu_pc[i] = 0.25
            else:
                lu_pc[i] = 0.01
    else:
        lu_pc = np.zeros_like(segid_array)

    # get max of all individual conflict potential scores
    # this is our conflict potential output
    oPC_Score = np.fmax(
        roadx_pc, np.fmax(roadad_pc, np.fmax(canal_pc, np.fmax(rr_pc, lu_pc))))
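    # (np.fmax, unlike np.maximum, ignores NaNs, so a missing individual score
    # cannot mask the element-wise maximum)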

    # save the output text file
    columns = np.column_stack((segid_array, oPC_Score))
    out_table = os.path.dirname(out_network) + "/oPC_Score_Table.txt"
    np.savetxt(out_table,
               columns,
               delimiter=",",
               header="ReachID, oPC_Score",
               comments="")

    opc_score_table = scratch + "/opc_score_table"
    arcpy.CopyRows_management(out_table, opc_score_table)

    # join the output to the flowline network
    # create empty dictionary to hold input table field values
    tblDict = {}
    # add values to dictionary
    with arcpy.da.SearchCursor(opc_score_table,
                               ['ReachID', 'oPC_Score']) as cursor:
        for row in cursor:
            tblDict[row[0]] = row[1]
    # populate flowline network out field
    arcpy.AddField_management(out_network, 'oPC_Score', 'DOUBLE')
    with arcpy.da.UpdateCursor(out_network,
                               ['ReachID', 'oPC_Score']) as cursor:
        for row in cursor:
            try:
                aKey = row[0]
                row[1] = tblDict[aKey]
                cursor.updateRow(row)
            except KeyError:
                # reach not present in the score table; leave the field null
                pass
    tblDict.clear()

    arcpy.Delete_management(out_table)
    arcpy.Delete_management(opc_score_table)

    addxmloutput(projPath, in_network, out_network)
Example #12
def add_to_geodatabase(input_items, out_gdb, is_fds):
    """Adds items to a geodatabase."""
    added = 0
    skipped = 0
    errors = 0
    global processed_count
    global layer_name
    global existing_fields
    global new_fields
    global field_values

    for ds, out_name in input_items.iteritems():
        try:
            # -----------------------------------------------
            # If the item is a service layer, process and continue.
            # -----------------------------------------------
            if ds.startswith('http'):
                try:
                    service_layer = task_utils.ServiceLayer(ds)
                    arcpy.env.overwriteOutput = True
                    oid_groups = service_layer.object_ids
                    out_features = None
                    g = 0.
                    group_cnt = service_layer.object_ids_cnt
                    for group in oid_groups:
                        g += 1
                        group = [oid for oid in group if oid]
                        where = '{0} IN {1}'.format(service_layer.oid_field_name, tuple(group))
                        url = ds + "/query?where={}&outFields={}&returnGeometry=true&geometryType=esriGeometryPolygon&f=json".format(where, '*')
                        feature_set = arcpy.FeatureSet()
                        feature_set.load(url)
                        if not out_features:
                            out_features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, out_gdb))
                        else:
                            features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, out_gdb))
                            arcpy.Append_management(features, out_features, 'NO_TEST')
                            try:
                                arcpy.Delete_management(features)
                            except arcpy.ExecuteError:
                                pass
                        status_writer.send_percent(float(g) / group_cnt * 100, '', 'add_to_geodatabase')
                    processed_count += 1.
                    added += 1
                    status_writer.send_percent(processed_count / result_count, _('Added: {0}').format(ds), 'add_to_geodatabase')
                    continue
                except Exception as ex:
                    status_writer.send_state(status.STAT_WARNING, str(ex))
                    errors_reasons[ds] = ex.message
                    errors += 1
                    continue

            # ------------------------------
            # Is the input a mxd data frame.
            # ------------------------------
            map_frame_name = task_utils.get_data_frame_name(ds)
            if map_frame_name:
                ds = ds.split('|')[0].strip()

            # -------------------------------
            # Is the input a geometry feature
            # -------------------------------
            if isinstance(out_name, list):
                increment = task_utils.get_increment(result_count)
                for row in out_name:
                    try:
                        name = os.path.join(out_gdb, arcpy.ValidateTableName(ds, out_gdb))
                        # Create the geometry if it exists.
                        geom = None
                        try:
                            geo_json = row['[geo]']
                            geom = arcpy.AsShape(geo_json)
                            row.pop('[geo]')
                        except KeyError:
                            pass

                        if geom:
                            if not arcpy.Exists(name):
                                if arcpy.env.outputCoordinateSystem:
                                    arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                else:
                                    arcpy.env.outputCoordinateSystem = 4326
                                    arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer_{0}'.format(os.path.basename(name)))
                                existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
                                new_fields = []
                                field_values = []
                                for field, value in row.iteritems():
                                    valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                    new_fields.append(valid_field)
                                    field_values.append(value)
                                    arcpy.AddField_management(layer_name, valid_field, 'TEXT')
                            else:
                                if not geom.type.upper() == arcpy.Describe(name).shapeType.upper():
                                    name = arcpy.CreateUniqueName(os.path.basename(name), out_gdb)
                                    if arcpy.env.outputCoordinateSystem:
                                        arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                    else:
                                        arcpy.env.outputCoordinateSystem = 4326
                                        arcpy.CreateFeatureclass_management(out_gdb, os.path.basename(name), geom.type.upper())
                                    layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer_{0}'.format(os.path.basename(name)))
                                    existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
                                    new_fields = []
                                    field_values = []
                                    for field, value in row.iteritems():
                                        valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                        new_fields.append(valid_field)
                                        field_values.append(value)
                                        if valid_field not in existing_fields:
                                            arcpy.AddField_management(layer_name, valid_field, 'TEXT')
                        else:
                            if not arcpy.Exists(name):
                                arcpy.CreateTable_management(out_gdb, os.path.basename(name))
                                view_name = arcpy.MakeTableView_management(name, 'tableview')
                                existing_fields = [f.name for f in arcpy.ListFields(view_name)]
                                new_fields = []
                                field_values = []
                                for field, value in row.iteritems():
                                    valid_field = arcpy.ValidateFieldName(field, out_gdb)
                                    new_fields.append(valid_field)
                                    field_values.append(value)
                                    if valid_field not in existing_fields:
                                        arcpy.AddField_management(view_name, valid_field, 'TEXT')


                        if geom:
                            with arcpy.da.InsertCursor(layer_name, ["SHAPE@"] + new_fields) as icur:
                                icur.insertRow([geom] + field_values)
                        else:
                            with arcpy.da.InsertCursor(view_name, new_fields) as icur:
                                icur.insertRow(field_values)

                        processed_count += 1
                        if (processed_count % increment) == 0:
                            status_writer.send_percent(float(processed_count) / result_count, _('Added: {0}').format(row['name']), 'add_to_geodatabase')
                        added += 1
                        continue
                    except Exception as ex:
                        processed_count += 1
                        errors += 1
                        errors_reasons[name] = ex.message
                        continue
                continue
            # -----------------------------
            # Check the data type and clip.
            # -----------------------------
            dsc = arcpy.Describe(ds)
            if dsc.dataType == 'FeatureClass':
                if out_name == '':
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'ShapeFile':
                if out_name == '':
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(dsc.name[:-4], out_gdb))
                else:
                    arcpy.CopyFeatures_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'FeatureDataset':
                if not is_fds:
                    fds_name = os.path.basename(task_utils.create_unique_name(dsc.name, out_gdb))
                    fds = arcpy.CreateFeatureDataset_management(out_gdb, fds_name).getOutput(0)
                else:
                    fds = out_gdb
                arcpy.env.workspace = dsc.catalogPath
                for fc in arcpy.ListFeatureClasses():
                    name = os.path.basename(task_utils.create_unique_name(fc, out_gdb))
                    arcpy.CopyFeatures_management(fc, os.path.join(fds, name))
                arcpy.env.workspace = out_gdb

            elif dsc.dataType == 'RasterDataset':
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRaster_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRaster_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'RasterCatalog':
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRasterCatalogItems_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRasterCatalogItems_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            elif dsc.dataType == 'Layer':
                layer_from_file = arcpy.mapping.Layer(dsc.catalogPath)
                layers = arcpy.mapping.ListLayers(layer_from_file)
                for layer in layers:
                    if out_name == '':
                        name = task_utils.create_unique_name(layer.name, out_gdb)
                    else:
                        name = task_utils.create_unique_name(out_name, out_gdb)
                    if layer.isFeatureLayer:
                        arcpy.CopyFeatures_management(layer.dataSource, name)
                    elif layer.isRasterLayer:
                        if is_fds:
                            name = os.path.dirname(name)
                        arcpy.CopyRaster_management(layer.dataSource, name)

            elif dsc.dataType == 'CadDrawingDataset':
                arcpy.env.workspace = dsc.catalogPath
                cad_wks_name = os.path.splitext(dsc.name)[0]
                for cad_fc in arcpy.ListFeatureClasses():
                    arcpy.CopyFeatures_management(
                        cad_fc,
                        task_utils.create_unique_name('{0}_{1}'.format(cad_wks_name, cad_fc), out_gdb)
                    )
                arcpy.env.workspace = out_gdb

            elif dsc.dataType == 'File':
                if dsc.catalogPath.endswith('.kml') or dsc.catalogPath.endswith('.kmz'):
                    name = os.path.splitext(dsc.name)[0]
                    temp_dir = tempfile.mkdtemp()
                    kml_layer = arcpy.KMLToLayer_conversion(dsc.catalogPath, temp_dir, name)
                    group_layer = arcpy.mapping.Layer(os.path.join(temp_dir, '{}.lyr'.format(name)))
                    for layer in arcpy.mapping.ListLayers(group_layer):
                        if layer.isFeatureLayer:
                            arcpy.CopyFeatures_management(layer, task_utils.create_unique_name(layer, out_gdb))
                        elif layer.isRasterLayer:
                            if is_fds:
                                out_gdb = os.path.dirname(out_gdb)
                            arcpy.CopyRaster_management(layer, task_utils.create_unique_name(layer, out_gdb))
                    # Clean up temp KML results.
                    arcpy.Delete_management(os.path.join(temp_dir, '{}.lyr'.format(name)))
                    arcpy.Delete_management(kml_layer)
                else:
                    processed_count += 1
                    status_writer.send_percent(processed_count / result_count, _('Invalid input type: {0}').format(dsc.name), 'add_to_geodatabase')
                    skipped += 1
                    skipped_reasons[ds] = _('Invalid input type: {0}').format(dsc.dataType)
                    continue

            elif dsc.dataType == 'MapDocument':
                mxd = arcpy.mapping.MapDocument(dsc.catalogPath)
                if map_frame_name:
                    df = arcpy.mapping.ListDataFrames(mxd, map_frame_name)[0]
                    layers = arcpy.mapping.ListLayers(mxd, data_frame=df)
                else:
                    layers = arcpy.mapping.ListLayers(mxd)
                for layer in layers:
                    if layer.isFeatureLayer:
                        arcpy.CopyFeatures_management(layer.dataSource,
                                                      task_utils.create_unique_name(layer.name, out_gdb))
                    elif layer.isRasterLayer:
                        if is_fds:
                            out_gdb = os.path.dirname(out_gdb)
                        arcpy.CopyRaster_management(layer.dataSource,
                                                    task_utils.create_unique_name(layer.name, out_gdb))
                table_views = arcpy.mapping.ListTableViews(mxd)
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                for table_view in table_views:
                    arcpy.CopyRows_management(table_view.dataSource,
                                              task_utils.create_unique_name(table_view.name, out_gdb))
                out_gdb = arcpy.env.workspace

            elif dsc.dataType.find('Table') > 0:
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                if out_name == '':
                    arcpy.CopyRows_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))
                else:
                    arcpy.CopyRows_management(ds, task_utils.create_unique_name(out_name, out_gdb))

            else:
                # Try to copy any other types such as topologies, network datasets, etc.
                if is_fds:
                    out_gdb = os.path.dirname(out_gdb)
                arcpy.Copy_management(ds, task_utils.create_unique_name(dsc.name, out_gdb))

            out_gdb = arcpy.env.workspace
            processed_count += 1.
            status_writer.send_percent(processed_count / result_count, _('Added: {0}').format(ds), 'add_to_geodatabase')
            status_writer.send_status(_('Added: {0}').format(ds))
            added += 1
        # Continue if an error. Process as many as possible.
        except Exception as ex:
            processed_count += 1
            status_writer.send_percent(processed_count / result_count, _('Skipped: {0}').format(ds), 'add_to_geodatabase')
            status_writer.send_status(_('FAIL: {0}').format(repr(ex)))
            errors_reasons[ds] = repr(ex)
            errors += 1
            continue

    return added, errors, skipped
Example #13
                                          out_feature_class=os.path.join(
                                              temp_fgdb, "sample_points"))[0]

# Update timezones
arcpy.AddMessage("Changing Dates and Times from UTC to NZ")
fields = ['SampleDate']
with arcpy.da.UpdateCursor(sample_fc, fields) as cursor:
    for row in cursor:
        if row[0]:
            row[0] = changeTimeZone(row[0])
            cursor.updateRow(row)

# Download records
arcpy.AddMessage("Copying Disorder Records")
disorder_rows = arcpy.CopyRows_management(in_rows=in_hfs_rel_rows,
                                          out_table=os.path.join(
                                              temp_fgdb,
                                              "disorder_records"))[0]

# Export XML
arcpy.AddMessage("Exporting XML")

sample_fields = [
    'GlobalID', 'FormID', 'Owner', 'Location', 'Stand', 'BioRegion',
    'SiteType', 'SampleDate', 'Inspector', 'HostSpec', 'EstabYear',
    'Treatment', 'Diam', 'Height', 'InspType', 'Comments', 'SHAPE@X',
    'SHAPE@Y', 'Err1'
]

disorder_fields = [
    'Sample_GUID', 'DisorderDescription', 'DisorderAgentName',
    'DisorderAspect', 'DisorderTerrain', 'DisorderPosition', 'DisorderType',
Example #14
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint(
    "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
    myLogDatei)
myLogPrint(" Image Catalog erstellen", myLogDatei)
arcpy.AddMessage("Image Catalog erstellen")
myLogPrint(
    "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
    myLogDatei)

if doIMAGECATALOG == 1:

    # Copy the state-wide image catalog
    arcpy.CopyRows_management(IMAGE_CATALOG_ORIG, IMAGE_CATALOG_AOI)

    arcpy.AddField_management(IMAGE_CATALOG_AOI, "TMP", "SHORT", "", "", "",
                              "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.CalculateField_management(IMAGE_CATALOG_AOI, "TMP", "0",
                                    "PYTHON_9.3", "")

    fields01 = ['OBJECT_ID', 'KB']
    with arcpy.da.SearchCursor(BS_DOB_AOI, fields01) as cursor01:
        for row01 in cursor01:
            ORDNER = row01[1]

            BS_NR = (str(row01[0])[0:-2])
            BS_NR_01 = (str(row01[0])[:-6])
            BS_NR_02 = (str(row01[0])[5:-2])
Exemplo n.º 15
0
def main(in_network, pt_type, ex_type, scratch):

    arcpy.env.overwriteOutput = True

    network_fields = [f.name for f in arcpy.ListFields(in_network)]

    # fix any values outside of fis range
    if pt_type == "true":

        # delete oVC_PT field if exists
        if "oVC_PT" in network_fields:
            arcpy.DeleteField_management(in_network, "oVC_PT")

        cursor = arcpy.da.UpdateCursor(in_network, ["iVeg_100PT", "iVeg_30PT"])
        for row in cursor:
            # clamp each input field to the 0-4 FIS range independently
            if row[0] < 0:
                row[0] = 0
            elif row[0] > 4:
                row[0] = 3.9
            if row[1] < 0:
                row[1] = 0
            elif row[1] > 4:
                row[1] = 3.9
            cursor.updateRow(row)
        del row
        del cursor

        # get arrays for fields of interest
        riparian_area_a = arcpy.da.FeatureClassToNumPyArray(
            in_network, "iVeg_100PT")
        streamside_a = arcpy.da.FeatureClassToNumPyArray(
            in_network, "iVeg_30PT")

        riparian_array = np.asarray(riparian_area_a, np.float64)
        streamside_array = np.asarray(streamside_a, np.float64)

        del riparian_area_a, streamside_a

        # set up input and output ranges
        riparian = ctrl.Antecedent(np.arange(0, 4, 0.04), 'input1')
        streamside = ctrl.Antecedent(np.arange(0, 4, 0.04), 'input2')
        density = ctrl.Consequent(np.arange(0, 45, 0.5), 'result')

        # membership functions
        riparian['unsuitable'] = fuzz.trapmf(riparian.universe, [0, 0, 0.1, 1])
        riparian['barely'] = fuzz.trimf(riparian.universe, [0.1, 1, 2])
        riparian['moderately'] = fuzz.trimf(riparian.universe, [1, 2, 3])
        riparian['suitable'] = fuzz.trimf(riparian.universe, [2, 3, 4])
        riparian['preferred'] = fuzz.trimf(riparian.universe, [3, 4, 4])

        streamside['unsuitable'] = fuzz.trapmf(streamside.universe,
                                               [0, 0, 0.1, 1])
        streamside['barely'] = fuzz.trimf(streamside.universe, [0.1, 1, 2])
        streamside['moderately'] = fuzz.trimf(streamside.universe, [1, 2, 3])
        streamside['suitable'] = fuzz.trimf(streamside.universe, [2, 3, 4])
        streamside['preferred'] = fuzz.trimf(streamside.universe, [3, 4, 4])

        density['none'] = fuzz.trimf(density.universe, [0, 0, 0.1])
        density['rare'] = fuzz.trapmf(density.universe, [0, 0.1, 0.5, 1.5])
        density['occasional'] = fuzz.trapmf(density.universe, [0.5, 1.5, 4, 8])
        density['frequent'] = fuzz.trapmf(density.universe, [4, 8, 12, 25])
        density['pervasive'] = fuzz.trapmf(density.universe, [12, 25, 45, 45])

        # rules
        rule1 = ctrl.Rule(riparian['unsuitable'] & streamside['unsuitable'],
                          density['none'])
        rule2 = ctrl.Rule(riparian['barely'] & streamside['unsuitable'],
                          density['rare'])
        rule3 = ctrl.Rule(riparian['moderately'] & streamside['unsuitable'],
                          density['occasional'])
        rule4 = ctrl.Rule(riparian['suitable'] & streamside['unsuitable'],
                          density['occasional'])
        rule5 = ctrl.Rule(riparian['preferred'] & streamside['unsuitable'],
                          density['occasional'])
        rule6 = ctrl.Rule(riparian['unsuitable'] & streamside['barely'],
                          density['rare'])
        rule7 = ctrl.Rule(riparian['barely'] & streamside['barely'],
                          density['rare'])
        rule8 = ctrl.Rule(riparian['moderately'] & streamside['barely'],
                          density['occasional'])
        rule9 = ctrl.Rule(riparian['suitable'] & streamside['barely'],
                          density['frequent'])
        rule10 = ctrl.Rule(riparian['preferred'] & streamside['barely'],
                           density['frequent'])
        rule11 = ctrl.Rule(riparian['unsuitable'] & streamside['moderately'],
                           density['rare'])
        rule12 = ctrl.Rule(riparian['barely'] & streamside['moderately'],
                           density['occasional'])
        rule13 = ctrl.Rule(riparian['moderately'] & streamside['moderately'],
                           density['occasional'])
        rule14 = ctrl.Rule(riparian['suitable'] & streamside['moderately'],
                           density['frequent'])
        rule15 = ctrl.Rule(riparian['preferred'] & streamside['moderately'],
                           density['pervasive'])
        rule16 = ctrl.Rule(riparian['unsuitable'] & streamside['suitable'],
                           density['rare'])
        rule17 = ctrl.Rule(riparian['barely'] & streamside['suitable'],
                           density['frequent'])
        rule18 = ctrl.Rule(riparian['moderately'] & streamside['suitable'],
                           density['frequent'])
        rule19 = ctrl.Rule(riparian['suitable'] & streamside['suitable'],
                           density['frequent'])
        rule20 = ctrl.Rule(riparian['preferred'] & streamside['suitable'],
                           density['pervasive'])
        rule21 = ctrl.Rule(riparian['unsuitable'] & streamside['preferred'],
                           density['occasional'])
        rule22 = ctrl.Rule(riparian['barely'] & streamside['preferred'],
                           density['frequent'])
        rule23 = ctrl.Rule(riparian['moderately'] & streamside['preferred'],
                           density['frequent'])
        rule24 = ctrl.Rule(riparian['suitable'] & streamside['preferred'],
                           density['pervasive'])
        rule25 = ctrl.Rule(riparian['preferred'] & streamside['preferred'],
                           density['pervasive'])

        # FIS
        veg_ctrl = ctrl.ControlSystem([
            rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9,
            rule10, rule11, rule12, rule13, rule14, rule15, rule16, rule17,
            rule18, rule19, rule20, rule21, rule22, rule23, rule24, rule25
        ])
        veg_fis = ctrl.ControlSystemSimulation(veg_ctrl)

        out = np.zeros(len(riparian_array))
        for i in range(len(out)):
            veg_fis.input['input1'] = riparian_array[i]
            veg_fis.input['input2'] = streamside_array[i]
            veg_fis.compute()
            out[i] = veg_fis.output['result']

        # save the output text file then merge to shapefile
        fid = np.arange(0, len(out), 1)
        columns = np.column_stack((fid, out))
        out_table = os.path.dirname(in_network) + "/oVC_PT_Table.txt"
        np.savetxt(out_table,
                   columns,
                   delimiter=",",
                   header="FID, oVC_PT",
                   comments="")

        ovc_table = scratch + "/ovc_pt_table"
        arcpy.CopyRows_management(out_table, ovc_table)
        arcpy.JoinField_management(in_network, "FID", ovc_table, "FID",
                                   "oVC_PT")
        arcpy.Delete_management(out_table)

        del out, fid, columns, out_table, ovc_table

    elif ex_type == "true":

        if "oVC_EX" in network_fields:
            arcpy.DeleteField_management(in_network, "oVC_EX")

        cursor = arcpy.da.UpdateCursor(in_network, ["iVeg_100EX", "iVeg_30EX"])
        for row in cursor:
            # clamp each input field to the 0-4 FIS range independently
            if row[0] < 0:
                row[0] = 0
            elif row[0] > 4:
                row[0] = 3.9
            if row[1] < 0:
                row[1] = 0
            elif row[1] > 4:
                row[1] = 3.9
            cursor.updateRow(row)
        del row
        del cursor

        # get arrays for fields of interest
        riparian_area_a = arcpy.da.FeatureClassToNumPyArray(
            in_network, "iVeg_100EX")
        streamside_a = arcpy.da.FeatureClassToNumPyArray(
            in_network, "iVeg_30EX")

        riparian_array = np.asarray(riparian_area_a, np.float64)
        streamside_array = np.asarray(streamside_a, np.float64)

        del riparian_area_a, streamside_a

        # set up input and output ranges
        riparian = ctrl.Antecedent(np.arange(0, 4, 0.04), 'input1')
        streamside = ctrl.Antecedent(np.arange(0, 4, 0.04), 'input2')
        density = ctrl.Consequent(np.arange(0, 45, 0.5), 'result')

        # membership functions
        riparian['unsuitable'] = fuzz.trapmf(riparian.universe, [0, 0, 0.1, 1])
        riparian['barely'] = fuzz.trimf(riparian.universe, [0.1, 1, 2])
        riparian['moderately'] = fuzz.trimf(riparian.universe, [1, 2, 3])
        riparian['suitable'] = fuzz.trimf(riparian.universe, [2, 3, 4])
        riparian['preferred'] = fuzz.trimf(riparian.universe, [3, 4, 4])

        streamside['unsuitable'] = fuzz.trapmf(streamside.universe,
                                               [0, 0, 0.1, 1])
        streamside['barely'] = fuzz.trimf(streamside.universe, [0.1, 1, 2])
        streamside['moderately'] = fuzz.trimf(streamside.universe, [1, 2, 3])
        streamside['suitable'] = fuzz.trimf(streamside.universe, [2, 3, 4])
        streamside['preferred'] = fuzz.trimf(streamside.universe, [3, 4, 4])

        density['none'] = fuzz.trimf(density.universe, [0, 0, 0.1])
        density['rare'] = fuzz.trapmf(density.universe, [0, 0.1, 0.5, 1.5])
        density['occasional'] = fuzz.trapmf(density.universe, [0.5, 1.5, 4, 8])
        density['frequent'] = fuzz.trapmf(density.universe, [4, 8, 12, 25])
        density['pervasive'] = fuzz.trapmf(density.universe, [12, 25, 45, 45])

        # rules
        rule1 = ctrl.Rule(riparian['unsuitable'] & streamside['unsuitable'],
                          density['none'])
        rule2 = ctrl.Rule(riparian['barely'] & streamside['unsuitable'],
                          density['rare'])
        rule3 = ctrl.Rule(riparian['moderately'] & streamside['unsuitable'],
                          density['occasional'])
        rule4 = ctrl.Rule(riparian['suitable'] & streamside['unsuitable'],
                          density['occasional'])
        rule5 = ctrl.Rule(riparian['preferred'] & streamside['unsuitable'],
                          density['occasional'])
        rule6 = ctrl.Rule(riparian['unsuitable'] & streamside['barely'],
                          density['rare'])
        rule7 = ctrl.Rule(riparian['barely'] & streamside['barely'],
                          density['rare'])
        rule8 = ctrl.Rule(riparian['moderately'] & streamside['barely'],
                          density['occasional'])
        rule9 = ctrl.Rule(riparian['suitable'] & streamside['barely'],
                          density['frequent'])
        rule10 = ctrl.Rule(riparian['preferred'] & streamside['barely'],
                           density['frequent'])
        rule11 = ctrl.Rule(riparian['unsuitable'] & streamside['moderately'],
                           density['rare'])
        rule12 = ctrl.Rule(riparian['barely'] & streamside['moderately'],
                           density['occasional'])
        rule13 = ctrl.Rule(riparian['moderately'] & streamside['moderately'],
                           density['occasional'])
        rule14 = ctrl.Rule(riparian['suitable'] & streamside['moderately'],
                           density['frequent'])
        rule15 = ctrl.Rule(riparian['preferred'] & streamside['moderately'],
                           density['pervasive'])
        rule16 = ctrl.Rule(riparian['unsuitable'] & streamside['suitable'],
                           density['rare'])
        rule17 = ctrl.Rule(riparian['barely'] & streamside['suitable'],
                           density['frequent'])
        rule18 = ctrl.Rule(riparian['moderately'] & streamside['suitable'],
                           density['frequent'])
        rule19 = ctrl.Rule(riparian['suitable'] & streamside['suitable'],
                           density['frequent'])
        rule20 = ctrl.Rule(riparian['preferred'] & streamside['suitable'],
                           density['pervasive'])
        rule21 = ctrl.Rule(riparian['unsuitable'] & streamside['preferred'],
                           density['occasional'])
        rule22 = ctrl.Rule(riparian['barely'] & streamside['preferred'],
                           density['frequent'])
        rule23 = ctrl.Rule(riparian['moderately'] & streamside['preferred'],
                           density['frequent'])
        rule24 = ctrl.Rule(riparian['suitable'] & streamside['preferred'],
                           density['pervasive'])
        rule25 = ctrl.Rule(riparian['preferred'] & streamside['preferred'],
                           density['pervasive'])

        # FIS
        veg_ctrl = ctrl.ControlSystem([
            rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9,
            rule10, rule11, rule12, rule13, rule14, rule15, rule16, rule17,
            rule18, rule19, rule20, rule21, rule22, rule23, rule24, rule25
        ])
        veg_fis = ctrl.ControlSystemSimulation(veg_ctrl)

        out = np.zeros(len(riparian_array))
        for i in range(len(out)):
            veg_fis.input['input1'] = riparian_array[i]
            veg_fis.input['input2'] = streamside_array[i]
            veg_fis.compute()
            out[i] = veg_fis.output['result']

        # save the output text file then merge to shapefile
        fid = np.arange(0, len(out), 1)
        columns = np.column_stack((fid, out))
        out_table = os.path.dirname(in_network) + "/oVC_EX_Table.txt"
        np.savetxt(out_table,
                   columns,
                   delimiter=",",
                   header="FID, oVC_EX",
                   comments="")

        ovc_table = scratch + "/ovc_ex_table"
        arcpy.CopyRows_management(out_table, ovc_table)
        arcpy.JoinField_management(in_network, "FID", ovc_table, "FID",
                                   "oVC_EX")
        arcpy.Delete_management(out_table)

        del out, fid, columns, out_table, ovc_table

    else:
        raise Exception("Either historic or existing must be selected")

    return
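
# The function above builds a 25-rule Mamdani system with scikit-fuzzy. A
# minimal standalone sketch of the same Antecedent/Consequent/Rule/
# ControlSystemSimulation pattern, reduced to one input and two rules
# (the membership breakpoints here are illustrative, not from the model):
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

veg = ctrl.Antecedent(np.arange(0, 4, 0.04), 'veg')
dams = ctrl.Consequent(np.arange(0, 45, 0.5), 'dams')

veg['low'] = fuzz.trapmf(veg.universe, [0, 0, 0.5, 2])
veg['high'] = fuzz.trapmf(veg.universe, [1, 3, 4, 4])
dams['rare'] = fuzz.trapmf(dams.universe, [0, 0, 5, 15])
dams['frequent'] = fuzz.trapmf(dams.universe, [5, 15, 45, 45])

rules = [ctrl.Rule(veg['low'], dams['rare']),
         ctrl.Rule(veg['high'], dams['frequent'])]

sim = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules))
sim.input['veg'] = 2.5
sim.compute()
print(sim.output['dams'])  # defuzzified dam density for this input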
Exemplo n.º 16
0
    'ISO3final "ISO3final" true true false 50 Text 0 0 ,First,#,wdpa_flat_1km2_final,ISO3final,-1,-1;AREA_GEO "AREA_GEO" true true false 8 Double 0 0 ,First,#,wdpa_flat_1km2_final,AREA_GEO,-1,-1;ORIG_FID "ORIG_FID" true true false 4 Long 0 0 ,First,#,wdpa_flat_1km2_final,ORIG_FID,-1,-1;nodeID "nodeID" true true false 4 Long 0 0 ,First,#,wdpa_flat_1km2_final,nodeID,-1,-1;Shape_Leng "Shape_Leng" false true true 8 Double 0 0 ,First,#,wdpa_flat_1km2_final,Shape_Length,-1,-1;Shape_Area "Shape_Area" false true true 8 Double 0 0 ,First,#,wdpa_flat_1km2_final,Shape_Area,-1,-1',
    config_keyword="")

print("wdpa_flat_1km2_final exported in Geopackage")

# Process: Generate Near Table (edit output name before running)
print(
    "Now generating Near Table. If you want to do it in postgis, kill the script now and execute in docker the script /globes/USERS/GIACOMO/protconn/scripts/exec_generate_near_table_country.sh"
)
arcpy.GenerateNearTable_analysis(wdpa_flat_1km2_final, wdpa_flat_1km2_final,
                                 All_distances_300km_Nov2019, "300 Kilometers",
                                 "NO_LOCATION", "NO_ANGLE", "ALL", "0",
                                 "GEODESIC")
print("Near Table generated")

# Process: Copy Rows
arcpy.CopyRows_management(All_distances_300km_Nov2019, outfile_dist)
print("Near Table exported in .txt")

print('-------------------------------------------------------')
endtime = datetime.now()
totaltime = endtime - firststarttime
print(' ')
print('PROCEDURE COMPLETED. Elapsed time: ', totaltime)
print(
    'If needed, now execute in docker the script "/globes/USERS/GIACOMO/protconn/scripts/gis_proc/exec_generate_near_table_country.sh" '
)
print(' ')

sys.exit()
Exemplo n.º 17
0
    currentOverwriteOutput = env.overwriteOutput
    env.overwriteOutput = True

    scratchWS = env.scratchWorkspace
    if scratchWS is None:
        scratchWS = r'in_memory'

    scratchTable = os.path.join(scratchWS, "cc_temp")
    delete_me.append(scratchTable)

    # Local variables:
    intermed = Output_Table

    # Process: Copy Rows
    arcpy.CopyRows_management(Input_Table, Output_Table, "")

    # Process: Add Unique Row ID
    arcpy.gp.AddUniqueRowID(Output_Table, "JoinID")

    # Process: Convert Coordinate Notation (GARS)
    arcpy.AddMessage("Converting & appending GARS ...")
    arcpy.ConvertCoordinateNotation_management(
        intermed, scratchTable,
        X_Field__Longitude__UTM__MGRS__USNG__GARS__GeoRef_, Y_Field__Latitude_,
        Input_Coordinate_Format, "GARS", "JoinID", Spatial_Reference)
    arcpy.JoinField_management(intermed, "JoinID", scratchTable, "JoinID",
                               "GARS")

    # Process: Convert Coordinate Notation (DD)
    arcpy.AddMessage("Converting & appending Decimal Degrees ...")
    arcpy.Statistics_analysis(LU_PLUS_SOILS, RCN_Stats2, "WGT_RCN sum", "")
    wgtrows = arcpy.SearchCursor(RCN_Stats2)
    wgtrow = wgtrows.next()
    wgtRCN = wgtrow.SUM_WGT_RCN
    AddMsgAndPrint(
        "\n\tWeighted Average Runoff Curve No. for " + str(wsName) + " is " +
        str(int(wgtRCN)), 0)

    del wsArea
    del rows
    del row
    del wgtrows
    del wgtrow

    # Export RCN Summary Table
    arcpy.CopyRows_management(LU_PLUS_SOILS, RCN_TABLE)

    # Delete un-necessary fields from summary table
    arcpy.DeleteField_management(
        RCN_TABLE, "VALUE;COUNT;SOILS;HYD_CODE;WGT_RCN")

    # ------------------------------------------------------------------ Pass results to user watershed
    AddMsgAndPrint("\nAdding RCN results to " + str(wsName) + "'s attributes",
                   0)
    if not arcpy.ListFields(watershed, "RCN"):
        arcpy.AddField_management(watershed, "RCN", "LONG", "", "", "", "",
                                  "NULLABLE", "NON_REQUIRED", "")
    arcpy.CalculateField_management(watershed, "RCN", str(wgtRCN), "PYTHON")

    del wgtRCN
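
# For reference, the legacy SearchCursor/next() pattern used earlier in this
# example can be written more compactly with arcpy.da, assuming the same
# single-row statistics table:
# wgtRCN = next(arcpy.da.SearchCursor(RCN_Stats2, ["SUM_WGT_RCN"]))[0]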
Exemplo n.º 19
0
def main(in_network,
         region,
         basin_elev,
         basin_long,
         basin_jan_precip,
         basin_lat,
         Qlow_eqtn=None,
         Q2_eqtn=None):
    """
    if region is None or region == "None":
        region = 0
    else:
        region = int(region)
    if Qlow_eqtn == "None":
        Qlow_eqtn = None
    if Q2_eqtn == "None":
        Q2_eqtn = None
    """

    scratch = 'in_memory'

    arcpy.env.overwriteOutput = True

    # set basin specific variables
    ELEV_FT = float(basin_elev)
    LONGITUDE = float(basin_long)
    LATITUDE = float(basin_lat)
    JAN_PRECIP = float(basin_jan_precip)

    # create segid array for joining output to input network
    segid_np = arcpy.da.FeatureClassToNumPyArray(in_network, "ReachID")
    segid = np.asarray(segid_np, np.int64)

    # create array for input network drainage area ("iGeo_DA")
    DA_array = arcpy.da.FeatureClassToNumPyArray(in_network, "iGeo_DA")
    DA = np.asarray(DA_array, np.float32)

    # convert drainage area (in square kilometers) to square miles
    # note: this assumes that streamflow equations are in US customary units (e.g., inches, feet)
    DAsqm = DA * 0.3861021585424458

    # create Qlow and Q2
    Qlow = np.zeros_like(DA)
    Q2 = np.zeros_like(DA)

    arcpy.AddMessage("Adding Qlow and Q2 to network...")
    """
    # --regional curve equations for Qlow (baseflow) and Q2 (annual peak streamflow)--
    # # # Add in regional curve equations here # # #
    if Qlow_eqtn is not None:
        Qlow = eval(Qlow_eqtn)
    elif region == 101:  # example 1 (box elder county)
        Qlow = 0.019875 * (DAsqm ** 0.6634) * (10 ** (0.6068 * 2.04))
    elif region == 102:  # example 2 (upper green generic)
        Qlow = 4.2758 * (DAsqm ** 0.299)
    elif region == 24:  # oregon region 5
        Qlow = 0.000133 * (DAsqm ** 1.05) * (15.3 ** 2.1)
    elif region == 1: #Truckee
        ELEV_FT = 6027.722
        PRECIP_IN = 23.674
        Qlow = (10**-7.2182) * (DAsqm**1.013) * (ELEV_FT**01.1236) * (PRECIP_IN**1.4483)
    else:
        Qlow = (DAsqm ** 0.2098) + 1

    if Q2_eqtn is not None:
        Q2 = eval(Q2_eqtn)
    elif region == 101:  # example 1 (box elder county)
        Q2 = 14.5 * DAsqm ** 0.328
    elif region == 102:  # example 2 (upper green generic)
        Q2 = 22.2 * (DAsqm ** 0.608) * ((42 - 40) ** 0.1)
    elif region == 24:  # oregon region 5
        Q2 = 0.000258 * (DAsqm ** 0.893) * (15.3 ** 3.15)
    elif region == 1: #Truckee
        PRECIP_IN = 23.674
        Q2 = 0.0865*(DAsqm**0.736)*(PRECIP_IN**1.59)
    else:
        Q2 = 14.7 * (DAsqm ** 0.815)
    """

    # set regional regression equations
    if region is None:
        region = 0
    if Qlow_eqtn is not None:
        Qlow = eval(Qlow_eqtn)
    elif float(region) == 0:
        Qlow = (DAsqm**0.2098) + 1  # default
    elif float(region) == 21:
        Qlow = (10**-0.695) * (DAsqm**1.093)  # Rocky Mountains
    elif float(region) == 22:
        Qlow = (10**-0.324) * (DAsqm**0.885)  # Central Basin and Northern Plains
    elif float(region) == 25:
        Qlow = (10**-0.301) * (DAsqm**1.008)  # Overthrust belt
    elif float(region) == 26:
        Qlow = (10**-2.277) * (DAsqm**1.308)  # High desert
    else:
        arcpy.AddMessage(
            "WARNING: Region is not part of project area. Quitting iHyd script."
        )
        return

    if Q2_eqtn is not None:
        Q2 = eval(Q2_eqtn)
    elif float(region) == 0:
        Q2 = 14.7 * (DAsqm**0.815)  # default
    elif float(region) == 21:
        Q2 = 0.313 * (DAsqm**0.866) * (((ELEV_FT - 3000) / 1000)**2.32) * (
            (LONGITUDE - 100)**-0.069)  # Rocky Mountains
    elif float(region) == 22:
        Q2 = 29.9 * (DAsqm**0.475)  # Central Basin and Northern Plains
    elif float(region) == 25:
        Q2 = 3.07 * (DAsqm**0.869) * (JAN_PRECIP**0.884)  # Overthrust belt
    elif float(region) == 26:
        Q2 = 22.2 * (DAsqm**0.608) * ((LATITUDE - 40)**-1.24)  # High desert
    else:
        arcpy.AddMessage(
            "WARNING: Region is not part of project area. Quitting iHyd script."
        )
        return

    # save segid, Qlow, Q2 as single table
    columns = np.column_stack((segid, Qlow, Q2))
    tmp_table = os.path.dirname(in_network) + "/ihyd_Q_Table.txt"
    np.savetxt(tmp_table,
               columns,
               delimiter=",",
               header="ReachID, iHyd_QLow, iHyd_Q2",
               comments="")
    ihyd_table = scratch + "/ihyd_table"
    arcpy.CopyRows_management(tmp_table, ihyd_table)

    # join Qlow and Q2 output to the flowline network
    # create empty dictionary to hold input table field values
    tblDict = {}
    # add values to dictionary
    with arcpy.da.SearchCursor(ihyd_table,
                               ['ReachID', "iHyd_QLow", "iHyd_Q2"]) as cursor:
        for row in cursor:
            tblDict[row[0]] = [row[1], row[2]]
    # check for and delete if output fields already included in flowline network
    remove_existing_output(in_network)
    # populate flowline network out field
    arcpy.AddField_management(in_network, "iHyd_QLow", 'DOUBLE')
    arcpy.AddField_management(in_network, "iHyd_Q2", 'DOUBLE')
    with arcpy.da.UpdateCursor(in_network,
                               ['ReachID', "iHyd_QLow", "iHyd_Q2"]) as cursor:
        for row in cursor:
            try:
                aKey = row[0]
                row[1] = tblDict[aKey][0]
                row[2] = tblDict[aKey][1]
                cursor.updateRow(row)
            except KeyError:
                # ReachID missing from the temporary table; leave the fields null
                pass
    tblDict.clear()

    # delete temporary table
    arcpy.Delete_management(tmp_table)

    # check that Q2 is greater than Qlow
    # if not, re-calculate Q2 as Qlow + 0.001
    with arcpy.da.UpdateCursor(in_network, ["iHyd_QLow", "iHyd_Q2"]) as cursor:
        for row in cursor:
            if row[1] < row[0]:
                row[1] = row[0] + 0.001
            cursor.updateRow(row)

    arcpy.AddMessage("Adding stream power to network...")

    # calculate Qlow and Q2 stream power
    # where stream power = density of water (1000 kg/m3) * acceleration due to gravity (9.80665 m/s2) * discharge (m3/s) * channel slope
    # note: we assume that discharge ("iHyd_QLow", "iHyd_Q2") was calculated in cubic feet per second and handle conversion to cubic
    #       meters per second (e.g., "iHyd_QLow" * 0.028316846592)
    arcpy.AddField_management(in_network, "iHyd_SPLow", "DOUBLE")
    arcpy.AddField_management(in_network, "iHyd_SP2", "DOUBLE")
    with arcpy.da.UpdateCursor(
            in_network,
        ["iGeo_Slope", "iHyd_QLow", "iHyd_SPLow", "iHyd_Q2", "iHyd_SP2"
         ]) as cursor:
        for row in cursor:
            if row[0] < 0.001:
                slope = 0.001
            else:
                slope = row[0]
            row[2] = (1000 * 9.80665) * slope * (row[1] * 0.028316846592)
            row[4] = (1000 * 9.80665) * slope * (row[3] * 0.028316846592)
            cursor.updateRow(row)

    makeLayers(in_network)
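
# The stream power update above reduces to one formula: total stream power
# = rho * g * Q * S. A self-contained restatement with the same cfs-to-cms
# conversion and 0.001 slope floor (the function name is illustrative):
def stream_power_watts(q_cfs, slope):
    CFS_TO_CMS = 0.028316846592   # cubic feet/s -> cubic meters/s
    RHO_G = 1000 * 9.80665        # water density (kg/m3) * gravity (m/s2)
    return RHO_G * max(slope, 0.001) * (q_cfs * CFS_TO_CMS)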
Exemplo n.º 20
0
    """ Create report file for each metric """
    tmpName = city + '_Final_Combo_' + time.strftime('%Y%m%d_%H-%M')
    reportfileName = reportfileDir + '/' + tmpName  + '.txt'
    reportFile = open(reportfileName, 'w')

    """ Write out first line of report file """
    reportFile.write('Begin with the final tables for the EnviroAtlas community.--' + time.strftime('%Y%m%d--%H%M%S') + '--\n')
    print 'Final Combination Start Time: ' + time.asctime()
    #-------------------------------------------------------------------------

    #-------- CREATE FINAL iTREE TABLE ---------------------------------------
    """ Copy Table to Final """
    if not arcpy.Exists(iTreeDir + '/' + city + '_iTree'):
        print 'The iTree Table does not exist.'
        raise SystemExit  # stop: the required input table is missing
    arcpy.CopyRows_management(iTreeDir + '/' + city + '_iTree', finalDir + '/' + city + '_iTree')

    """ Check and/or make MaxTempReduction Fields """
    # Current Fields
    alliTreeFields = [f.name for f in arcpy.ListFields(finalDir + '/' + city + '_iTree')]

    maxTempField = filter((lambda x: x.lower().startswith("maxtempreduction_")),alliTreeFields)
    maxTempNightField = filter((lambda x: x.lower().startswith("maxtempreductionnight_")),alliTreeFields)

    if maxTempField and maxTempNightField:
        print('Adding and calculating maxtempreduction and maxtempreductionnight fields...')
        arcpy.AddField_management(finalDir + '/' + city + '_iTree', 'maxtempreduction')
        arcpy.AddField_management(finalDir + '/' + city + '_iTree', 'maxtempreductionnight')
        arcpy.CalculateField_management(finalDir + '/' + city + '_iTree','maxtempreduction', '!' + str(maxTempField[0]) + '!', 'PYTHON_9.3')
        arcpy.CalculateField_management(finalDir + '/' + city + '_iTree','maxtempreductionnight', '!' + str(maxTempNightField[0]) + '!', 'PYTHON_9.3')
    else:
Exemplo n.º 21
0
    if line1.Rank_UGO != line2.Rank_UGO:
        # At the end of an UGO, set the last point of this UGO as the UGO (line3) length and the first
        # point of the next UGO as 0
        line3 = rows3.next()
        line1.MEAS = line3.Shape_Length
        line2.MEAS = 0
        rows1.updateRow(line1)
        rows2.updateRow(line2)

    n += 1

# Split the inflection line at the inflection points
arcpy.AddMessage(
    "    Preparing the split at inflection points of the final inflection line - 7/9"
)
PtsForInflLineDisaggregationTABLE = arcpy.CopyRows_management(
    InflectionPtTABLE, "%ScratchWorkspace%\\PtsForInflLineDisaggregationTABLE")
rows1 = arcpy.UpdateCursor(PtsForInflLineDisaggregationTABLE)
rows2 = arcpy.UpdateCursor(PtsForInflLineDisaggregationTABLE)
line2 = rows2.next()
nrows = int(str(arcpy.GetCount_management(PtsForInflLineDisaggregationTABLE)))

n = 0
for line1 in rows1:
    if n >= nrows - 1:
        # last point of the table: delete it and stop
        rows1.deleteRow(line1)
        break
    line2 = rows2.next()
    if line1.Rank_UGO != line2.Rank_UGO:
        # last point of the current UGO: delete it
        rows1.deleteRow(line1)
    n += 1
def tableToPolyline(inputTable,
                    inputCoordinateFormat,
                    inputXField,
                    inputYField,
                    outputPolylineFeatures,
                    inputLineField,
                    inputSortField,
                    inputSpatialReference):
    '''
    Converts a table of vertices to one or more polyline features.
    
    inputTable - input table, each row is a vertex
    inputCoordinateFormat - coordinate notation format of input vertices
    inputXField - field in inputTable for vertex x-coordinate, or full coordinate
    inputYField - field in inputTable for vertex y-coordinate, or None
    outputPolylineFeatures - polyline feature class to create
    inputLineField - field in inputTable to identify separate polylines
    inputSortField - field in inputTable to sort vertices
    inputSpatialReference - spatial reference of input coordinates
    
    returns polyline feature class
    
    inputCoordinateFormat must be one of the following:
    * DD_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DD_2: Longitude and latitude values are in two separate fields.
    * DDM_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DDM_2: Longitude and latitude values are in two separate fields.
    * DMS_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DMS_2: Longitude and latitude values are in two separate fields.
    * GARS: Global Area Reference System. Based on latitude and longitude, it divides and subdivides the world into cells.
    * GEOREF: World Geographic Reference System. A grid-based system that divides the world into 15-degree quadrangles and then subdivides into smaller quadrangles.
    * UTM_ZONES: The letter N or S after the UTM zone number designates only North or South hemisphere.
    * UTM_BANDS: The letter after the UTM zone number designates one of the 20 latitude bands. N or S does not designate a hemisphere.
    * USNG: United States National Grid. Almost exactly the same as MGRS but uses North American Datum 1983 (NAD83) as its datum.
    * MGRS: Military Grid Reference System. Follows the UTM coordinates and divides the world into 6-degree longitude and 20 latitude bands, but MGRS then further subdivides the grid zones into smaller 100,000-meter grids. These 100,000-meter grids are then divided into 10,000-meter, 1,000-meter, 100-meter, 10-meter, and 1-meter grids.
     
    '''
    try:
        deleteme = []
        joinFieldName = "JoinID"
        scratch = '%scratchGDB%'
        if env.scratchWorkspace:
            scratch = env.scratchWorkspace
            
        inputSpatialReference = _checkSpatialRef(inputSpatialReference)
        
        copyRows = os.path.join(scratch, "copyRows")
        arcpy.CopyRows_management(inputTable, copyRows)
        addUniqueRowID(copyRows, joinFieldName)
        
        copyCCN = os.path.join(scratch, "copyCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows,
                                                   copyCCN,
                                                   inputXField,
                                                   inputYField,
                                                   inputCoordinateFormat,
                                                   "DD_NUMERIC",
                                                   joinFieldName,
                                                   inputSpatialReference)
        
        arcpy.PointsToLine_management(copyCCN,
                                      outputPolylineFeatures,
                                      inputLineField,
                                      inputSortField,
                                      "CLOSE")
        
        return outputPolylineFeatures
    
    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
        
    finally:
        if len(deleteme) > 0:
            # cleanup intermediate datasets
            if debug:
                arcpy.AddMessage("Removing intermediate datasets...")
            for i in deleteme:
                if debug:
                    arcpy.AddMessage("Removing: " + str(i))
                arcpy.Delete_management(i)
            if debug:
                arcpy.AddMessage("Done")
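
# A hypothetical invocation of tableToPolyline; the table, field names, and
# output path below are placeholders, not values from the original script:
# tableToPolyline(r"C:\data\tracks.gdb\vertices",    # inputTable
#                 "DD_2",                            # inputCoordinateFormat
#                 "lon", "lat",                      # X and Y fields
#                 r"C:\data\tracks.gdb\track_lines", # outputPolylineFeatures
#                 "track_id",                        # inputLineField
#                 "seq",                             # inputSortField
#                 None)                              # inputSpatialReference
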
def exportrastab(rasdat, out_table):
    if not Raster(rasdat).hasRAT:
        print('Create raster attribute table for {}...'.format(rasdat))
        arcpy.BuildRasterAttributeTable_management(rasdat)
    arcpy.CopyRows_management(rasdat, out_table)
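
# Hypothetical usage of exportrastab, assuming an integer raster whose
# attribute table may not exist yet (paths are placeholders):
# exportrastab(r"C:\data\landcover.tif", r"C:\data\scratch.gdb\landcover_rat")
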
def convert(costpath, file_name_1, file_name_2, name_1, name_2):
    try:
        pl_path = os.path.join(directory, 'polylines',
                               'pl_' + file_name_1 + '_' + file_name_2)
        arcpy.RasterToPolyline_conversion(costpath, pl_path, "ZERO", 10,
                                          "SIMPLIFY")
        distance = 0
        with arcpy.da.SearchCursor(pl_path + '.shp',
                                   ['SHAPE@LENGTH']) as poly_cursor:
            for row in poly_cursor:
                distance += row[0]  # sum the length of each polyline segment
    except arcpy.ExecuteError:
        error = arcpy.GetMessages(2)
        str_error = str(error)
        if str_error.startswith('ERROR 010151'):
            print(
                '\nCannot convert cost path raster between ' + loc_one_name +
                ' and ' + loc_two_name +
                ' to a valid polyline, but rest of data should be saved properly. Source and destination may be too '
                'close to each other.')
            print(
                'Linear distance between source and destination set to zero in output table.'
            )
            print(str(error))
            log.write(
                asctime() + ': Cannot convert cost path raster between ' +
                loc_one_name + ' and ' + loc_two_name +
                ' to a valid polyline, but rest of data should be saved properly.\n'
                +
                'Linear distance between source and destination set to zero in output table.\n'
                + str(error) +
                '------------------------------------------------------------------------------------------'
                + '\n')
            distance = 0
        else:
            print(
                '\nCannot convert cost path raster between ' + loc_one_name +
                ' and ' + loc_two_name +
                ' to a valid polyline, but rest of data should be saved properly.'
            )
            print(
                'Linear distance between source and destination not calculated.'
            )
            print(str(error))
            log.write(
                asctime() + ': Cannot convert cost path raster between ' +
                loc_one_name + ' and ' + loc_two_name +
                ' to a valid polyline, but rest of data should be saved properly.\n'
                +
                'Linear distance between source and destination not calculated.\n'
                + str(error) +
                '------------------------------------------------------------------------------------------'
                + '\n')
            distance = 'NA'
    except Exception as error:
        print('\nCannot convert cost path raster between ' + loc_one_name +
              ' and ' + loc_two_name + ' to a valid polyline.')
        print('Linear distance between source and destination not calculated.')
        print(str(error))
        log.write(
            asctime() + ': Cannot convert cost path raster between ' +
            loc_one_name + ' and ' + loc_two_name +
            ' to a valid polyline, but rest of data should be saved properly.\n'
            +
            'Linear distance between source and destination not calculated.\n'
            + str(error) +
            '------------------------------------------------------------------------------------------'
            + '\n')
        distance = 0

    try:
        arcpy.AddField_management(costpath, 'Source', 'TEXT')
        arcpy.AddField_management(costpath, 'Dest', 'TEXT')
        arcpy.AddField_management(costpath, 'Distance', 'FLOAT')
        arcpy.CalculateField_management(costpath, 'Source', "'" + name_1 + "'")
        arcpy.CalculateField_management(costpath, 'Dest', "'" + name_2 + "'")
        arcpy.CalculateField_management(costpath, 'Distance', distance)
        arcpy.MakeTableView_management(costpath, 'table')
        with arcpy.da.SearchCursor(
                'table',
            ['SOURCE', 'DEST', 'PATHCOST', 'DISTANCE', 'STARTROW'
             ]) as table_cursor:
            for entry in table_cursor:
                if entry[4] != 0:
                    in_cursor = arcpy.da.InsertCursor(table, fields)
                    in_cursor.insertRow(
                        (str(entry[0]), str(entry[1]), entry[2], entry[3]))
                    del in_cursor

        if int_data is True:
            try:
                arcpy.CopyRows_management(
                    costpath, directory + r'\tables\tb_' + file_name_1 + '_' +
                    file_name_2 + '.csv')
            except Exception as error:
                print('\nFailed to save data for cost path between ' +
                      loc_one_name + ' and ' + loc_two_name +
                      ' in .csv table. See error message for more details.')
                print(
                    'Linear distance between source and destination not calculated.'
                )
                print(str(error))
                log.write(
                    asctime() +
                    ': Failed to save data for cost path between ' +
                    loc_one_name + ' and ' + loc_two_name +
                    ' in .csv table. See error message for more details.\n' +
                    str(error) +
                    '------------------------------------------------------------------------------------------'
                    + '\n')

            try:
                costpath.save(directory + r'\costpath\cp_' + file_name_1 +
                              '_' + file_name_2)
            except Exception as error:
                str_error = str(error)
                if str_error.startswith('ERROR 010240'):
                    print('\nCould not save cost path raster cp_' +
                          file_name_1 + '_' + file_name_2 +
                          ', but rest of data should be saved properly.')
                    print(
                        'Combination of file names for fc one and fc two likely exceeds 13 characters. '
                        'See help file for more information.')
                    log.write(
                        asctime() + ': Could not save cost path raster cp_' +
                        file_name_1 + '_' + file_name_2 +
                        ', but rest of data should be saved properly.\n' +
                        'Combination of file names for fc one and fc two likely exceeds 13 characters. '
                        'See help file for more information.\n' + str(error) +
                        '\n' +
                        '----------------------------------------------------'
                        '--------------------------------------' + '\n')
                else:
                    print(
                        '\nCould not save cost path raster cp_' + file_name_1 +
                        '_' + file_name_2 +
                        ', but rest of data should be saved properly. See error message for more details'
                    )
                    print(str(error))
                    log.write(
                        asctime() + ': Could not save cost path raster cp_' +
                        file_name_1 + '_' + file_name_2 +
                        ', but rest of data should be saved properly. See error message for more details.\n'
                        +
                        '-------------------------------------------------------'
                        '-----------------------------------' + '\n')
    except arcpy.ExecuteError:
        error = arcpy.GetMessages(2)
        print('\nFailed to properly save data for least cost path between ' +
              loc_one_name + ' and ' + loc_two_name +
              ' in master table. Script will continue with next iteration(1).')
        print(str(error))
        log.write(
            asctime() +
            ': Failed to properly save data for least cost path between ' +
            loc_one_name + ' and ' + loc_two_name +
            ' in master table. Script continued with next iteration.' + '.\n' +
            str(error) +
            '------------------------------------------------------------------------------------------'
            + '\n')
    except Exception as error:
        print('\nFailed to properly save data for least cost path between ' +
              loc_one_name + ' and ' + loc_two_name +
              ' in master table. Script will continue with next iteration(2).')
        print(str(error))
        log.write(
            asctime() +
            ': Failed to properly save data for least cost path between ' +
            loc_one_name + ' and ' + loc_two_name +
            ' in master table. Script continued with next iteration.' + '.\n' +
            str(error) +
            '------------------------------------------------------------------------------------------'
            + '\n')
Exemplo n.º 25
0
for ab in abbr:
    arcpy.Select_analysis(
        os.path.join(outWorkspace, 'Clumped_Cluster_{}.shp'.format(ab)),
        os.path.join(outWorkspace, 'Dispersed_Input_{}.shp'.format(ab)),
        '"CLUSTER_ID" = -1')

for ab in abbr:
    arcpy.stats.DensityBasedClustering(
        os.path.join(outWorkspace, 'Dispersed_Input_{}.shp'.format(ab)),
        os.path.join(outWorkspace, 'Dispersed_Cluster_{}.shp'.format(ab)),
        "OPTICS", minFeatures2, srcDistance2, "")

for ab in abbr:
    arcpy.Select_analysis(
        os.path.join(outWorkspace, 'Dispersed_Cluster_{}.shp'.format(ab)),
        os.path.join(outWorkspace, 'Random_Points{}.shp'.format(ab)),
        '"CLUSTER_ID" = -1')

for dbaseTable in arcpy.ListTables():  # check if there is a way to only select certain tables - don't need inputs, just outputs
    if "Random_Points" in dbaseTable:
        outTable = os.path.join(outWorkspace, os.path.splitext(dbaseTable)[0])
        arcpy.CopyRows_management(dbaseTable, outTable + '.csv')
    else:
        print("Table could not be found.")

# TODO: wrap in try/except for arcgisscripting.ExecuteError ERROR 110141: "The Minimum Number of Features per Cluster is greater than the number of features in the dataset. Failed to execute (DensityBasedClustering)."

# these files can then be read back in and counted for the results, e.g.:
print("************************************************************************")

total_random_points = []
total_clumped_clusters = []
total_dispersed_clusters = []

with open(file_path + "random_point_rows.csv", "r") as csv_file:  # wasn't recognizing file in same folder, had to add file path
    csv_reader = csv.reader(csv_file, delimiter=',')
    for lines in csv_reader:
Exemplo n.º 26
0
arcpy.env.overwriteOutput = True

infilename = arcpy.GetParameterAsText(0)  #as CSV
infile = arcpy.GetParameter(0)
outfile = arcpy.GetParameterAsText(1)  #as GDB
outname = outfile + '\\AllPoints'
csvFile = os.path.basename(infilename)
spRef = arcpy.SpatialReference(
    "NAD 1983 StatePlane Missouri East FIPS 2401 (US Feet)")

if not arcpy.Exists(outfile):
    arcpy.AddMessage("Creating GDB...")
    arcpy.CreateFileGDB_management(os.path.dirname(outfile),
                                   os.path.basename(outfile))

arcpy.AddMessage("Copying Rows...")
for inputs in infile:
    arcpy.AddMessage(inputs)
    if not arcpy.Exists(outname):
        arcpy.CopyRows_management(inputs, outname)
    else:
        arcpy.Append_management(inputs, outname, 'NO_TEST', '', '')

arcpy.AddMessage("Making Point Features...")
arcpy.MakeXYEventLayer_management(outname, "XCoord", "YCoord", "Temp_Points",
                                  spRef, "")
arcpy.FeatureClassToFeatureClass_conversion("Temp_Points", outfile,
                                            'STL_CRIME_POINTS')
arcpy.Delete_management(outname)
Exemplo n.º 27
0
        if row[3] is None: #Transfer AADT_avg to AADT_interp
            row[3] = row[2]
        cursor.updateRow(row)

#For all non-arterial roads that do not already have an AADT value, assign 1000
arcpy.JoinField_management(roadstraffic_avg, 'CUSTOM_ID', 'routes_loc', 'CUSTOM_ID', 'AADT_interp')
with arcpy.da.UpdateCursor(roadstraffic_avg, ['ARTDESCRIP','AADT_avg','AADT_interp']) as cursor:
    for row in cursor:
        if row[1] is not None:
            row[2] = row[1]
        if row[1] is None and row[2] is None and row[0] in ['', 'Not Designated']:
            row[2] = 1000
        cursor.updateRow(row)

#Write out individual roads' attributes
arcpy.CopyRows_management(roadstraffic_avg, path.join(rootdir, 'results/Seattle_roads.dbf'))

########################################################################################################################

########################################################################################################################
# CREATE HEATMAPS OF SPEEDLIMIT, AADT, and BING
# Use a logarithmic decay function to 'simulate' the pollution spread of various levels of traffic volume, speed,
# and congestion
########################################################################################################################
res = arcpy.GetRasterProperties_management(path.join(rootdir,'results/bing/180620_09_30_class_mlc.tif'), 'CELLSIZEX')
#SPEED LIMIT
arcpy.PolylineToRaster_conversion(roadstraffic_avg, value_field='SPEEDLIMIT', out_rasterdataset='Seattle_spdlm', priority_field='SPEEDLIMIT',cellsize=res)
heat_spdlm = FocalStatistics(path.join(gdb,'Seattle_spdlm'), neighborhood=NbrWeight('C:/Mathis/ICSL/stormwater/results/logkernel100.txt'),
                             statistics_type='SUM', ignore_nodata='DATA') #It seems that full paths are needed to make this work
heat_spdlm.save('heat_spdlm')
heat_spdlm_int = Int(Raster('heat_spdlm')+0.5)  # consistently results in an overall Python crash?
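
# The FocalStatistics call above depends on a pre-built logarithmic-decay
# kernel file (logkernel100.txt). A sketch of how such a weight file could
# be generated; the kernel size and decay shape are assumptions, while the
# file format (a "width height" header followed by rows of weights) is the
# Esri weighted-neighborhood format:
import numpy as np

size = 101                        # odd kernel width in cells (assumption)
c = size // 2
y, x = np.ogrid[-c:c + 1, -c:c + 1]
dist = np.hypot(x, y)
# logarithmic decay from the center cell, zero beyond the kernel radius
weights = np.where(dist <= c, np.log(c + 1) - np.log(dist + 1), 0)

with open('logkernel_sketch.txt', 'w') as f:
    f.write('{0} {0}\n'.format(size))
    for row_vals in weights:
        f.write(' '.join('{:.4f}'.format(v) for v in row_vals) + '\n')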
Exemplo n.º 28
0
destination_view = os.path.join(arcpy.env.scratchGDB, 'destination_table_view')
print(destination_view)

#zones to calculate stats on
#inpoly = r"E:/Guyana Priority Areas - Backup/Planning Units/hex_5km_pu_wgs84_102218.shp"
inpoly = r"E:/Guyana Priority Areas - Backup/Planning Units/North_Rupununi/nrup_pu_3119_1km_hex_wgs84_ed.shp"
print(inpoly)

#where to save output files
destination_path = r"E:/test/tables"
print(destination_path)

#list of rasters to calculate statistics on
rasters = arcpy.ListRasters("*", "TIF")

for raster in rasters:
    print(raster)
    destination_raster = os.path.join(destination_path, raster)[:-4] + '.dbf'
    arcpy.gp.ZonalStatisticsAsTable_sa(inpoly, "PU_ID", raster,
                                       destination_raster, "DATA", "SUM")

    destination_csv = os.path.join(destination_path, raster)[:-4] + '.csv'
    field_info = arcpy.FieldInfo()
    for field in arcpy.ListFields(destination_raster):
        field_info.addField(field.name, field.name, "VISIBLE", "")
    arcpy.MakeTableView_management(destination_raster, destination_view, "",
                                   "", field_info)
    arcpy.CopyRows_management(destination_view, destination_csv)

arcpy.CheckInExtension("Spatial")
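
# A possible follow-up step, not in the original script: merge the per-raster
# CSVs into one wide table keyed on PU_ID, assuming pandas is available and
# each CSV keeps the zone field plus a SUM column:
import glob
import pandas as pd

frames = []
for csv_path in glob.glob(os.path.join(destination_path, '*.csv')):
    name = os.path.splitext(os.path.basename(csv_path))[0]
    stats = pd.read_csv(csv_path)[['PU_ID', 'SUM']].rename(columns={'SUM': name})
    frames.append(stats.set_index('PU_ID'))

combined = pd.concat(frames, axis=1)
combined.to_csv(os.path.join(destination_path, 'combined_zonal_sums.csv'))
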
def tableToLineOfBearing(inputTable, inputCoordinateFormat, inputXField,
                         inputYField, inputBearingUnits, inputBearingField,
                         inputDistanceUnits, inputDistanceField,
                         outputLineFeatures, inputLineType,
                         inputSpatialReference):
    '''
    Tool method for converting a table of starting points, bearings, and distances
    to line features.
    
    inputTable - input table, each row will be a separate line feature in output
    inputCoordinateFormat - coordinate notation format of input vertices
    inputXField - field in inputTable for vertex x-coordinate, or full coordinate
    inputYField - field in inputTable for vertex y-coordinate, or None
    inputBearingUnits -
    inputBearingField -
    inputDistanceUnits -
    inputDistanceField -
    outputLineFeatures - polyline feature class to create
    inputLineType - 
    inputSpatialReference - spatial reference of input coordinates
    
    returns polyline feature class
    
    inputCoordinateFormat must be one of the following:
    * DD_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DD_2: Longitude and latitude values are in two separate fields.
    * DDM_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DDM_2: Longitude and latitude values are in two separate fields.
    * DMS_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DMS_2: Longitude and latitude values are in two separate fields.
    * GARS: Global Area Reference System. Based on latitude and longitude, it divides and subdivides the world into cells.
    * GEOREF: World Geographic Reference System. A grid-based system that divides the world into 15-degree quadrangles and then subdivides into smaller quadrangles.
    * UTM_ZONES: The letter N or S after the UTM zone number designates only North or South hemisphere.
    * UTM_BANDS: The letter after the UTM zone number designates one of the 20 latitude bands. N or S does not designate a hemisphere.
    * USNG: United States National Grid. Almost exactly the same as MGRS but uses North American Datum 1983 (NAD83) as its datum.
    * MGRS: Military Grid Reference System. Follows the UTM coordinates and divides the world into 6-degree longitude and 20 latitude bands, but MGRS then further subdivides the grid zones into smaller 100,000-meter grids. These 100,000-meter grids are then divided into 10,000-meter, 1,000-meter, 100-meter, 10-meter, and 1-meter grids.
    
    inputBearingUnits must be one of the following:
    * DEGREES
    * MILS
    * RADS
    * GRAD
    
    inputDistanceUnits must be one of the following:
    * METERS
    * KILOMETERS
    * MILES
    * NAUTICAL_MILES
    * FEET
    * US_SURVEY_FEET
    
    inputLineType must be one of the following:
    * GEODESIC:
    * GREAT_CIRCLE:
    * RHUMB_LINE:
    * NORMAL_SECTION:
    
    '''
    try:
        env.overwriteOutput = True

        deleteme = []
        joinFieldName = "JoinID"
        scratch = '%scratchGDB%'
        if env.scratchWorkspace:
            scratch = env.scratchWorkspace

        inputSpatialReference = _checkSpatialRef(inputSpatialReference)

        copyRows = os.path.join(scratch, "copyRows")
        arcpy.CopyRows_management(inputTable, copyRows)
        originalTableFieldNames = _tableFieldNames(inputTable,
                                                   joinExcludeFields)
        addUniqueRowID(copyRows, joinFieldName)

        arcpy.AddMessage("Formatting start point...")
        copyCCN = os.path.join(scratch, "copyCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows, copyCCN,
                                                   inputXField, inputYField,
                                                   inputCoordinateFormat,
                                                   "DD_NUMERIC", joinFieldName,
                                                   inputSpatialReference)

        arcpy.AddMessage("Creating lines as {0}...".format(inputLineType))
        arcpy.BearingDistanceToLine_management(
            copyCCN, outputLineFeatures, "DDLon", "DDLat", inputDistanceField,
            inputDistanceUnits, inputBearingField, inputBearingUnits,
            inputLineType, joinFieldName, inputSpatialReference)

        #Join original table fields to output
        arcpy.AddMessage(
            "Joining fields from input table to output line features...")
        arcpy.JoinField_management(outputLineFeatures, joinFieldName, copyRows,
                                   joinFieldName, originalTableFieldNames)
        arcpy.DeleteField_management(outputLineFeatures, [joinFieldName])

        return outputLineFeatures

    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(
            sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)

    finally:
        if len(deleteme) > 0:
            # cleanup intermediate datasets
            if debug:
                arcpy.AddMessage("Removing intermediate datasets...")
            for i in deleteme:
                if debug:
                    arcpy.AddMessage("Removing: " + str(i))
                arcpy.Delete_management(i)
            if debug:
                arcpy.AddMessage("Done")
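
# As with tableToPolyline earlier, a hypothetical call; every argument below
# is a placeholder, not a value from the original script:
# tableToLineOfBearing(r"C:\data\obs.gdb\sightings",   # inputTable
#                      "DD_2", "lon", "lat",           # coordinate format and fields
#                      "DEGREES", "bearing",           # bearing units and field
#                      "KILOMETERS", "dist_km",        # distance units and field
#                      r"C:\data\obs.gdb\sight_lines", # outputLineFeatures
#                      "GEODESIC", None)               # line type, spatial reference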
Exemplo n.º 30
0
        file.write(str(time.ctime()) +": dissolved bikes"+ "\n")
    except Exception as e:
        print(e)
        file.write(str(time.ctime()) +": FAILED TO PROCESS bike counts"+ "\n")

################################################################################################
        
    #STEP THREE
    # GEOPROCESSING: PED AND BIKE FATALITIES

    # PED 
    # Ped fatalities
    try:
        switrs_mvmt_csv = "C:\\ETLs\\TIM\\TIMUpdates\\Helper Files\\switrs_mvmt.csv"
        switrs_mvmt_codes = "C:\\ETLs\\TIM\\TIMUpdates\\Helper Files\\switrs_mvmt.dbf"
        arcpy.CopyRows_management(switrs_mvmt_csv, switrs_mvmt_codes)
        print "SWITRS codes loaded"
    except:
        file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - SWITRS codes loading failed"+ "\n")
        print "SWITRS codes not loaded"

    try:
        print "Filtering for ped fatalities..."
        # Create shapefile: pedestrian level, fatalities only
        ped_f = "pedcollisions_party_ped_fatal"
        arcpy.FeatureClassToFeatureClass_conversion(staging_gdb + pedcol_party, staging_gdb, ped_f, """ "party_type" = 'Pedestrian' AND "party_number_killed" <> 0 """)
        file.write(str(time.ctime()) +": copied FC - ped fatal"+ "\n")
        print("Success")
    except Exception as e:
        print(e)
        file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - feature class to feature class conversion (ped level fatalities)"+ "\n")