def createOutput(self, outputTable):
        """Creates Moran's I Step Output Table.

        INPUTS
        outputTable (str): path to the output table
        """

        #### Allow Overwrite Output ####
        ARCPY.env.overwriteOutput = 1

        #### Get Output Table Name With Extension if Appropriate ####
        outputTable, dbf = UTILS.returnTableName(outputTable)

        #### Set Progressor ####
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84008))

        #### Delete Table If Exists ####
        UTILS.passiveDelete(outputTable)

        #### Create Table ####
        outPath, outName = OS.path.split(outputTable)
        try:
            DM.CreateTable(outPath, outName)
        except:
            ARCPY.AddIDMessage("ERROR", 541)
            raise SystemExit()

        #### Add Result Fields ####
        self.outputFields = []
        for field in iaFieldNames:
            fieldOut = ARCPY.ValidateFieldName(field, outPath)
            UTILS.addEmptyField(outputTable, fieldOut, "DOUBLE")
            self.outputFields.append(fieldOut)

        #### Create Insert Cursor ####
        try:
            insert = DA.InsertCursor(outputTable, self.outputFields)
        except:
            ARCPY.AddIDMessage("ERROR", 204)
            raise SystemExit()

        #### Add Rows to Output Table ####
        for testIter in xrange(self.nIncrements):
            insert.insertRow(self.giResults[testIter])

        #### Clean Up ####
        del insert

        return outputTable, dbf
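
All of the output-table routines in this listing follow the same geoprocessing pattern: create an empty table, add the result fields, then stream rows in with an insert cursor. A minimal standalone sketch of that pattern, assuming arcpy is available; the function name write_results_table and its arguments are illustrative only, not part of the original tool:

import os
import arcpy

def write_results_table(output_table, field_names, rows):
    """Create an empty table and stream result rows into it."""
    arcpy.env.overwriteOutput = True
    out_path, out_name = os.path.split(output_table)

    # Create the empty table, then add one DOUBLE field per result column.
    arcpy.management.CreateTable(out_path, out_name)
    valid_names = []
    for field in field_names:
        valid = arcpy.ValidateFieldName(field, out_path)
        arcpy.management.AddField(output_table, valid, "DOUBLE")
        valid_names.append(valid)

    # da cursors support the with statement, which releases the lock on exit.
    with arcpy.da.InsertCursor(output_table, valid_names) as cursor:
        for row in rows:
            cursor.insertRow(row)
    return output_table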
Example #2
    def outputPMP(type, area, outPath):
        desc = arcpy.Describe(basin)
        basinName = desc.baseName
        # Location of the 'PMP_Points' feature class, which provides the data for the output
        pmpPoints = env.scratchGDB + "\\PMP_Points"

        outType = type[:1]
        outArea = str(int(round(area, 0))) + "sqmi"
        outFC = outType + "_" + outArea  # used only to label the status message below
        arcpy.AddMessage("\nCopying PMP_Points feature class to " + outFC + "...")
        # Merge the scratch feature layer(s) of vector grid cells into the output location
        dm.Merge(pmpPoints, outPath)

        arcpy.AddMessage("\nCreating Basin Summary Table...")
        tableName = type + "_PMP_Basin_Average" + "_" + outArea
        tablePath = env.scratchGDB + "\\" + tableName
        dm.CreateTable(env.scratchGDB, tableName)  # create a blank table
        # Create an insert cursor and add a single blank row to the table
        cursor = arcpy.da.InsertCursor(tablePath, "*")
        cursor.insertRow([0])
        del cursor

        # Create and populate the "Storm Type" field
        dm.AddField(tablePath, "STORM_TYPE", "TEXT", "", "", 10, "Storm Type")
        dm.CalculateField(tablePath, "STORM_TYPE", "'" + type + "'", "PYTHON_9.3")

        # Add a field for each PMP duration and calculate the basin average
        i = 0
        for field in arcpy.ListFields(pmpPoints, "PMP_*"):
            fieldName = field.name
            # basinAve() returns the basin average (weighted or not) for this field
            fieldAve = basinAve(basin, fieldName)
            dm.AddField(tablePath, fieldName, "DOUBLE", "", 2)  # add duration field
            dm.CalculateField(tablePath, fieldName, fieldAve, "PYTHON_9.3")  # assign the basin average

            i += 1
        arcpy.AddMessage("\nSummary table complete.")

        basAveTables.append(tablePath)

        return
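
basinAve() is defined elsewhere in this tool and is not shown here. Purely as an illustration of what such a helper might do, the sketch below computes a simple (optionally weighted) average of one field over a point feature class; the name basin_average, its arguments, and the weighting scheme are assumptions, not the tool's actual implementation:

import arcpy

def basin_average(point_fc, value_field, weight_field=None):
    """Return the mean of value_field, optionally weighted by weight_field."""
    fields = [value_field] + ([weight_field] if weight_field else [])
    total, weight_sum = 0.0, 0.0
    with arcpy.da.SearchCursor(point_fc, fields) as cursor:
        for row in cursor:
            value = row[0]
            if value is None:
                continue  # skip features with no data
            weight = row[1] if weight_field else 1.0
            total += value * weight
            weight_sum += weight
    return total / weight_sum if weight_sum else None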
Example #3
def lake_from_to(nhd_subregion_gdb, output_table):
    arcpy.env.workspace = 'in_memory'
    waterbody0 = os.path.join(nhd_subregion_gdb, 'NHDWaterbody')
    network = os.path.join(nhd_subregion_gdb, 'Hydrography', 'HYDRO_NET')
    junctions0 = os.path.join(nhd_subregion_gdb, 'HYDRO_NET_Junctions')

    # use layers for selections. We will only work with lakes over 1 hectare for this tool.
    waterbody = DM.MakeFeatureLayer(waterbody0,
                                    'waterbody',
                                    where_clause=LAGOS_LAKE_FILTER)
    num_wbs = int(arcpy.GetCount_management(waterbody).getOutput(0))
    junctions = DM.MakeFeatureLayer(junctions0, 'junctions')

    DM.SelectLayerByLocation(junctions, 'INTERSECT', waterbody, '1 Meters',
                             'NEW_SELECTION')
    junctions_1ha = DM.MakeFeatureLayer(junctions, 'junctions_1ha')

    # insert results into output table
    DM.CreateTable(os.path.dirname(output_table),
                   os.path.basename(output_table))
    DM.AddField(output_table, 'FROM_PERMANENT_ID', 'TEXT', field_length=40)
    DM.AddField(output_table, 'TO_PERMANENT_ID', 'TEXT', field_length=40)

    # create a list to hold the result rows (as dicts) in memory
    results = []

    counter = 0
    progress = .01
    arcpy.AddMessage("Starting network tracing...")
    with arcpy.da.SearchCursor(waterbody, 'Permanent_Identifier') as cursor:
        for row in cursor:
            # report progress at roughly 1% intervals
            counter += 1
            if counter >= float(num_wbs) * progress:
                progress += .01
                arcpy.AddMessage("{}% complete...".format(
                    round(progress * 100, 1)))

            # select this lake
            id = row[0]
            where_clause = """"{0}" = '{1}'""".format('Permanent_Identifier',
                                                      id)
            this_waterbody = DM.MakeFeatureLayer(waterbody, 'this_waterbody',
                                                 where_clause)

            # select junctions overlapping this lake; only the downstream one matters, the rest have no effect
            DM.SelectLayerByLocation(junctions_1ha, 'INTERSECT',
                                     this_waterbody, '1 Meters')
            count_junctions = int(
                arcpy.GetCount_management(junctions_1ha).getOutput(0))
            if count_junctions == 0:
                # add a row with no "TO" lake to the results
                results.append({'FROM': id, 'TO': None})
            else:
                # copy with selection on
                this_junctions = DM.MakeFeatureLayer(junctions_1ha,
                                                     'this_junctions')
                DM.TraceGeometricNetwork(network, 'downstream', this_junctions,
                                         'TRACE_DOWNSTREAM')
                # select lakes that intersect the downstream network within a 1 meter tolerance
                DM.SelectLayerByLocation(waterbody, 'INTERSECT',
                                         'downstream/NHDFlowline', '1 Meters',
                                         'NEW_SELECTION')
                # remove this lake
                DM.SelectLayerByAttribute(waterbody, 'REMOVE_FROM_SELECTION',
                                          where_clause)
                # count the selected lakes; if 0, this lake has no downstream lake to record
                count_waterbody = int(
                    arcpy.GetCount_management(waterbody).getOutput(0))
                # record the result rows (FROM lake, TO lake or None)
                if count_waterbody == 0:
                    # add a row with no "TO" lake to the results
                    results.append({'FROM': id, 'TO': None})
                else:
                    # collect the Permanent_Identifier of every downstream lake
                    to_ids = [
                        row[0] for row in arcpy.da.SearchCursor(
                            waterbody, 'Permanent_Identifier')
                    ]
                    for to_id in to_ids:
                        result = {'FROM': id, 'TO': to_id}
                        results.append(result)

            # clear the selection and delete the per-lake intermediates
            DM.SelectLayerByAttribute(waterbody, 'CLEAR_SELECTION')
            for item in [this_waterbody, this_junctions, 'downstream']:
                DM.Delete(item)

    # insert the results in the table
    insert_cursor = arcpy.da.InsertCursor(
        output_table, ['FROM_PERMANENT_ID', 'TO_PERMANENT_ID'])
    for result in results:
        insert_cursor.insertRow([result['FROM'], result['TO']])

    # delete everything
    for item in [waterbody, junctions, junctions_1ha, 'in_memory']:
        DM.Delete(item)
    arcpy.AddMessage("Completed.")
Example #4
    def createOutput(self, outputTable, displayIt = False):
        """Creates K-Function Output Table.

        INPUTS
        outputTable (str): path to the output table
        displayIt {bool, False}: create output graph?
        """

        #### Allow Overwrite Output ####
        ARCPY.env.overwriteOutput = 1

        #### Get Output Table Name With Extension if Appropriate ####
        outputTable, dbf = UTILS.returnTableName(outputTable)

        #### Set Progressor ####
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84008))

        #### Delete Table If Exists ####
        UTILS.passiveDelete(outputTable)

        #### Create Table ####
        outPath, outName = OS.path.split(outputTable)
        try:
            DM.CreateTable(outPath, outName)
        except:
            ARCPY.AddIDMessage("ERROR", 541)
            raise SystemExit()

        #### Add Result Fields ####
        fn = UTILS.getFieldNames(kOutputFieldNames, outPath) 
        expectedKName, observedKName, diffKName, lowKName, highKName = fn
        outputFields = [expectedKName, observedKName, diffKName]
        if self.permutations:
            outputFields += [lowKName, highKName] 

        for field in outputFields:
            UTILS.addEmptyField(outputTable, field, "DOUBLE")

        #### Create Insert Cursor ####
        try:
            insert = DA.InsertCursor(outputTable, outputFields)
        except:
            ARCPY.AddIDMessage("ERROR", 204)
            raise SystemExit()

        #### Add Rows to Output Table ####
        for testIter in xrange(self.nIncrements):
            distVal = self.cutoffs[testIter]
            ldVal = self.ld[testIter]
            diffVal = ldVal - distVal
            rowResult = [distVal, ldVal, diffVal]
            if self.permutations:
                ldMinVal = self.ldMin[testIter]
                ldMaxVal = self.ldMax[testIter]
                rowResult += [ldMinVal, ldMaxVal]
            insert.insertRow(rowResult)

        #### Clean Up ####
        del insert

        #### Make Table Visible in TOC if *.dbf Had To Be Added ####
        if dbf:
            ARCPY.SetParameterAsText(1, outputTable)

        #### Display Results ####
        if displayIt:
            if "WIN" in SYS.platform.upper():
                #### Set Progressor ####
                ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84186))

                #### Get Image Directory ####
                imageDir = UTILS.getImageDir()

                #### Make List of Fields and Set Template File ####
                yFields = [expectedKName, observedKName]
                if self.permutations:
                    #### Add Confidence Envelopes ####
                    yFields.append(highKName)
                    yFields.append(lowKName)
                    tee = OS.path.join(imageDir, "KFunctionPlotEnv.tee")
                else:
                    tee = OS.path.join(imageDir, "KFunctionPlot.tee")

                xFields = [ expectedKName for i in yFields ]

                #### Create Data Series String ####
                dataStr = UTILS.createSeriesStr(xFields, yFields, outputTable)

                #### Make Graph ####
                DM.MakeGraph(tee, dataStr, "KFunction")
                ARCPY.SetParameterAsText(11, "KFunction")

            else:
                ARCPY.AddIDMessage("Warning", 942)
Example #5
    def unflatten(intermediate_table):
        flat_zoneid = zone_field
        unflat_zoneid = zone_field.replace('flat', '')
        zone_type = [f.type for f in arcpy.ListFields(zone_fc, flat_zoneid)][0]
        # Set up the output table (can't do this until the prior tool is run)
        # if os.path.dirname(out_table):
        #     out_path = os.path.dirname(out_table)
        # else:
        #     out_path = orig_env

        unflat_result = DM.CreateTable('in_memory',
                                       os.path.basename(out_table))

        # get the fields to add to the table
        editable_fields = [
            f for f in arcpy.ListFields(intermediate_table)
            if f.editable and f.name.lower() != flat_zoneid.lower()
        ]

        # populate the new table schema
        DM.AddField(unflat_result, unflat_zoneid, zone_type)
        for f in editable_fields:
            DM.AddField(unflat_result, f.name, f.type, field_length=f.length)

        # map original zone ids to new zone ids
        original_flat = defaultdict(list)
        with arcpy.da.SearchCursor(unflat_table,
                                   [unflat_zoneid, flat_zoneid]) as cursor:
            for row in cursor:
                if row[1] not in original_flat[row[0]]:
                    original_flat[row[0]].append(row[1])

        # Use CELL_COUNT as weight for means to calculate final values for each zone.
        fixed_fields = [
            unflat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
        ]
        other_field_names = [
            f.name for f in editable_fields if f.name not in fixed_fields
        ]
        i_cursor = arcpy.da.InsertCursor(
            unflat_result,
            fixed_fields + other_field_names)  # open output table cursor
        flat_stats = {
            r[0]: r[1:]
            for r in arcpy.da.SearchCursor(intermediate_table, [
                flat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
            ] + other_field_names)
        }

        count_diff = 0
        for zid, unflat_ids in original_flat.items():
            # skip flat polygons that were not rasterized
            valid_unflat_ids = [id for id in unflat_ids if id in flat_stats]
            # ORIGINAL_COUNT was placed at index 0 above
            area_vec = [flat_stats[id][0] for id in valid_unflat_ids]
            cell_vec = [flat_stats[id][1] for id in valid_unflat_ids]
            # datacoveragepct gets special handling
            coverage_vec = [flat_stats[id][2] for id in valid_unflat_ids]
            # "the rest": one list of stat values per id
            stat_vectors_by_id = [flat_stats[id][3:] for id in valid_unflat_ids]

            # calculate the new summarized values
            # a None area is functionally equivalent to 0, and all Nones sum to 0 too
            original_count = sum(filter(None, area_vec))
            cell_count = sum(filter(None, cell_vec))
            if cell_count > 0:
                weighted_coverage = (sum(a * b for a, b in zip(area_vec, coverage_vec))
                                     / original_count)

                # this calculation accounts for both kinds of fractional missing values (the whole zone is no data, or the
                # zone was missing some data and had data coverage % < 100). This is done by converting None to 0
                # and by using cell_count (the count of cells with data present)
                # instead of the full zone original_count. You have to do both or the mean will be distorted.
                # Hand-verification that this works as intended using test GIS data was completed on 2019-11-01 by NJS.
                crossprods = []
                for i in range(0, len(valid_unflat_ids)):
                    crossprods.append([
                        cell_vec[i] * float(s or 0)
                        for s in stat_vectors_by_id[i]
                    ])

                weighted_stat_means = []
                for i in range(0, len(other_field_names)):
                    weighted_stat_means.append(
                        sum(zip(*crossprods)[i]) / cell_count)
            else:
                weighted_coverage = 0
                weighted_stat_means = [None] * len(other_field_names)
                count_diff += 1

            new_row = ([zid, original_count, cell_count, weighted_coverage]
                       + weighted_stat_means)
            i_cursor.insertRow(new_row)
        del i_cursor

        DM.Delete(intermediate_table)

        return [unflat_result, count_diff]
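
The weighting logic described in the comments above (treat None as 0 and divide by the count of cells that actually had data) can be illustrated without arcpy. A minimal sketch; weighted_mean is not a function from this script:

def weighted_mean(values, cell_counts):
    """Cell-count-weighted mean, with None values treated as 0."""
    cell_total = sum(c for c in cell_counts if c)
    if not cell_total:
        return None
    weighted = sum(c * float(v or 0) for v, c in zip(values, cell_counts))
    return weighted / cell_total

# weighted_mean([2.0, None, 4.0], [10, 5, 5]) == (20 + 0 + 20) / 20 == 2.0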
Example #6
def swm2Table(swmFile, outputTable):
    """Places the spatial relationships contained in a given Spatial
    Weight Matrix File (*.swm) into a given output table.

    INPUTS:
    swmFile (str): Path to the input spatial weight matrix file
    outputTable (str): Path to the output database table
    """

    #### Open Spatial Weights and Obtain Characteristics ####
    swm = WU.SWMReader(swmFile)
    masterField = swm.masterField
    N = swm.numObs
    rowStandard = swm.rowStandard

    #### Allow Overwrite Output ####
    ARCPY.env.overwriteOutput = True

    #### Get Output Table Name With Extension if Appropriate ####
    outputTable, dbf = UTILS.returnTableName(outputTable)

    #### Delete Table If Exists ####
    UTILS.passiveDelete(outputTable)

    #### Create Table ####
    outPath, outName = OS.path.split(outputTable)
    try:
        DM.CreateTable(outPath, outName)
    except:
        ARCPY.AddIDMessage("ERROR", 541)
        raise SystemExit()

    #### Create a List of Required Field Names ####
    fn = UTILS.getFieldNames(swm2TabFieldNames, outPath)
    neighFieldName, weightFieldName = fn
    fieldNames = [masterField, neighFieldName, weightFieldName]
    fieldTypes = ["LONG", "LONG", "DOUBLE"]

    for ind, field in enumerate(fieldNames):
        UTILS.addEmptyField(outputTable, field, fieldTypes[ind])

    #### Create Insert Cursor ####
    try:
        insert = DA.InsertCursor(outputTable, fieldNames)
    except:
        ARCPY.AddIDMessage("ERROR", 204)
        raise SystemExit()

    #### Create Progressor ####
    ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84117), 0, N, 1)

    #### Process Spatial Weights File and Populate Output Table ####
    try:
        for r in xrange(N):
            info = swm.swm.readEntry()
            masterID, nn, nhs, weights, sumUnstandard = info
            if nn != 0:
                for ind, neigh in enumerate(nhs):
                    row = [masterID, neigh, weights[ind]]
                    insert.insertRow(row)

            ARCPY.SetProgressorPosition()
    except:
        swm.close()
        ARCPY.AddIDMessage("ERROR", 919)
        raise SystemExit()

    #### Clean Up ####
    del insert
    swm.close()

    #### Report if Any Features Have No Neighbors ####
    swm.reportNoNeighbors()

    #### Make Table Visible in TOC if *.dbf Had To Be Added ####
    if dbf:
        ARCPY.SetParameterAsText(1, outputTable)
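
To consume the table that swm2Table writes, the rows can be read back into a neighbor dictionary. A small sketch, assuming arcpy is available; the neighbor and weight field names below are placeholders for whatever swm2TabFieldNames resolves to:

import arcpy
from collections import defaultdict

def read_weights_table(table, master_field, neigh_field="NID", weight_field="WEIGHT"):
    """Return {master_id: [(neighbor_id, weight), ...]} from the output table."""
    neighbors = defaultdict(list)
    with arcpy.da.SearchCursor(table, [master_field, neigh_field, weight_field]) as cursor:
        for master_id, neigh_id, weight in cursor:
            neighbors[master_id].append((neigh_id, weight))
    return neighbors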