Code Example #1
import os
import sys

import arcpy


def execute_Resample(smoothed_dir, dem3m_dir, flow_dir, frompoints_dir,
                     prefixe, river_tools_folder, output_folder, messages):

    sys.path.append(river_tools_folder)
    from Breach import execute_Breach

    arcpy.env.workspace = smoothed_dir
    rasterlist = arcpy.ListRasters()

    flowdir_output = arcpy.CreateScratchName(
        "rres",
        data_type="RasterDataset",
        workspace=arcpy.env.scratchWorkspace)
    temp_resample = arcpy.CreateScratchName(
        "rres",
        data_type="RasterDataset",
        workspace=arcpy.env.scratchWorkspace)
    temp_tobreach = arcpy.CreateScratchName(
        "rres",
        data_type="RasterDataset",
        workspace=arcpy.env.scratchWorkspace)

    arcpy.env.extent = flow_dir

    for raster in rasterlist:
        print(raster)

        buffered = arcpy.sa.Float(
            arcpy.sa.EucAllocation(arcpy.sa.Int(
                arcpy.Raster(raster) * 1000))) / 1000
        with arcpy.EnvManager(snapRaster=flow_dir):
            arcpy.Resample_management(
                buffered, temp_resample,
                str(flow_dir.meanCellWidth) + " " +
                str(flow_dir.meanCellHeight), "BILINEAR")
        dem3m = os.path.join(dem3m_dir, raster)
        tobreach = arcpy.sa.ExtractByMask(temp_resample, dem3m)
        tobreach.save(temp_tobreach)

        newextent = str(tobreach.extent.XMin) + " " + str(
            tobreach.extent.YMin) + " " + str(
                tobreach.extent.XMax) + " " + str(tobreach.extent.YMax)
        arcpy.Clip_management(flow_dir,
                              newextent,
                              flowdir_output,
                              maintain_clipping_extent="MAINTAIN_EXTENT")

        str_frompoints = os.path.join(frompoints_dir,
                                      prefixe + raster + ".shp")
        result = os.path.join(output_folder, raster)

        with arcpy.EnvManager(extent=flow_dir):
            execute_Breach(arcpy.Raster(temp_tobreach),
                           arcpy.Raster(flowdir_output), str_frompoints,
                           result, messages)

    arcpy.Delete_management(flowdir_output)
    arcpy.Delete_management(temp_resample)
    arcpy.Delete_management(temp_tobreach)
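
A hedged invocation sketch for execute_Resample; every path below is hypothetical, and a scratch workspace must be set because the function builds names with CreateScratchName.

import arcpy

arcpy.CheckOutExtension("Spatial")
arcpy.env.scratchWorkspace = r"C:\temp\scratch.gdb"  # assumed; required by CreateScratchName

flow = arcpy.Raster(r"C:\data\flowdir.tif")  # hypothetical flow-direction raster
execute_Resample(r"C:\data\smoothed", r"C:\data\dem3m", flow,
                 r"C:\data\frompoints", "fp_", r"C:\tools\RiverTools",
                 r"C:\data\out", messages=None)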
Code Example #2
    def update(self, crate):
        '''crate: Crate
        updates the hash table with the current hash and truncates and loads the destination data

        returns an updated crate status
        '''
        status = Crate.UPDATED
        if crate.result[0] == Crate.INVALID_DATA:
            return crate.result
        elif crate.result[0] == Crate.CREATED:
            status = Crate.CREATED

        if crate.source_describe['hasGlobalID']:
            log.info(f'deleting and copying {crate.destination}')
            with arcpy.EnvManager(
                    geographicTransformations=crate.geographic_transformation,
                    preserveGlobalIds=True,
                    outputCoordinateSystem=crate.destination_coordinate_system
            ):
                arcpy.management.Delete(crate.destination)

                #: the only way to preserve global id values when exporting to fgdb is to use these tools
                if crate.is_table():
                    arcpy.conversion.TableToTable(crate.source,
                                                  crate.destination_workspace,
                                                  crate.destination_name)
                else:
                    arcpy.conversion.FeatureClassToFeatureClass(
                        crate.source, crate.destination_workspace,
                        crate.destination_name)
        else:
            log.info(f'truncating and loading {crate.destination}')
            arcpy.management.TruncateTable(crate.destination)

            with arcpy.EnvManager(
                    geographicTransformations=crate.geographic_transformation):
                arcpy.management.Append(crate.source,
                                        crate.destination,
                                        schema_type='NO_TEST')

        table_name = crate.source_name.lower()
        with arcpy.da.UpdateCursor(
                self.hash_table, [hash_field],
                where_clause=f'{table_name_field} = \'{table_name}\''
        ) as cursor:
            try:
                next(cursor)
                log.info(f'updating value in hash table for {table_name}')
                cursor.updateRow((self.current_hashes[table_name], ))
            except StopIteration:
                log.info(f'adding new row in hash table for {table_name}')
                with arcpy.da.InsertCursor(
                        self.hash_table,
                    [table_name_field, hash_field]) as insert_cursor:
                    insert_cursor.insertRow(
                        (table_name, self.current_hashes[table_name]))

        return (status, None)
Code Example #3
import arcpy


def execute_BridgeCorrection(r_dem,
                             str_bridges,
                             str_result,
                             messages,
                             language="FR"):

    with arcpy.EnvManager(snapRaster=r_dem):
        with arcpy.EnvManager(extent=r_dem):

            linebridges = arcpy.CreateScratchName(
                "lines",
                data_type="FeatureClass",
                workspace=arcpy.env.scratchWorkspace)
            arcpy.PolygonToLine_management(str_bridges, linebridges,
                                           "IGNORE_NEIGHBORS")

            r_linebridges = arcpy.CreateScratchName(
                "rlines",
                data_type="RasterDataset",
                workspace=arcpy.env.scratchWorkspace)
            arcpy.PolylineToRaster_conversion(linebridges,
                                              "ORIG_FID",
                                              r_linebridges,
                                              cellsize=r_dem)

            r_polybridges = arcpy.CreateScratchName(
                "rpoly",
                data_type="RasterDataset",
                workspace=arcpy.env.scratchWorkspace)
            arcpy.PolygonToRaster_conversion(
                str_bridges,
                arcpy.Describe(str_bridges).OIDFieldName,
                r_polybridges,
                cellsize=r_dem)

            r_bridges = arcpy.sa.Con(
                arcpy.sa.IsNull(r_polybridges) == 1, r_linebridges,
                r_polybridges)

            z_bridges = arcpy.sa.ZonalStatistics(r_bridges, "VALUE", r_dem,
                                                 "MINIMUM")

            temp_isnull = arcpy.sa.IsNull(z_bridges)

            temp_dem = arcpy.sa.Con(temp_isnull, z_bridges, r_dem, "VALUE = 0")
            temp_fill = arcpy.sa.Fill(temp_dem)
            result = arcpy.sa.Con(temp_isnull, temp_fill, r_dem, "VALUE = 0")
            result.save(str_result)

            arcpy.Delete_management(linebridges)
            arcpy.Delete_management(r_linebridges)
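
A minimal usage sketch for execute_BridgeCorrection, assuming the Spatial Analyst extension is available; the DEM and bridge-polygon paths are hypothetical.

import arcpy

arcpy.CheckOutExtension("Spatial")
arcpy.env.scratchWorkspace = r"C:\temp\scratch.gdb"  # assumed; required by CreateScratchName

execute_BridgeCorrection(arcpy.Raster(r"C:\data\dem.tif"),   # hypothetical DEM
                         r"C:\data\bridges.shp",             # hypothetical bridge polygons
                         r"C:\data\dem_bridges_fixed.tif",
                         messages=None)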
Code Example #4
File: addresses.py  Project: agrc/sweeper
    def sweep(self):
        '''Loop through all values and check addresses for problems, returning a report
        '''
        report = {
            'title': 'Address Test',
            'feature_class': self.table_name,
            'issues': {}
        }
        required_street_address_parts = set(['address_number', 'street_name'])

        with arcpy.EnvManager(workspace=self.workspace):
            with arcpy.da.SearchCursor(
                    self.table_name,
                ['OID@', self.field_name]) as search_cursor:
                for oid, address in search_cursor:
                    issue_message = None
                    try:
                        parsed_address = Address(address)
                    except Exception as exception:
                        issue_message = str(exception)

                    if issue_message is None:
                        parts_found = set(parsed_address.__dict__)
                        missing_parts = required_street_address_parts - parts_found
                        if missing_parts and 'po_box' not in parts_found:
                            issue_message = f'missing address parts: {", ".join(missing_parts)}'
                        elif parsed_address.normalized != address:
                            issue_message = f'address not fully normalized: "{address}" -> "{parsed_address.normalized}"'

                    if issue_message is not None:
                        report['issues'][oid] = issue_message
                        self.oids_with_issues.append(oid)

        return report
Code Example #5
File: addresses.py  Project: agrc/sweeper
    def try_fix(self):
        report = {
            'title': 'Address Try Fix',
            'feature_class': self.table_name,
            'issues': {},
            'fixes': {}
        }

        with arcpy.EnvManager(workspace=self.workspace):
            describe = arcpy.da.Describe(self.table_name)
            where = f'{describe["OIDFieldName"]} IN ({", ".join([str(oid) for oid in self.oids_with_issues])})'
            with arcpy.da.UpdateCursor(self.table_name,
                                       ['OID@', self.field_name],
                                       where_clause=where) as update_cursor:
                for oid, address in update_cursor:
                    try:
                        parsed_address = Address(address)
                        update_cursor.updateRow(
                            (oid, parsed_address.normalized))
                        report['fixes'][oid] = f'{address} -> {parsed_address.normalized}'
                    except Exception as exception:
                        report['issues'][oid] = str(exception)

        return report
Code Example #6
import os

import arcpy

#: `matrix` (the SetNull where-clause) and `out_path` are module-level names
#: assumed to be defined elsewhere in the source file.


def applyMatrix(in_raster):

    in_raster = arcpy.Raster(in_raster)
    base_name = in_raster.name.split(".")[0]
    out_raster = arcpy.sa.Int(in_raster)
    out_raster = arcpy.sa.RegionGroup(out_raster, "FOUR", "CROSS", "NO_LINK",
                                      None)
    zsTable = arcpy.sa.ZonalStatisticsAsTable(out_raster, "Value", in_raster,
                                              r"memory\\zsTable", "DATA",
                                              "MEAN", "CURRENT_SLICE")
    arcpy.management.JoinField(out_raster, "OBJECTID", zsTable, "OBJECTID",
                               ["AREA", "MEAN"])
    out_raster = arcpy.sa.SetNull(out_raster, out_raster, matrix)
    with arcpy.EnvManager(outputZFlag="Disabled", outputMFlag="Disabled"):
        polygon = arcpy.conversion.RasterToPolygon(out_raster,
                                                   r"memory\\polygon",
                                                   "NO_SIMPLIFY", "OBJECTID",
                                                   "SINGLE_OUTER_PART", None)
    polygon = arcpy.management.EliminatePolygonPart(
        polygon, os.path.join(out_path, base_name + "_Polygon"), "AREA",
        "13068 SquareFeet", 0, "CONTAINED_ONLY")
    out_raster = arcpy.sa.ExtractByMask(in_raster, out_raster)
    out_raster = (arcpy.sa.Int(out_raster * 10 + 0.5)) / 10
    out_raster.save(os.path.join(out_path, base_name + "_Cleaned"))
    del polygon, out_raster
Code Example #7
File: duplicates.py  Project: agrc/sweeper
    def try_fix(self):
        '''a method that tries to remove the duplicate records
        '''
        report = {'title': 'Duplicate Try Fix', 'feature_class': self.table_name, 'issues': [], 'fixes': []}

        if len(self.oids_with_issues) == 0:
            return report

        sql = f'"OBJECTID" IN ({",".join([str(oid) for oid in self.oids_with_issues])})'
        temp_feature_layer = 'temp_layer'

        with arcpy.EnvManager(workspace=self.workspace):
            try:
                duplicate_features = arcpy.management.MakeFeatureLayer(self.table_name, temp_feature_layer, sql)

                print(f'attempting to delete {len(self.oids_with_issues)} duplicate records')

                arcpy.management.DeleteFeatures(duplicate_features)

                #: record the fix only when the delete actually succeeded
                report['fixes'].append(f'{len(self.oids_with_issues)} records deleted successfully')
            except Exception as error:
                error_message = f'unable to delete features {error}'
                report['issues'].append(error_message)
            finally:
                if arcpy.Exists(temp_feature_layer):
                    arcpy.management.Delete(temp_feature_layer)

        return report
Code Example #8
File: empties.py  Project: agrc/sweeper
    def try_fix(self):
        '''A method that attempts to remove records with empty geometries
        '''
        report = {
            'title': 'Empty Try Fix',
            'feature_class': self.table_name,
            'issues': [],
            'fixes': []
        }
        if len(self.oids_with_issues) == 0:
            return report

        #: for point, polylines, or polygons
        fields = ['OID@']
        query = f'OBJECTID IN ({",".join([str(oid) for oid in self.oids_with_issues])})'

        with arcpy.EnvManager(workspace=self.workspace):
            with arcpy.da.UpdateCursor(self.table_name, fields,
                                       query) as update_cursor:
                for oid, in update_cursor:
                    if oid not in self.oids_with_issues:
                        continue

                    update_cursor.deleteRow()

        report['fixes'].append(
            f'{len(self.oids_with_issues)} records deleted successfully')

        return report
Code Example #9
import os

import arcpy


def createHeatmap(yrbuilt=2021,
                  field="nnsqft",
                  cellSize=100,
                  searchRadius=1000,
                  intableName="jobs_2045_xy",
                  method="Kernel"):

    s = intableName.split("_")
    outLayer = os.path.join(outpath, s[0] + s[1][2:4] + "prj.shp")
    outRaster = os.path.join(
        outpath, method + s[0] + s[1][2:4] + "_" + str(cellSize) + "_" +
        str(searchRadius) + ".tif")
    print(
        "creating raster data for {0} heatmap in {1} using {2} Density with a cell size {3} and search radius {4}..."
        .format(s[0], s[1], method, str(cellSize), str(searchRadius)))

    arcpy.env.extent = MPOBound
    with arcpy.EnvManager(mask=MPOBound):
        if method == "Point":
            arcpy.gp.PointDensity_sa(outLayer, "NONE", outRaster, cellSize, "",
                                     "SQUARE_KILOMETERS")

        else:
            arcpy.gp.KernelDensity_sa(outLayer, "NONE", outRaster, cellSize,
                                      searchRadius, "SQUARE_KILOMETERS",
                                      "DENSITIES", "GEODESIC")

    print("completed...")
Code Example #10
import arcpy

#: annoRdSegsALL (a road-segment feature class) is a module-level name assumed
#: to be defined elsewhere in the source file.


def outputSelectDominionRdSegs(rdsSGID):
    utSGID = r'C:\ZBECK\BlueStakes\stagingBS.gdb\SGID_GEOGRAPHIC\Utah'
    annoRdSegsALL_FL = arcpy.MakeFeatureLayer_management(
        annoRdSegsALL, 'annoRdSegsALL_FL')
    roadsSGID_FL = arcpy.MakeFeatureLayer_management(rdsSGID, 'roadsSGID_FL')
    sql = """"STATE" = 'Utah'"""
    utah_FL = arcpy.MakeFeatureLayer_management(utSGID, 'utah_FL', sql)

    annoRdSegsALL_Selected = arcpy.SelectLayerByLocation_management(
        annoRdSegsALL_FL, 'INTERSECT', utah_FL)
    annoRdSegsALL_Selected = arcpy.SelectLayerByLocation_management(
        annoRdSegsALL_FL, 'WITHIN_A_DISTANCE', roadsSGID_FL, '75 Meters',
        'SUBSET_SELECTION', 'INVERT')

    selected = arcpy.GetCount_management(annoRdSegsALL_Selected)
    selected_Count = int(selected.getOutput(0))
    print('Dominion Road Segments Selected = {}'.format(selected_Count))

    outLocation = r'C:\ZBECK\BlueStakes\stagingBS.gdb\DOMINION_GEOGRAPHIC'
    with arcpy.EnvManager(workspace=outLocation):
        arcpy.env.overwriteOutput = True

        outRoadSegs = 'RoadSegs_DominionSelect'
        if arcpy.Exists(outRoadSegs):
            arcpy.Delete_management(outRoadSegs)

        arcpy.FeatureClassToFeatureClass_conversion(annoRdSegsALL_Selected,
                                                    outLocation, outRoadSegs)
Code Example #11
import os

import arcpy


def extract_region(region_polygon, output_geodatabase, inputs, name):
	for year in inputs:
		raster = inputs[year]
		print(raster)
		output_path = os.path.join(output_geodatabase, "{}_{}".format(name, year))
		print(output_path)
		with arcpy.EnvManager(outputCoordinateSystem=raster, extent="MINOF", snapRaster=raster, pyramid="NONE"):
			#: alternative: Clip_management(arcpy.Raster(raster), region_polygon, output_path, region_polygon, "#", "NONE", "NO_MAINTAIN_EXTENT")
			#: NO_MAINTAIN_EXTENT should mean to keep cell alignment
			output = arcpy.sa.ExtractByMask(arcpy.Raster(raster), region_polygon)
			output.save(output_path)
Code Example #12
import arcpy


def get_fc_list(inWS: "Input workspace") -> "Full list of featureclasses":
    """Temporarily sets the workspace and returns the full path and name of each fc."""
    fc_paths = []
    with arcpy.EnvManager(workspace=inWS):
        for fc in arcpy.ListFeatureClasses():
            fc_paths.append(arcpy.Describe(fc).catalogPath)
    return fc_paths
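
For example, to print the catalog path of every feature class in a geodatabase (path hypothetical):

for fc_path in get_fc_list(r"C:\data\project.gdb"):
    print(fc_path)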
Code Example #13
import os

import arcpy


def convert_mobile_gdb(in_ws, out_ws):

    with arcpy.EnvManager(workspace=in_ws):
        fcs = arcpy.ListFeatureClasses()
        for fc in fcs:
            new_fc = fc.replace("main.", "")
            print(fc, new_fc)
            out_fc = os.path.join(out_ws, new_fc)
            print(f"Copying to {out_fc}")
            arcpy.management.CopyFeatures(fc, out_fc)
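
A sketch of a call with hypothetical paths; each feature class is copied out with its "main." prefix stripped:

convert_mobile_gdb(r"C:\data\field_collection.geodatabase",  # mobile geodatabase (hypothetical)
                   r"C:\data\archive.gdb")                   # existing target workspace (hypothetical)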
Code Example #14
import arcpy


def delete_reorder_fields(inWS: "Input workspace"):
    '''
    Temporarily sets the workspace and deletes feature classes whose names match "*_reorder".
    '''

    with arcpy.EnvManager(workspace=inWS):
        fcs = arcpy.ListFeatureClasses("*_reorder")
        for fc in fcs:
            print("Deleting " + fc)
            arcpy.Delete_management(fc)
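
Usage is a single call; everything matching the "*_reorder" wildcard in the workspace is deleted (path hypothetical):

delete_reorder_fields(r"C:\data\scratch.gdb")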
Code Example #15
def shakemap_get_bldgs(bldg_gdb=config.BuildingCentroids,
                       eventdir=config.NapaEventDir):

    ShakeMapDir = get_shakemap_dir()
    mi, pgv, pga = get_shakemap_files(eventdir)
    unique = eventdir.split("\\")[-1]

    arcpy.env.workspace = os.path.join(eventdir, "eqmodel_outputs.gdb")
    GDB = os.path.join(eventdir, "eqmodel_outputs.gdb")

    #get list of intersecting states
    state_names_list = unique_values(
        table=os.path.join(GDB, "census_county_max_mmi_pga_pgv"),
        field="STATE_NAME")
    bldgs_output = os.path.join(GDB, "ORNL_LB_bldgs")

    #select building centroids that are within intersecting states and intersect them with shakemap
    remove_list = []
    for state in state_names_list:
        fc = os.path.join(bldg_gdb, state)
        if arcpy.Exists(fc):
            arcpy.management.MakeFeatureLayer(os.path.join(bldg_gdb, state),
                                              "{}_lyr".format(state))
            arcpy.management.SelectLayerByLocation("{}_lyr".format(state),
                                                   'INTERSECT',
                                                   "shakemap_countyclip_mmi",
                                                   "", "NEW_SELECTION")
        else:
            remove_list.append(state)

    if len(remove_list) >= 1:
        for x in remove_list:
            state_names_list.remove(x)

    if len(state_names_list) > 1:
        #merge
        arcpy.Merge_management(["{}_lyr".format(x) for x in state_names_list],
                               bldgs_output)
    else:
        #copy features
        #: `state` here held the last value from the earlier loop, which is not
        #: necessarily the one remaining state; use the single list entry instead
        arcpy.CopyFeatures_management("{}_lyr".format(state_names_list[0]),
                                      bldgs_output)

    #Summarize Within Bldg Count to Tracts
    with arcpy.EnvManager(scratchWorkspace=GDB, workspace=GDB):
        arcpy.analysis.SummarizeWithin(
            os.path.join(GDB, "census_tract_max_mmi_pga_pgv"), bldgs_output,
            os.path.join(GDB,
                         "census_tract_max_mmi_pga_pgv_bldgcount"), "KEEP_ALL",
            None, "ADD_SHAPE_SUM", '', None, "NO_MIN_MAJ", "NO_PERCENT", None)

    scratchgdb = os.path.join(eventdir, 'scratch.gdb')
    if arcpy.Exists(scratchgdb):
        arcpy.management.Delete(scratchgdb)

    return bldgs_output
Code Example #16
File: duplicates.py  Project: agrc/sweeper
    def sweep(self):
        '''A method that finds duplicate records and returns a report dictionary
        '''
        report = {'title': 'Duplicate Test', 'feature_class': self.table_name, 'issues': []}
        digests = set()

        truncate_shape_precision = re.compile(r'(\d+\.\d{2})(\d+)')

        with arcpy.EnvManager(workspace=self.workspace):
            description = arcpy.da.Describe(self.table_name)

            skip_fields = ['guid', description['shapeFieldName']]

            if description['hasGlobalID']:
                skip_fields.append(description['globalIDFieldName'])

            if description['hasOID']:
                skip_fields.append(description['OIDFieldName'])

            fields = [field.name for field in description['fields'] if field.name not in skip_fields]

            fields.append('SHAPE@WKT')
            fields.append('OID@')

            shapefield_index = fields.index('SHAPE@WKT')
            oid_index = fields.index('OID@')

            with arcpy.da.SearchCursor(self.table_name, fields) as search_cursor:
                for row in search_cursor:
                    shape_wkt = row[shapefield_index]
                    object_id = row[oid_index]

                    if shape_wkt is None:
                        continue

                    #: trim some digits to help with hash matching
                    generalized_wkt = truncate_shape_precision.sub(r'\1', shape_wkt)

                    hasher = xxh64(f'{row[:-2]} {generalized_wkt}')
                    digest = hasher.hexdigest()

                    if digest in digests:
                        report['issues'].append(str(object_id))
                        self.oids_with_issues.append(object_id)

                    digests.add(digest)

        return report
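
The xxh64 constructor used above presumably comes from the third-party xxhash package (an assumption based on the name); a minimal check:

from xxhash import xxh64

print(xxh64('example payload').hexdigest())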
Code Example #17
import arcpy


def Model():  # Model

    # To allow overwriting outputs change overwriteOutput option to True.
    arcpy.env.overwriteOutput = False

    # Check out any necessary licenses.
    arcpy.CheckOutExtension("spatial")
    arcpy.CheckOutExtension("ImageAnalyst")

    Incidents_xy = r'S:\CoreData\NCIS\LITS_20200729\NCIS.gdb\Incidents_xy'

    # Process: Kernel Density (Kernel Density) (sa)
    #: ModelBuilder export pattern: the variable first holds the output path, a
    #: second name aliases that path, then the variable is reused for the raster
    #: object and saved back to the aliased path.
    MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P = "F:\\Suicide_Vs_Population\\LITS_20200729\\Suicide_2008-2017_KDs.gdb\\MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P"
    Kernel_Density = MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P
    MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P = arcpy.sa.KernelDensity(
        in_features=Incidents_xy,
        population_field="NONE",
        cell_size="200",
        search_radius=1000,
        area_unit_scale_factor="SQUARE_KILOMETERS",
        out_cell_values="DENSITIES",
        method="PLANAR",
        in_barriers="")
    MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P.save(Kernel_Density)

    # Process: Con (Con) (ia)
    MB_Con_1k_KD_Suicide_Inc_08_17_Total_P = "F:\\Suicide_Vs_Population\\LITS_20200729\\Suicide_2008-2017_KDs.gdb\\MB_Con_1k_KD_Suicide_Inc_08_17_Total_P"
    Con = MB_Con_1k_KD_Suicide_Inc_08_17_Total_P
    MB_Con_1k_KD_Suicide_Inc_08_17_Total_P = arcpy.ia.Con(
        in_conditional_raster=MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P,
        in_true_raster_or_constant=MB_Rescale_1k_KD_Suicide_Inc_08_17_Total_P,
        in_false_raster_or_constant="",
        where_clause="VALUE > 0")
    MB_Con_1k_KD_Suicide_Inc_08_17_Total_P.save(Con)

    # Process: Rescale by Function (Rescale by Function) (sa)
    MBx_Rescale_1k_KD_Suicide_Inc_08_17_Total_P = "F:\\Suicide_Vs_Population\\LITS_20200729\\Suicide_2008-2017_KDs.gdb\\MBx_Rescale_1k_KD_Suicide_Inc_08_17_Total_P"
    Rescale_by_Function = MBx_Rescale_1k_KD_Suicide_Inc_08_17_Total_P
    with arcpy.EnvManager(compression="NONE", pyramid="NONE"):
        MBx_Rescale_1k_KD_Suicide_Inc_08_17_Total_P = arcpy.sa.RescaleByFunction(
            in_raster=MB_Con_1k_KD_Suicide_Inc_08_17_Total_P,
            transformation_function=[[
                "LINEAR", 0, "", 60.44379806518555, "", 0, 60.44379806518555,
                ""
            ]],
            from_scale=0,
            to_scale=20)
        MBx_Rescale_1k_KD_Suicide_Inc_08_17_Total_P.save(Rescale_by_Function)
Code Example #18
import arcpy


def execute_ExpandExtent(r_raster_in, raster_out, messages):

    newXMin = r_raster_in.extent.XMin - r_raster_in.meanCellWidth
    newXMax = r_raster_in.extent.XMax + r_raster_in.meanCellWidth
    newYMin = r_raster_in.extent.YMin - r_raster_in.meanCellHeight
    newYMax = r_raster_in.extent.YMax + r_raster_in.meanCellHeight

    #newextent = str(newXMin) + " " + str(newYMin) + " " + str(newXMax) + " " + str(newYMax)

    #arcpy.Clip_management(r_raster_in, newextent, raster_out, maintain_clipping_extent="NO_MAINTAIN_EXTENT")
    with arcpy.EnvManager(
            extent=arcpy.Extent(newXMin, newYMin, newXMax, newYMax)):
        raster2 = r_raster_in + 0
        raster2.save(raster_out)

    return
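
A minimal sketch of expanding a raster by one cell on each side; paths are hypothetical, and messages is unused by the body shown:

import arcpy

execute_ExpandExtent(arcpy.Raster(r"C:\data\dem.tif"),  # hypothetical input raster
                     r"C:\data\dem_expanded.tif",
                     messages=None)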
Code Example #19
import arcpy


def get_featureclasses(workspace_path):
    '''
    workspace_path: full path to the feature workspace.
    '''

    with arcpy.EnvManager(workspace=workspace_path):
        fc_list = []

        datasets = arcpy.ListDatasets(feature_type='feature')
        #: always include '' so the workspace root is searched even when no datasets exist
        datasets = [''] + (datasets if datasets is not None else [])

        #: collect feature class names from the root workspace and from each feature dataset
        for ds in datasets:
            for fc in arcpy.ListFeatureClasses(feature_dataset=ds):
                fc_list.append(fc)

        return fc_list
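
Run against a file geodatabase, it returns names from the root and from every feature dataset (path hypothetical):

for fc in get_featureclasses(r"C:\data\project.gdb"):
    print(fc)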
Code Example #20
import os

import arcpy

#: `path` and `MPOBound` are module-level names assumed to be defined elsewhere.


def createHeatmap(yrbuilt=2021,
                  shpnm='parcel_data',
                  field="jobs",
                  by="cum",
                  cellSize=25,
                  searchRadius=1000,
                  changeFileNm=False):
    arcpy.env.extent = MPOBound
    if by == "yearly":
        inFeature = os.path.join(path, 'output', shpnm + str(yrbuilt) + '.shp')
    else:
        if yrbuilt == "":
            inFeature = os.path.join(path, 'output', shpnm + '.shp')
        else:
            inFeature = os.path.join(path, 'output',
                                     shpnm + str(yrbuilt) + 'cum.shp')

    arcpy.FeatureToPoint_management(in_features=inFeature,
                                    out_feature_class="dataCentroids",
                                    point_location="INSIDE")
    if changeFileNm:
        outRaster = os.path.join(
            path, 'output', "KernelD_" + field + "_" + str(yrbuilt) + "_" +
            str(cellSize) + "_" + str(searchRadius) + ".tif")
    else:
        if yrbuilt == "":
            outRaster = os.path.join(path, 'output',
                                     "KernelD_" + field + ".tif")
        else:
            outRaster = os.path.join(
                path, 'output',
                "KernelD_" + field + "_" + str(yrbuilt) + ".tif")
    with arcpy.EnvManager(mask=MPOBound):
        arcpy.gp.KernelDensity_sa("dataCentroids", field, outRaster, cellSize,
                                  searchRadius, "SQUARE_KILOMETERS",
                                  "DENSITIES", "GEODESIC")
    #output_raster = arcpy.sa.RasterCalculator('Int(outRaster)')
    #output_raster.save(outRaster)

    if yrbuilt == "":
        print("Created the heatmap for {0}".format(field))
    else:
        print("Created the heatmap for {0} in {1}".format(field, yrbuilt))
Code Example #21
import arcpy

#: a helper function here was not exported by ModelBuilder ("Function Body not implemented")

def Model():  # Model

    # To allow overwriting outputs change overwriteOutput option to True.
    arcpy.env.overwriteOutput = False

    data = "data"
    result_2_ = "D:\\2021\\Demo\\Demo.gdb\\result"

    #: ModelBuilder's "Iterate Field Values" iterator does not export to Python
    #: (it appeared as "NOT IMPLEMENTED" over data_Dissolve2_2_ and [["GUID", ""]]).
    #: The empty iterable below is only a syntactic placeholder for that iterator.
    for I_data_Dissolve2_GUID, Value in []:

        # Process: Dissolve (Dissolve) (management)
        data_Dissolve3 = "D:\\2021\\Demo\\Demo.gdb\\data_Dissolve3"
        arcpy.management.Dissolve(in_features=data, out_feature_class=data_Dissolve3, dissolve_field=["Type"], statistics_fields=[], multi_part="MULTI_PART", unsplit_lines="DISSOLVE_LINES")

        # Process: Calculate Field (Calculate Field) (management)
        data_Dissolve2_2_ = arcpy.management.CalculateField(in_table=data_Dissolve3, field="GUID", expression="SequentialNumber()", expression_type="PYTHON3", code_block="""# Calculates a sequential number
# More calculator examples at esriurl.com/CalculatorExamples
rec=0
def SequentialNumber():
    global rec
    pStart = 1
    pInterval = 1
    if (rec == 0):
        rec = pStart
    else:
        rec = rec + pInterval
    return rec""", field_type="TEXT", enforce_domains="NO_ENFORCE_DOMAINS")[0]

        # Process: Aggregate Polygons (Aggregate Polygons) (cartography)
        Value = "2"
        I_data_Dissolve2_GUID_Aggreg_Value_ = fr"D:\2021\Demo\Demo.gdb\I_data_Dissolve2_GUID_Aggreg_{Value}"
        I_data_Dissolve2_GUID_Aggreg_Value_Tbl = fr"D:\2021\Demo\Demo.gdb\I_data_Dissolve2_GUID_Aggreg_{Value}_Tbl"
        arcpy.cartography.AggregatePolygons(in_features=I_data_Dissolve2_GUID, out_feature_class=I_data_Dissolve2_GUID_Aggreg_Value_, aggregation_distance="100 Kilometers", minimum_area="0 SquareMeters", minimum_hole_size="0 SquareMeters", orthogonality_option="NON_ORTHOGONAL", barrier_features=[], out_table=I_data_Dissolve2_GUID_Aggreg_Value_Tbl, aggregate_field="Type")

        # Process: Append (Append) (management)
        result = arcpy.management.Append(inputs=[I_data_Dissolve2_GUID_Aggreg_Value_], target=result_2_, schema_type="TEST", field_mapping="", subtype="", expression="")[0]

if __name__ == '__main__':
    # Global Environment settings
    with arcpy.EnvManager(scratchWorkspace=r"D:\2021\Demo\Demo.gdb", workspace=r"D:\2021\Demo\Demo.gdb"):
        Model()
Code Example #22
import os

import arcpy


def reconcile_and_post_versions(versions, admin_connection, log_path):
    '''method to reconcile and post versions
    versions: array of VersionInfo objects
    admin_connection: the UIC Admin sde connection path
    log_path: the file path to where the reconcile and post log are stored
    '''
    print('reconcile and posting')

    with arcpy.EnvManager(workspace=admin_connection):
        all_versions = [
            version.name.lower()
            for version in arcpy.da.ListVersions(admin_connection)
        ]

        required_versions = [
            version for version in versions
            if version.fully_qualified_version_name.lower() not in all_versions
        ]

        if len(required_versions) > 0:
            print(
                f'missing the following required versions: {", ".join([version.fully_qualified_version_name for version in required_versions])}'
            )

            return

        #: Create RnP versions if they don't exist
        missing_rnp_versions = [
            version for version in versions
            if version.fully_qualified_rnp_version_name not in all_versions
        ]

        for missing_version in missing_rnp_versions:
            missing_version.create_rnp_version(admin_connection)

        #: Reconcile/Post RnP Versions to QA
        for version_info in versions:
            version_info.reconcile(admin_connection, log_path)

        arcpy.management.ClearWorkspaceCache(admin_connection)

        print(f'finished{os.linesep}')
Code Example #23
File: empties.py  Project: agrc/sweeper
    def sweep(self):
        '''A method to find empty geometries and return a report dictionary
        '''
        report = {
            'title': 'Empty Test',
            'feature_class': self.table_name,
            'issues': []
        }
        fields = ['OID@', 'SHAPE@']

        with arcpy.EnvManager(workspace=self.workspace):
            with arcpy.da.SearchCursor(self.table_name,
                                       fields) as search_cursor:
                for oid, geometry in search_cursor:
                    if geometry is not None:
                        continue

                    report['issues'].append(str(oid))
                    self.oids_with_issues.append(oid)

        return report
Code Example #24
    def update(self, crate):
        '''crate: Crate
        updates the hash table with the current hash and truncates and loads the destination data

        returns an updated crate status
        '''
        status = Crate.UPDATED
        if crate.result[0] == Crate.INVALID_DATA:
            return crate.result
        elif crate.result[0] == Crate.CREATED:
            status = Crate.CREATED

        log.info(f'truncating {crate.destination}')
        arcpy.management.TruncateTable(crate.destination)

        with arcpy.EnvManager(
                geographicTransformations=crate.geographic_transformation):
            arcpy.management.Append(crate.source,
                                    crate.destination,
                                    schema_type='NO_TEST')

        table_name = crate.source_name.lower()
        with arcpy.da.UpdateCursor(
                self.hash_table, [hash_field],
                where_clause=f'{table_name_field} = \'{table_name}\''
        ) as cursor:
            try:
                next(cursor)
                log.info(f'updating value in hash table for {table_name}')
                cursor.updateRow((self.current_hashes[table_name], ))
            except StopIteration:
                log.info(f'adding new row in hash table for {table_name}')
                with arcpy.da.InsertCursor(
                        self.hash_table,
                    [table_name_field, hash_field]) as insert_cursor:
                    insert_cursor.insertRow(
                        (table_name, self.current_hashes[table_name]))

        return (status, None)
Code Example #25
import os
import sys

import arcpy


def delete_versions(versions, admin_connection):
    print(
        f'trying to delete {sum(version.version_count for version in versions)} versions. some may not exist'
    )

    with arcpy.EnvManager(workspace=admin_connection):
        print('testing if delete can be run')

        has_rnp = len([
            version for version in arcpy.da.ListVersions(admin_connection)
            if 'rnp' in version.name.lower()
        ]) > 0

        if not has_rnp:
            print('Reconcile and Post versions have not been created. Exiting')

            sys.exit(-1)

    print('checks pass, deleting versions')

    for version in versions:
        version.delete(admin_connection)

    print(f'finished{os.linesep}')
Code Example #26
import sqlite3

import arcpy

#: BBB_SharedFunctions is the shared module from the BetterBusBuffers toolbox,
#: assumed to be importable alongside this script.
import BBB_SharedFunctions


def runTool(output_stop_file, SQLDbase, time_window_value_table,
            snap_to_nearest_5_minutes):
    def RetrieveFrequencyStatsForStop(stop_id, stoptimedict, start_sec,
                                      end_sec):
        '''For a given stop, query the dictionary
        and return the NumTrips, NumTripsPerHr, MaxWaitTime, and AvgHeadway given a
        specific route_id and direction. If snap to nearest five minutes is true, then
        this function will return headways snapped to the closest 5 minute interval.'''
        # Make a list of stop_times
        StopTimesAtThisPoint = []
        try:
            for trip in stoptimedict[stop_id]:
                StopTimesAtThisPoint.append(trip[1])
        except KeyError:
            pass
        StopTimesAtThisPoint.sort()

        # Calculate the number of trips
        NumTrips = len(StopTimesAtThisPoint)
        NumTripsPerHr = round(
            float(NumTrips) / ((end_sec - start_sec) / 3600), 2)
        # Get the max wait time and the average headway
        MaxWaitTime = BBB_SharedFunctions.CalculateMaxWaitTime(
            StopTimesAtThisPoint, start_sec, end_sec)
        if snap_to_nearest_5_minutes:
            round_to = 5
        else:
            round_to = None
        AvgHeadway = BBB_SharedFunctions.CalculateAvgHeadway(
            StopTimesAtThisPoint, round_to)
        return NumTrips, NumTripsPerHr, MaxWaitTime, AvgHeadway

    # ----- Get input parameters and set things up. -----
    # Check software version and fail out quickly if it's not sufficient.
    BBB_SharedFunctions.CheckArcVersion(min_version_pro="1.2")

    arcpy.AddMessage("Reading data...")

    # Connect to SQL database of preprocessed GTFS from Step 1
    conn = BBB_SharedFunctions.conn = sqlite3.connect(SQLDbase)
    c = BBB_SharedFunctions.c = conn.cursor()

    # Store frequencies if relevant
    frequencies_dict = BBB_SharedFunctions.MakeFrequenciesDict()

    # Get unique route_id/direction_id pairs and calculate the trips used in each
    # Some GTFS datasets use the same route_id to identify trips traveling in
    # either direction along a route. Others identify it as a different route.
    # We will consider each direction separately if there is more than one.
    trip_route_dict = {
    }  # {(route_id, direction_id): [(trip_id, service_id),..]}
    triproutefetch = '''SELECT DISTINCT route_id,direction_id FROM trips;'''
    c.execute(triproutefetch)
    for rtpair in c.fetchall():
        key = tuple(rtpair)
        route_id = rtpair[0]
        direction_id = rtpair[1]
        # Get list of trips
        # Ignore direction if this route doesn't have a direction
        if direction_id is not None and str(direction_id).strip():
            triproutefetch = '''
                    SELECT trip_id, service_id FROM trips
                    WHERE route_id = '{0}' AND direction_id = {1};'''.format(
                route_id, direction_id)
        else:
            triproutefetch = '''
                    SELECT trip_id, service_id FROM trips
                    WHERE route_id = '{0}';'''.format(route_id)
        c.execute(triproutefetch)
        triproutelist = c.fetchall()
        trip_route_dict[key] = triproutelist

    # ----- For each time window, calculate the stop frequency -----
    final_stop_freq_dict = {
    }  # {(stop_id, route_id, direction_id): {prefix: (NumTrips, NumTripsPerHour, MaxWaitTimeSec, AvgHeadwayMin)}}
    # The time_window_value_table will be a list of nested lists of strings like:
    # [[Weekday name or YYYYMMDD date, HH:MM, HH:MM, Departures/Arrivals, Prefix], [], ...]
    for time_window in time_window_value_table:
        # Prefix/identifier associated with this time window
        prefix = time_window[4]
        arcpy.AddMessage("Calculating statistics for time window %s..." %
                         prefix)
        # Clean up date and determine whether it's a date or a weekday
        Specific, day = BBB_SharedFunctions.CheckSpecificDate(time_window[0])
        # Convert times to seconds
        start_time = time_window[1]
        end_time = time_window[2]
        if not start_time:
            start_time = "00:00"
        if not end_time:
            end_time = "23:59"
        start_sec, end_sec = BBB_SharedFunctions.ConvertTimeWindowToSeconds(
            start_time, end_time)
        # Clean up arrival/departure time choice
        DepOrArr = BBB_SharedFunctions.CleanUpDepOrArr(time_window[3])

        # Get the trips running in this time window for each route/direction pair
        # Get the service_ids serving the correct days
        serviceidlist, serviceidlist_yest, serviceidlist_tom = \
            BBB_SharedFunctions.GetServiceIDListsAndNonOverlaps(day, start_sec, end_sec, DepOrArr, Specific)

        # Retrieve the stop_times for the time window broken out by route/direction
        stoproutedir_dict = {
        }  # {(stop_id, route_id, direction_id): [NumTrips, NumTripsPerHour, MaxWaitTimeSec, AvgHeadwayMin]}
        for rtdirpair in trip_route_dict:
            # Get trips running with these service_ids
            trip_serv_list = trip_route_dict[rtdirpair]
            triplist = []
            for tripserv in trip_serv_list:
                # Only keep trips running on the correct day
                if tripserv[1] in serviceidlist or tripserv[1] in serviceidlist_tom or \
                    tripserv[1] in serviceidlist_yest:
                    triplist.append(tripserv[0])

            # Get the stop_times that occur during this time window for these trips
            #: initialize all three dicts so a KeyError (no trips) cannot leave them
            #: unbound or carrying values from the previous route/direction pair
            stoptimedict = {}
            stoptimedict_yest = {}
            stoptimedict_tom = {}
            try:
                stoptimedict = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
                    start_sec, end_sec, DepOrArr, triplist, "today",
                    frequencies_dict)
            except KeyError:  # No trips
                pass
            try:
                stoptimedict_yest = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
                    start_sec, end_sec, DepOrArr, triplist, "yesterday",
                    frequencies_dict)
            except KeyError:  # No trips
                pass
            try:
                stoptimedict_tom = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
                    start_sec, end_sec, DepOrArr, triplist, "tomorrow",
                    frequencies_dict)
            except KeyError:  # No trips
                pass

            # Combine the three dictionaries into one master
            for stop in stoptimedict_yest:
                stoptimedict[stop] = stoptimedict.setdefault(
                    stop, []) + stoptimedict_yest[stop]
            for stop in stoptimedict_tom:
                stoptimedict[stop] = stoptimedict.setdefault(
                    stop, []) + stoptimedict_tom[stop]

            for stop in stoptimedict.keys():
                # Get Stop-Route-Dir Frequencies by time period
                vals = RetrieveFrequencyStatsForStop(stop, stoptimedict,
                                                     start_sec, end_sec)
                key = (
                    stop,
                    rtdirpair[0],
                    rtdirpair[1],
                )
                if key not in final_stop_freq_dict:
                    final_stop_freq_dict[key] = {prefix: vals}
                else:
                    final_stop_freq_dict[key][prefix] = vals

    # ----- Write the stops and stats to the output feature class -----
    arcpy.AddMessage("Writing outputs...")
    # Make the basic feature class for stops with correct gtfs fields
    with arcpy.EnvManager(overwriteOutput=True):
        output_coords = BBB_SharedFunctions.CreateStopsFeatureClass(
            output_stop_file)

    # Add fields specific to this tool's outputs
    arcpy.management.AddField(output_stop_file, 'route_id', "TEXT")
    arcpy.management.AddField(output_stop_file, 'direction_id', "SHORT")
    # Create fields for stats for each time window using prefix
    base_field_names = [
        '_NumTrips', '_NumTripsPerHr', '_MaxWaitTime', '_AvgHeadway'
    ]
    new_fields = []
    for time_window in time_window_value_table:
        for base_field in base_field_names:
            new_field = time_window[4] + base_field
            new_fields.append(new_field)
            arcpy.management.AddField(output_stop_file, new_field, "DOUBLE")

    # Get the stop info from the GTFS SQL file
    StopTable = BBB_SharedFunctions.GetStopsData()
    stop_dict = {stop[0]: stop for stop in StopTable}

    # Make a dictionary to track whether we have inserted all stops at least once into the output
    used_stops = {stop[0]: False for stop in StopTable}
    # Store stop geometries in dictionary so they can be inserted multiple times without recalculating
    stop_geoms = {
        stop[0]: BBB_SharedFunctions.MakeStopGeometry(stop[4], stop[5],
                                                      output_coords)
        for stop in StopTable
    }

    # Add the stops with stats to the feature class
    fields = [
        "SHAPE@", "stop_id", "stop_code", "stop_name", "stop_desc", "zone_id",
        "stop_url", "location_type", "parent_station", "route_id",
        "direction_id"
    ] + new_fields
    with arcpy.da.InsertCursor(output_stop_file, fields) as cur3:
        # Iterate over all unique stop, route_id, direction_id groups and insert values
        for key in sorted(final_stop_freq_dict.keys()):
            stop_id = key[0]
            used_stops[stop_id] = True
            route_id = key[1]
            direction_id = key[2]
            stop_data = stop_dict[stop_id]
            # Schema of StopTable
            ##   0 - stop_id
            ##   1 - stop_code
            ##   2 - stop_name
            ##   3 - stop_desc
            ##   4 - stop_lat
            ##   5 - stop_lon
            ##   6 - zone_id
            ##   7 - stop_url
            ##   8 - location_type
            ##   9 - parent_station
            row = [
                stop_geoms[stop_id],  # Geometry
                stop_data[0],
                stop_data[1],
                stop_data[2],
                stop_data[3],
                stop_data[6],
                stop_data[7],
                stop_data[8],
                stop_data[9],  # GTFS data
                route_id,
                direction_id  # route and direction IDs
            ]
            # Populate stats fields for each prefix
            for time_window in time_window_value_table:
                prefix = time_window[4]
                try:
                    vals = final_stop_freq_dict[key][prefix]
                except KeyError:
                    # This stop/route/direction group had no service for this time window
                    vals = [0, 0, None, None]
                row += vals

            # Insert the row
            cur3.insertRow(row)

        # Insert row for any remaining stops that were not used at all
        for stop_id in used_stops:
            if used_stops[stop_id]:
                # This one was already inserted
                continue
            stop_data = stop_dict[stop_id]
            row = [
                stop_geoms[stop_id],  # Geometry
                stop_data[0],
                stop_data[1],
                stop_data[2],
                stop_data[3],
                stop_data[6],
                stop_data[7],
                stop_data[8],
                stop_data[9],  # GTFS data
                None,
                None  # route and direction IDs - None because not used
            ]
            # Populate stats fields for each prefix
            for time_window in time_window_value_table:
                row += [0, 0, None, None]
            # Insert the row
            cur3.insertRow(row)

    # Close Connection
    conn.close()
    arcpy.AddMessage("Finished!")
    arcpy.AddMessage("Calculated trip counts, frequency, max wait time, and \
headway were written to an output stops file by route-direction pairs.")
Code Example #27
    # Process: Add Fields (multiple) (Add Fields (multiple)) 
    Property_1 = arcpy.AddFields_management(in_table=property_soil_shp, field_description=[["low_kg", "FLOAT", "", "", "", ""], ["med_kg", "FLOAT", "", "", "", ""], ["high_kg", "FLOAT", "", "", "", ""], ["Area_m2", "FLOAT", "", "", "", ""]])[0]

    # Process: Calculate Geometry Attributes (Calculate Geometry Attributes) 
    Property_1 = arcpy.management.CalculateGeometryAttributes(in_features=Property_1, geometry_property=[["Area_m2", "AREA"]], length_unit="", area_unit="SQUARE_METERS", coordinate_system="", coordinate_format="SAME_AS_INPUT")[0]

    # Process: Calculate Field (Calculate Field) 
    Property_1 = arcpy.management.CalculateField(in_table=Property_1, field="low_kg", expression="!low_Carbon! * !Area_m2!", expression_type="PYTHON3", code_block="", field_type="TEXT")[0]

    # Process: Calculate Field (2) (Calculate Field) 
    Property_1 = arcpy.management.CalculateField(in_table=Property_1, field="med_kg", expression="!med_Carbon! * !Area_m2!", expression_type="PYTHON3", code_block="", field_type="TEXT")[0]

    # Process: Calculate Field (3) (Calculate Field) 
    Property_1 = arcpy.management.CalculateField(in_table=Property_1, field="high_kg", expression="!high_Carbo! * !Area_m2!", expression_type="PYTHON3", code_block="", field_type="TEXT")[0]

    # Process: Summary Statistics (Summary Statistics) 
    property_soil_carbon = "D:\\Greenbelt\\soils\\April\\SoilCarbon\\" + property_name + "_SoilCarbon"
    arcpy.Statistics_analysis(in_table=Property_1, out_table=property_soil_carbon, statistics_fields=[["low_kg", "SUM"], ["med_kg", "SUM"], ["high_kg", "SUM"]], case_field=["Cover"])

    # Process: Table To Excel (Table To Excel) 
    property_soil_carbon_xlsx = "D:\\Greenbelt\\soils\\April\\SoilCarbonTables\\" + property_name + "_SoilCarbon.xlsx"
    arcpy.conversion.TableToExcel(Input_Table=property_soil_carbon, Output_Excel_File=property_soil_carbon_xlsx, Use_field_alias_as_column_header="NAME", Use_domain_and_subtype_description="CODE")


if __name__ == '__main__':
    # Global Environment settings
    for property_path in existing_properties:
        with arcpy.EnvManager(scratchWorkspace=r"D:\Greenbelt\soils\April\April Soils.gdb", workspace=r"D:\Greenbelt\soils\April\April Soils.gdb"):
            Model(property_path)
Code Example #28
gdb = r'C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\MosaicDatasets' '\\' + projtag + '\\' + projtag + '_' + today + '.gdb'

print('Generating watermap extent AID package...')
md_wm = gdb + '\\' + 'watermap_extent'
aid_wm = r'C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\AID_Packages\HKH_WatermapExtent_' + today + '.zmd'

# clip mosaic dataset to boundary
extent_mask = r'C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\Watermaps.gdb\HKH_ServiceExtentMask'
arcpy.management.ImportMosaicDatasetGeometry(md_wm, "BOUNDARY", "OBJECTID",
                                             extent_mask, "FID")
print('Clipped mosaic dataset to reference shapefile.')

with arcpy.EnvManager(
        scratchWorkspace=r"C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\Watermaps.gdb",
        workspace=r"C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\Watermaps.gdb"):
    try:
        arcpy.AID.AIDISDP(md_wm, aid_wm, None)
    except Exception:
        print("AID errors generated and ignored.")
print('Watermap extent AID package complete.')

print('Generating RGB AID package...')
md_rgb = gdb + '\\' + 'rgb'
aid_rgb = r'C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\AID_Packages\HKH_RGB_' + today + '.zmd'
with arcpy.EnvManager(
        scratchWorkspace=r"C:\Users\ASF\Documents\COVID19\Disasters\Watermaps\Watermaps.gdb",
Code Example #29
cellsizes = [100]
ages = {'Adult': 'Age > 24 AND Age < 45',
        'Elder': 'Age > 64',
        'Mature': 'Age > 44 AND Age < 65',
        'MidAge': 'Age > 24 AND Age < 65',
        'Youth': 'Age < 25',
        'Tot': 'Age > 0'}

# PointFilter = "ExclusionCode NOT IN (1) AND " \
#               "Incident_Year IN (2014, 2015, 2016, 2017, 2018)"
PointFilter = "ExclusionCode NOT IN (1) AND " \
              "Incident_Year IN (2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018)"

arcpy.env.overwriteOutput = True

with arcpy.EnvManager(scratchWorkspace=out_gdb, workspace=out_gdb):

    # arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(3857)
    # Parrallel processing doesn't ALWAYS help!
    # parallel_processing = '8'
    # arcpy.env.parallelProcessingFactor = parallel_processing

    arcpy.CheckOutExtension("spatial")
    arcpy.CheckOutExtension("ImageAnalyst")

    try:
        for gender, genderFilt in genders.items():
            for scale, searchradius in scales.items():
                for cellsize in cellsizes:
                    for age, ageRange in ages.items():
                        OutputKDr = "{}\KDr_{}_{}_{}_{}_{}".format(out_gdb, name_base, age, gender, cellsize, scale)
Code Example #30
    elif size_class == \"medium\" and (altitude > 16000):
        return 36
    elif size_class == \"small\" and altitude <=4000:
        return 67
    elif size_class == \"small\" and (altitude > 4000 and altitude <= 5000):
        return 54
    elif size_class == \"small\" and (altitude > 5000):
        return 43", field_type="TEXT")[0]

    # Process: Point to Raster (Mean aggregated) (Point to Raster) 
    Mean_Noise_may_2020 = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\airtraff_may2020_a_PointToRaster"
    arcpy.PointToRaster_conversion(in_features=Classified_noiselevel, value_field="noiselevel", out_rasterdataset=Mean_Noise_may_2020, cell_assignment="MEAN", priority_field="NONE", cellsize="0.001")

    # Process: extract cell value (Raster to Point) 
    Points_represent_noiselevel = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\RasterT_airtraf1"
    arcpy.RasterToPoint_conversion(in_raster=Mean_Noise_may_2020, out_point_features=Points_represent_noiselevel, raster_field="VALUE")

    # Process: Feature To Polygon (Feature To Polygon) 
    Mean_NoiseMay2020_hexagon = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\GenerateTessellation_Feature"
    arcpy.FeatureToPolygon_management(in_features=[Hexagonal_grid_per_1_km2], out_feature_class=Mean_NoiseMay2020_hexagon, cluster_tolerance="", attributes="ATTRIBUTES", label_features=Points_represent_noiselevel)

    # Process: Polygon to Raster (Polygon to Raster) 
    Mean_NoiseZone_may2020 = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\GenerateTessellation_Feature_PolygonToRaster"
    if Mean_NoiseMay2020_hexagon:
        #: the export referenced an undefined Mean_NoiseMay2020_hexagon_2_; the
        #: feature class created above is the likely intended input
        arcpy.PolygonToRaster_conversion(in_features=Mean_NoiseMay2020_hexagon, value_field="grid_code", out_rasterdataset=Mean_NoiseZone_may2020, cell_assignment="CELL_CENTER", priority_field="NONE", cellsize="0.02")

if __name__ == '__main__':
    # Global Environment settings
    with arcpy.EnvManager(scratchWorkspace=r"C:\Users\Dell\Documents\ArcGIS\Projects\mar apr may\mar apr may.gdb", workspace=r"C:\Users\Dell\Documents\ArcGIS\Projects\mar apr may\mar apr may.gdb"):
        Model()