Example #1
def get_layout_templates(gis=None):
    """


This function returns the content of the GIS's layout templates formatted as a dict.

Parameters:

gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


Returns:
   output_json - layout templates as Python dict

See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/GetLayoutTemplatesInfo.htm for additional help.
    """
    from arcgis.geoprocessing import DataFile
    from arcgis.geoprocessing._support import _execute_gp_tool

    kwargs = locals()

    param_db = {
        "output_json": (str, "Output JSON"),
    }
    return_values = [
        {"name": "output_json", "display_name": "Output JSON", "type": str},
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]

    return _execute_gp_tool(gis, "Get Layout Templates Info Task", kwargs, param_db, return_values, _use_async, url)
Example #2
def get_travel_modes(gis=None):
    """


Get a list of travel modes that can be used with directions and routing services available in your portal.

Parameters:

gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


Returns the following as a named tuple:
   supported_travel_modes - Supported Travel Modes as a FeatureSet
   default_travel_mode - Default Travel Mode as a str

See https://logistics.arcgis.com/arcgis/rest/directories/arcgisoutput/World/Utilities_GPServer/World_Utilities/GetTravelModes.htm for additional help.
    """
    kwargs = locals()

    param_db = {
        "supported_travel_modes": (FeatureSet, "Supported Travel Modes"),
        "default_travel_mode": (str, "Default Travel Mode"),
    }
    return_values = [
        {"name": "supported_travel_modes", "display_name": "Supported Travel Modes", "type": FeatureSet},
        {"name": "default_travel_mode", "display_name": "Default Travel Mode", "type": str},
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.routingUtilities.url

    return _execute_gp_tool(gis, "GetTravelModes", kwargs, param_db, return_values, _use_async, url)
Example #3
def _estimate_credits(task, parameters, gis=None):
    """
    Estimates the number of credits a spatial analysis operation will take.

    =======================     ====================================================================
    **Argument**                **Description**
    -----------------------     --------------------------------------------------------------------
    task                        Required String. The name of the analysis tool.
    -----------------------     --------------------------------------------------------------------
    parameters                  Required String.  The input parameters for the tool.
    -----------------------     --------------------------------------------------------------------
    gis                         Optional GIS.  The enterprise connection object.
    =======================     ====================================================================

    :returns: float

    """

    if gis is None and _arcgis.env.active_gis:
        gis = _arcgis.env.active_gis
    elif gis is None and _arcgis.env.active_gis is None:
        raise Exception("A GIS must be provided and/or set as active.")
    if gis.version >= [6, 4] and gis._portal.is_arcgisonline:
        url = gis.properties['helperServices']['creditEstimation']['url']
        gptask = "EstimateCredits"
        url = "{base}/{gptask}/execute".format(base=url, gptask=gptask)
        params = {
            'f': 'json',
            'taskName': task,
            'taskParameters': json.dumps(parameters)
        }
        kwargs = locals()
        param_db = {
            "task": (str, "taskName"),
            "parameters": (str, "taskParameters"),
            "credit_estimate": (str, "creditEstimate"),
        }
        return_values = [
            {
                "name": "credit_estimate",
                "display_name": "creditEstimate",
                "type": str
            },
        ]
        res = _execute_gp_tool(gis,
                               gptask,
                               kwargs,
                               param_db,
                               return_values,
                               False,
                               url,
                               webtool=True,
                               add_token=False)
        if 'cost' in res:
            return float(res['cost'])
        return res
    return
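A sketch of calling this private helper; the task name and parameters below are illustrative, not a documented contract:

# Usage sketch: estimate credits before running an analysis tool.
cost = _estimate_credits(task="CreateBuffers",
                         parameters={"distances": [5]},  # illustrative tool parameters
                         gis=my_gis)                     # an authenticated GIS connection
print(cost)  # a float credit estimate, or the raw response if no 'cost' key is present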
Example #4
def summarize_elevation(input_features: FeatureSet = {},
                        feature_id_field: str = None,
                        dem_resolution: str = None,
                        include_slope_aspect: bool = False,
                        gis=None) -> FeatureSet:
    """   
    .. image:: _static/images/summarize_elevation/summarize_elevation.png     

    The ``summarize_elevation`` method calculates summary statistics for features you provide based 
    on ArcGIS Online Elevation data. It accepts point, line, or polygon input and returns statistics 
    for the elevation, slope, and aspect of the features. 

    =========================    =========================================================
    **Parameter**                **Description**
    -------------------------    ---------------------------------------------------------
    input_features               Required FeatureSet. Input features to summarize the elevation for. The features can be point, line, or area. See :ref:`Feature Input<FeatureInput>`.
    -------------------------    ---------------------------------------------------------
    feature_id_field             Optional string. The unique ID field to use for the input features.  
    -------------------------    ---------------------------------------------------------
    dem_resolution               Optional string. The approximate spatial resolution (cell size) of the source elevation data used for the calculation.
                                 
                                 Choice list:[' ', 'FINEST', '10m', '30m', '90m']
                                 
                                 The default value is None. 
    -------------------------    ---------------------------------------------------------
    include_slope_aspect         Optional boolean. Determines if slope and aspect for the input feature(s) will be included in the output. The slope and aspect values in the output are in degrees.

                                 The default value is False.
    -------------------------    ---------------------------------------------------------
    gis                          Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
    =========================    =========================================================

    :returns: result_layer : Output Summary as a FeatureSet

    .. code-block:: python

        # USAGE EXAMPLE: To calculate summary statistics for mountain polyline features.
        summarize = summarize_elevation(input_features=mountain_fs,
                           dem_resolution='FINEST',
                           include_slope_aspect=True)  
    """
    kwargs = locals()

    param_db = {
        "input_features": (FeatureSet, "InputFeatures"),
        "feature_id_field": (str, "FeatureIDField"),
        "dem_resolution": (str, "DEMResolution"),
        "include_slope_aspect": (bool, "IncludeSlopeAspect"),
        "output_summary": (FeatureSet, "Output Summary"),
    }
    return_values = [
        {"name": "output_summary", "display_name": "Output Summary", "type": FeatureSet},
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.elevation.url

    return _execute_gp_tool(gis, "SummarizeElevation", kwargs, param_db, return_values, _use_async, url)
Example #5
def export_map(web_map_as_json=None,
               format='PDF',
               layout_template='MAP_ONLY',
               gis=None):
    """


This function takes the state of the web map (for example, included services, layer visibility
settings, client-side graphics, and so forth) and returns either (a) a page layout or
(b) a map without page surrounds of the specified area of interest in raster or vector format.
The input for this function is a piece of text in JavaScript Object Notation (JSON) format describing the layers,
graphics, and other settings in the web map. The JSON must be structured according to the WebMap specification
in the ArcGIS Help. This tool is shipped with ArcGIS Server to support web services for printing, including the
preconfigured service named PrintingTools.

Parameters:

   web_map_as_json: Web Map as JSON (str). Required parameter.  A JSON representation of the state of the map to be exported as it appears in the web application. See the WebMap specification in the ArcGIS Help to understand how this text should be formatted. The ArcGIS web APIs (for JavaScript, Flex, Silverlight, etc.) allow developers to easily get this JSON string from the map.

   format: Format (str). Optional parameter.  The format in which the map image for printing will be delivered. The following strings are accepted: PNG8 (default if the parameter is left blank), PDF, PNG32, JPG, GIF, EPS, SVG, SVGZ.
      Choice list:['PDF', 'PNG32', 'PNG8', 'JPG', 'GIF', 'EPS', 'SVG', 'SVGZ']

   layout_template: Layout Template (str). Optional parameter.  Either a name of a template from the list or the keyword MAP_ONLY. When MAP_ONLY is chosen or an empty string is passed in, the output map does not contain any page layout surroundings (for example, title, legends, scale bar, and so forth).
      Choice list:['A3 Landscape', 'A3 Portrait', 'A4 Landscape', 'A4 Portrait', 'Letter ANSI A Landscape', 'Letter ANSI A Portrait', 'Tabloid ANSI B Landscape', 'Tabloid ANSI B Portrait', 'MAP_ONLY']

    gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


Returns:
   output_file - Output File as a DataFile

See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/ExportWebMapTask.htm for additional help.
    """

    from arcgis.geoprocessing import DataFile
    from arcgis.geoprocessing._support import _execute_gp_tool

    kwargs = locals()

    param_db = {
        "web_map_as_json": (str, "Web_Map_as_JSON"),
        "format": (str, "Format"),
        "layout_template": (str, "Layout_Template"),
        "output_file": (DataFile, "Output File"),
    }
    return_values = [
        {"name": "output_file", "display_name": "Output File", "type": DataFile},
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]

    return _execute_gp_tool(gis, "Export Web Map Task", kwargs, param_db, return_values, _use_async, url)
Example #6
def get_tool_info(service_name='asyncRoute', tool_name='FindRoutes', gis=None):
    """


Get additional information such as the description of the network dataset used for the analysis and the execution limits for a tool in a geoprocessing service.

Parameters:

   service_name: Service Name (str). Required parameter.  Specify the service name containing the tool. The parameter value should be specified using one of the following keywords that reference a particular geoprocessing service:

      + asyncClosestFacility - The asynchronous geoprocessing service used to perform the closest facility analysis.
      + asyncLocationAllocation - The asynchronous geoprocessing service used to perform the location-allocation analysis.
      + asyncRoute - The asynchronous geoprocessing service used to perform the route analysis.
      + asyncServiceArea - The asynchronous geoprocessing service used to perform the service area analysis.
      + asyncVRP - The asynchronous geoprocessing service used to perform the vehicle routing problem analysis.
      + syncVRP - The synchronous geoprocessing service used to perform the vehicle routing problem analysis.

      The default value is asyncRoute.
      Choice list:['asyncClosestFacility', 'asyncLocationAllocation', 'asyncODCostMatrix', 'asyncRoute', 'asyncServiceArea', 'asyncVRP', 'syncVRP']

   tool_name: Tool Name (str). Required parameter.  Specify the tool name in the geoprocessing service. The parameter value should be a valid tool name in the geoprocessing service specified by the serviceName parameter. The default value is FindRoutes.
      Choice list:['EditVehicleRoutingProblem', 'FindClosestFacilities', 'FindRoutes', 'GenerateOriginDestinationCostMatrix', 'GenerateServiceAreas', 'SolveLocationAllocation', 'SolveVehicleRoutingProblem']

gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


Returns:
   tool_info - Tool Info as a str

See https://logistics.arcgis.com/arcgis/rest/directories/arcgisoutput/World/Utilities_GPServer/World_Utilities/GetToolInfo.htm for additional help.
    """
    kwargs = locals()

    param_db = {
        "service_name": (str, "serviceName"),
        "tool_name": (str, "toolName"),
        "tool_info": (str, "Tool Info"),
    }
    return_values = [
        {
            "name": "tool_info",
            "display_name": "Tool Info",
            "type": str
        },
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.routingUtilities.url
    return _execute_gp_tool(gis, "GetToolInfo", kwargs, param_db,
                            return_values, _use_async, url)
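A usage sketch; the returned string is typically a JSON document describing the service, so parsing it as JSON is an assumption about the service response:

# Usage sketch: look up execution limits for the route solver.
import json

info = get_tool_info(service_name='asyncRoute', tool_name='FindRoutes')
print(json.loads(info))  # assumes the tool_info string is JSON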
Example #7
def run_python_script(code, layers=None, gis=None):
    """

    The `run_python_script` method executes a Python script on your ArcGIS
    GeoAnalytics Server site. In the script, you can create an analysis
    pipeline by chaining together multiple GeoAnalytics Tools without
    writing intermediate results to a data store. You can also use other
    Python functionality in the script that can be distributed across your
    GeoAnalytics Server.

    For example, suppose that each week you receive a new dataset of
    vehicle locations containing billions of point features. Each time you
    receive a new dataset, you must perform the same workflow involving
    multiple GeoAnalytics Tools to create an information product that you
    share within your organization. This workflow creates several large
    intermediate layers that take up lots of space in your data store. By
    scripting this workflow in Python and executing the code in the Run
    Python Script task, you can avoid creating these unnecessary
    intermediate layers, while simplifying the steps to create the
    information product.

    When you use Run Python Script, the Python code is executed on your
    GeoAnalytics Server. The script runs with the Python 3.6 environment
    that is installed with GeoAnalytics Server, and all console output is
    returned as job messages. Some Python modules can be used in your
    script to execute code across multiple cores of one or more machines
    in your GeoAnalytics Server using Spark 2.2.0 (the compute platform that
    distributes analysis for GeoAnalytics Tools).

    A geoanalytics module is available and allows you to run GeoAnalytics
    Tools in the script. This package is imported automatically when you
    use Run Python Script.

    To interact directly with Spark in the Run Python Script task, use the
    pyspark module, which is imported automatically when you run the task.
    The pyspark module is the Python API for Spark and provides a
    collection of distributed analysis tools for data management,
    clustering, regression, and more that can be called in Run Python
    Script and run across your GeoAnalytics Server.

    When using the geoanalytics and pyspark packages, most functions return
    analysis results in memory as Spark DataFrames. Spark data frames can be
    written to a data store or used in the script. This allows for the
    chaining together of multiple geoanalytics and pyspark tools, while only
    writing out the final result to a data store, eliminating the need to
    create any intermediate result layers.

    For advanced users, an instance of SparkContext is instantiated
    automatically as sc and can be used in the script to interact with Spark.
    This allows for the execution of custom distributed analysis across your
    GeoAnalytics Server.

    It is recommended that you use an integrated development environment
    (IDE) to write your Python script, and copy the script text into the Run
    Python Script tool. This makes it easier to identify syntax errors and
    typos prior to running your script. It is also recommended that you run
    your script using a small subset of the input data first to verify that
    there are no logic errors or exceptions. You can use the Describe
    Dataset task to create a sample layer for this purpose.

    ================  ===============================================================
    **Argument**      **Description**
    ----------------  ---------------------------------------------------------------
    code              Required String/Python Method. Python code to execute.
    ----------------  ---------------------------------------------------------------
    layers            Optional List. A list of FeatureLayers to operate on.
    ----------------  ---------------------------------------------------------------
    gis               optional GIS. The GIS object where the analysis will take place.
    ================  ===============================================================

    :returns: Dictionary of messages from the code provided.


    """
    if layers is None:
        layers = []
    import inspect
    params = {'f': 'json'}

    if inspect.isfunction(code):
        params['code'] = inspect.getsource(code)
    elif isinstance(code, str):
        params['code'] = code
    else:
        raise ValueError("code must be a string or Python Function.")

    if isinstance(layers, (tuple, list)):
        params['layers'] = layers
    else:
        raise ValueError("layers must be a list or tuple")

    tool_name = "RunPythonScript"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    _set_context(params)

    param_db = {
        "layers": (_FeatureSet, "inputLayers"),
        "code": (str, "pythonScript"),
        "context": (str, "context"),
    }

    res, msg = _execute_gp_tool(gis,
                                tool_name,
                                params,
                                param_db, [],
                                _use_async,
                                url,
                                True,
                                return_messages=True)
    return msg
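A minimal usage sketch; console output produced on the server comes back as job messages:

# Usage sketch: run a trivial script on the GeoAnalytics Server.
def my_script():
    print("hello from GeoAnalytics Server")  # returned as a job message

messages = run_python_script(code=my_script)
print(messages)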
Example #8
def dissolve_boundaries(input_layer,
                        dissolve_fields=None,
                        summary_fields=None,
                        multipart=False,
                        output_name=None,
                        gis=None):
    """

    The Dissolve Boundaries task finds polygons that intersect or have the same field values and merges them together to form a single polygon.

    Examples:

        A city council wants to control liquor sales by refusing new licenses to stores within 1,000 feet of schools, libraries, and parks. After creating a 1,000-foot buffer around the schools, libraries, and parks, the buffered layers can be joined together and the boundaries can be dissolved to create a single layer of restricted areas.

    Usage Notes:

        Only available at ArcGIS Enterprise 10.7 and later.

    ================  ===============================================================
    **Argument**      **Description**
    ----------------  ---------------------------------------------------------------
    input_layer       required FeatureLayer. The point, line or polygon features.
    ----------------  ---------------------------------------------------------------
    dissolve_fields   Optional string. A comma-separated list of strings for each
                      field that you want to dissolve on.
    ----------------  ---------------------------------------------------------------
    summary_fields    Optional list. Calculate one or more statistics for the
                      dissolved areas by using the summary_fields parameter. The
                      input is a list of key/value pairs in the following format:

                         [{"statisticType" : "<stat>", "onStatisticField" : "<field name>"}]

                      Allowed statistics are:

                        + Any (string fields only)
                        + Count
                        + Sum
                        + Minimum
                        + Maximum
                        + Average
                        + Variance
                        + Standard Deviation

                      Example:

                       summary_fields = [{"statisticType" : "Sum", "onStatisticField" : "quadrat_area_km2"},
                                         {"statisticType" : "Mean", "onStatisticField" : "soil_depth_cm"},
                                         {"statisticType" : "Any", "onStatisticField" : "quadrat_desc"}]
    ----------------  ---------------------------------------------------------------
    multipart         Optional boolean. If True, the output service can contain
                      multipart features. If False (default), the output service
                      will only contain single-part features, and individual features
                      will be created for each part.
    ----------------  ---------------------------------------------------------------
    output_name       optional string. The task will create a feature service of the results. You define the name of the service.
    ----------------  ---------------------------------------------------------------
    gis               optional GIS. The GIS object where the analysis will take place.
    ================  ===============================================================

    :returns: FeatureLayer
    """
    kwargs = locals()
    tool_name = "DissolveBoundaries"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {
        "f": "json",
    }
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Dissolve_Bounds_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Dissolve Boundaries')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "multipart": (bool, "multipart"),
        "summary_fields": (list, "summaryFields"),
        "dissolve_fields": (list, "dissolveFields"),
        "output_name": (str, "OutputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "output"),
    }

    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]

    try:
        _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
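A usage sketch; the layer and field names are illustrative:

# Usage sketch: dissolve parcels by zoning class and total their areas.
stats = [{"statisticType": "Sum", "onStatisticField": "area_km2"}]  # illustrative field
dissolved = dissolve_boundaries(input_layer=parcels_lyr,  # illustrative FeatureLayer
                                dissolve_fields="zoning_class",
                                summary_fields=stats,
                                multipart=True)
print(dissolved)  # the feature service item created for the results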
Example #9
def create_space_time_cube(point_layer: _FeatureSet,
                           bin_size: float,
                           bin_size_unit: str,
                           time_step_interval: int,
                           time_step_interval_unit: str,
                           time_step_alignment: str = None,
                           time_step_reference: _datetime = None,
                           summary_fields: str = None,
                           output_name: str = None,
                           context: str = None,
                           gis=None) -> DataFile:
    """
    Summarizes a set of points into a netCDF data structure by aggregating them into space-time bins. Within each bin,
    the points are counted and specified attributes are aggregated. For all bin locations, the trend for counts and
    summary field values are evaluated.

    Parameters:

       point_layer: Input Features (FeatureSet). Required parameter.

       bin_size: Distance Interval (float). Required parameter.

       bin_size_unit: Distance Interval Unit (str). Required parameter.
          Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']

       time_step_interval: Time Step Interval (int). Required parameter.

       time_step_interval_unit: Time Step Interval Unit (str). Required parameter.
          Choice list:['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds']

       time_step_alignment: Time Step Alignment (str). Optional parameter.
          Choice list:['EndTime', 'StartTime', 'ReferenceTime']

       time_step_reference: Time Step Reference (datetime). Optional parameter.

       summary_fields: Summary Fields (str). Optional parameter.

       output_name: Output Name (str). Required parameter.

       context: Context (str). Optional parameter.

        gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


    Returns:
       output_cube - Output Space Time Cube as a DataFile

    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    _set_context(params)

    param_db = {
        "point_layer": (_FeatureSet, "pointLayer"),
        "bin_size": (float, "binSize"),
        "bin_size_unit": (str, "binSizeUnit"),
        "time_step_interval": (int, "timeStepInterval"),
        "time_step_interval_unit": (str, "timeStepIntervalUnit"),
        "time_step_alignment": (str, "timeStepAlignment"),
        "time_step_reference": (_datetime, "timeStepReference"),
        "summary_fields": (str, "summaryFields"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output_cube": (DataFile, "Output Space Time Cube"),
    }
    return_values = [
        {
            "name": "output_cube",
            "display_name": "Output Space Time Cube",
            "type": DataFile
        },
    ]

    return _execute_gp_tool(gis, "CreateSpaceTimeCube", params, param_db,
                            return_values, _use_async, url, True)
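A usage sketch; the point layer and names are illustrative:

# Usage sketch: aggregate crime points into 1 km x 1 week space-time bins.
cube = create_space_time_cube(point_layer=crime_points,  # illustrative point FeatureSet
                              bin_size=1.0,
                              bin_size_unit='Kilometers',
                              time_step_interval=1,
                              time_step_interval_unit='Weeks',
                              output_name='crime_cube')
cube.download()  # the netCDF cube is returned as a DataFile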
Example #10
def watershed(input_points, point_id_field=None, snap_distance=10, snap_distance_units='Meters',
              source_database='Finest', generalize=False, gis=None):
    """
    The Watershed task is used to identify catchment areas based on a particular location you
    provide and ArcGIS Online Elevation data.


    ==================     ====================================================================
    **Argument**           **Description**
    ------------------     --------------------------------------------------------------------
    input_points           Required FeatureSet or Spatially Enabled DataFrame
                           Points delineating the locations for which catchment areas
                           will be calculated.
    ------------------     --------------------------------------------------------------------
    point_id_field         Optional String
                           Field used to identify the feature from the source data. This is
                           useful for relating the results back to the original source data.
    ------------------     --------------------------------------------------------------------
    snap_distance          Optional Integer - Default 10
    ------------------     --------------------------------------------------------------------
    snap_distance_units    Optional String - Default Meters
                           Meters | Kilometers | Feet | Yards | Miles
    ------------------     --------------------------------------------------------------------
    source_database        Optional String - Default "Finest"
                           Keyword indicating the source data that will be used in the analysis.
                           This keyword is an approximation of the spatial resolution of the
                           digital elevation model used to build the foundation hydrologic
                           database. Since many elevation sources are distributed with units of
                           arc seconds, this keyword is an approximation in meters for easier
                           understanding.

                           - Finest: Finest resolution available at each location from all
                             possible data sources.
                           - 10m: The hydrologic source was built from 1/3 arc second -
                             approximately 10 meter resolution, elevation data.
                           - 30m: The hydrologic source was built from 1 arc second -
                             approximately 30 meter resolution, elevation data.
                           - 90m: The hydrologic source was built from 3 arc second -
                             approximately 90 meter resolution, elevation data.
    ------------------     --------------------------------------------------------------------
    generalize             Optional Boolean - Default False
                           Determines if the output watershed boundaries will be
                           smoothed into simpler shapes.
    ------------------     --------------------------------------------------------------------
    gis                    Optional GIS Object instance
                           If not provided as input, a GIS object instance logged into an
                           active portal with elevation helper services defined must already
                           be created in the active Python session. A GIS object instance can
                           also be optionally explicitly passed in through this parameter.
    ==================     ====================================================================

    :return:
        Result object comprised of two FeatureSets - one for watershed_area, and another for
        snapped_points
    """

    kwargs = locals()

    param_db = {
        "input_points": (FeatureSet, "InputPoints"),
        "point_id_field": (str, 'PointIDField'),
        "snap_distance": (int, 'SnapDistance'),
        "snap_distance_units": (int, 'SnapDistanceUnits'),
        "source_database": (str, 'SourceDatabase'),
        "generalize": (str, 'Generalize'),
        "watershed_area": (FeatureSet, "Watershed Area"),
        "snapped_points": (FeatureSet, "SnappedPoints")
    }

    return_values = [
        {"name": "watershed_area", "display_name": "Watershed Area", "type": FeatureSet},
        {"name": "snapped_points", "display_name": "Snapped Points", "type": FeatureSet}
    ]

    # use helper function to evaluate the input points and convert them, if necessary, to a FeatureSet
    input_fs = _evaluate_spatial_input(input_points)

    if input_fs.geometry_type != 'esriGeometryPoint':
        raise Exception('input_points FeatureSet geometry type must be esriGeometryPoint, not {}.'.format(input_fs.geometry_type))

    input_fields = input_fs.fields
    if point_id_field and point_id_field not in [f['name'] for f in input_fields] and len(input_fields):
        input_fields_str = ','.join([f['name'] for f in input_fields])
        raise Exception('The provided point_id_field {} does not appear to be in the input_points FeatureSet fields - {}'.format(point_id_field, input_fields_str))

    if gis is None and arcgis.env.active_gis is None:
        raise Exception('GIS must be defined either by directly passing in a GIS object created using credentials, or one must already be created in the active Python session.')
    elif gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.hydrology.url

    return _execute_gp_tool(gis, "Watershed", kwargs, param_db, return_values, True, url)
Example #11
def profile(input_line_features: FeatureSet = {'exceededTransferLimit': False,
                                               'spatialReference': {'latestWkid': 3857, 'wkid': 102100},
                                               'geometryType': 'esriGeometryPolyline',
                                               'fields': [{'name': 'OID', 'type': 'esriFieldTypeOID', 'alias': 'OID'},
                                                          {'name': 'Shape_Length', 'type': 'esriFieldTypeDouble',
                                                           'alias': 'Shape_Length'}], 'displayFieldName': '',
                                               'features': []},
            profile_id_field: str = None,
            dem_resolution: str = None,
            maximum_sample_distance: float = None,
            maximum_sample_distance_units: str = 'Meters',
            gis=None) -> FeatureSet:
    """            
    .. image:: _static/images/elevation_profile/elevation_profile.png 

    The profile method is used to create profiles along input lines from which a profile graph can be created.

    In asynchronous mode, the maximum number of input line features that can be accepted by the task for each request is 1000.

    =====================================    ===========================================================================
    **Argument**                             **Description**
    -------------------------------------    ---------------------------------------------------------------------------
    input_line_features                      Required featureset. The line features that will be profiled over the surface.
    -------------------------------------    ---------------------------------------------------------------------------
    profile_id_field                         Optional string. A unique identifier to tie profiles to their corresponding input line features.
    -------------------------------------    ---------------------------------------------------------------------------
    dem_resolution                           Optional string. The approximate spatial resolution (cell size) of the source elevation data used for the calculation.
                                             The resolution values are an approximation of the spatial resolution of the digital elevation model. While many elevation sources are distributed in units of arc seconds, the keyword is an approximation of those resolutions in meters for easier understanding.
    -------------------------------------    ---------------------------------------------------------------------------
    maximum_sample_distance                  Optional float. The maximum sampling distance along the line to sample elevation values.
    -------------------------------------    ---------------------------------------------------------------------------
    maximum_sample_distance_units            Optional string. The units for the MaximumSampleDistance.
  
                                             Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
    =====================================    ===========================================================================
    
    :Returns: Output Profile as a FeatureSet

    .. code-block:: python

        # USAGE EXAMPLE: To create a profile of a mountain line feature.
        elevation = profile(input_line_features=mountain_fs,
                            dem_resolution='FINEST',
                            maximum_sample_distance=500,
                            maximum_sample_distance_units='Meters')
    """
    kwargs = locals()

    param_db = {
        "input_line_features": (FeatureSet, "InputLineFeatures"),
        "profile_id_field": (str, "ProfileIDField"),
        "dem_resolution": (str, "DEMResolution"),
        "maximum_sample_distance": (float, "MaximumSampleDistance"),
        "maximum_sample_distance_units": (str, "MaximumSampleDistanceUnits"),
        "output_profile": (FeatureSet, "Output Profile"),
    }
    return_values = [
        {"name": "output_profile", "display_name": "Output Profile", "type": FeatureSet},
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.elevation.url

    return _execute_gp_tool(gis, "Profile", kwargs, param_db, return_values, _use_async, url)
Example #12
def calculate_fields(input_layer,
                     field_name,
                     data_type,
                     expression,
                     track_aware=False,
                     track_fields=None,
                     time_boundary_split=None,
                     time_split_unit=None,
                     time_reference=None,
                     output_name=None,
                     gis=None):
    """
    The Calculate Field task works with a layer to create and populate a
    new field. The output is a new feature layer that is the same as the
    input features, with the additional field added.

    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  required service , The table, point, line or polygon features
                                 containing potential incidents.
    --------------------------   ---------------------------------------------------------------
    field_name                   required string, A string representing the name of the new
                                 field. If the name already exists in the dataset, then a
                                 numeric value will be appended to the field name.
    --------------------------   ---------------------------------------------------------------
    data_type                    required string, the type for the new field.
                                 Values: Date, Double, Integer, String
    --------------------------   ---------------------------------------------------------------
    expression                   required string, An Arcade expression used to calculate the new
                                 field values. You can use any of the Date, Logical,
                                 Mathematical or Text function available with Arcade.
    --------------------------   ---------------------------------------------------------------
    track_aware                  optional boolean, Boolean value denoting if the expression is
                                 track aware.
                                 Default: False
    --------------------------   ---------------------------------------------------------------
    track_fields                 optional string, The fields used to identify distinct tracks.
                                 There can be multiple track_fields. track_fields are only
                                 required when track_aware is true.
    --------------------------   ---------------------------------------------------------------
    time_boundary_split          Optional Int.  A time boundary used to detect an incident.
    --------------------------   ---------------------------------------------------------------
    time_split_unit              Optional String.  The time unit used when `time_boundary_split` is specified.
                                 Allowed values: Years, Months, Weeks, Days, Hours, Minutes, Seconds, Milliseconds
    --------------------------   ---------------------------------------------------------------
    time_reference               Optional Datetime. The starting date/time where analysis will
                                 begin from.
    --------------------------   ---------------------------------------------------------------
    output_name                  optional string, The task will create a feature service of the
                                 results. You define the name of the service.
    --------------------------   ---------------------------------------------------------------
    gis                          optional GIS, the GIS on which this tool runs. If not
                                 specified, the active GIS is used.
    ==========================   ===============================================================

    :returns:
       Feature Layer
    """
    kwargs = locals()
    tool_name = "CalculateField"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {"f": "json"}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Calculate_Fields_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Calculate Fields')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "field_name": (str, "fieldName"),
        "data_type": (str, "dataType"),
        "expression": (str, "expression"),
        "track_aware": (bool, "trackAware"),
        "track_fields": (str, "trackFields"),
        "time_boundary_split": (int, "timeBoundarySplit"),
        "time_split_unit": (str, "timeBoundarySplitUnit"),
        "time_reference": (datetime.datetime, "timeBoundaryReference"),
        "output_name": (str, "outputName"),
        "output": (_FeatureSet, "output"),
        "context": (str, "context")
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]
    try:
        _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
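A usage sketch; the layer and field names are illustrative:

# Usage sketch: add a speed field computed with an Arcade expression.
result = calculate_fields(input_layer=trips_lyr,  # illustrative FeatureLayer
                          field_name="speed_kmh",
                          data_type="Double",
                          expression="$feature.distance_km / $feature.hours")
print(result)  # the feature service created with the new field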
Example #13
def overlay_data(input_layer,
                 overlay_layer,
                 overlay_type="intersect",
                 output_name=None,
                 gis=None):
    """
    Only available at ArcGIS Enterprise 10.6.1 and later.

    ================  ===============================================================
    **Argument**      **Description**
    ----------------  ---------------------------------------------------------------
    input_layer       required FeatureLayer. The point, line or polygon features.
    ----------------  ---------------------------------------------------------------
    overlay_layer     required FeatureLayer. The features that will be overlaid with the input_layer features.
    ----------------  ---------------------------------------------------------------
    overlay_type      optional string. The type of overlay to be performed.
                      Values: intersect, erase, union, identity, symmetricaldifference

                      + intersect - Computes a geometric intersection of the input layers. Features or portions of features that overlap in both the inputLayer and overlayLayer layers will be written to the output layer. This is the default.
                      + erase - Only those features or portions of features in the overlay_layer that are not within the features in the input_layer layer are written to the output.
                      + union - Computes a geometric union of the input_layer and overlay_layer. All features and their attributes will be written to the layer.
                      + identity - Computes a geometric intersection of the input features and identity features. Features or portions of features that overlap in both input_layer and overlay_layer will be written to the output layer.
                      + symmetricaldifference - Features or portions of features in the input_layer and overlay_layer that do not overlap will be written to the output layer.

    ----------------  ---------------------------------------------------------------
    output_name       optional string. The task will create a feature service of the results. You define the name of the service.
    ----------------  ---------------------------------------------------------------
    gis               optional GIS. The GIS object where the analysis will take place.
    ================  ===============================================================

    :returns: FeatureLayer
    """
    kwargs = locals()
    tool_name = "OverlayLayers"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {
        "f": "json",
        "outputType": "Input",
        'tolerance': 0,
        'snapToInput': 'false'
    }
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Overlay_Layers_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Overlay Layers')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "overlay_layer": (_FeatureSet, "overlayLayer"),
        "outputType": (str, 'outputType'),
        "overlay_type": (str, "overlayType"),
        "output_name": (str, "OutputName"),
        "context": (str, "context"),
        'tolerance': (int, 'tolerance'),
        "output": (_FeatureSet, "output"),
        'snapToInput': (str, 'snapToInput')
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]
    try:
        _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
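A usage sketch; both layers are illustrative:

# Usage sketch: keep only the parts of parcels that fall inside a flood zone.
result = overlay_data(input_layer=parcels_lyr,       # illustrative FeatureLayer
                      overlay_layer=flood_zone_lyr,  # illustrative FeatureLayer
                      overlay_type="intersect")
print(result)  # the feature service created with the overlaid features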
Example #14
def forest(input_layer,
           var_prediction,
           var_explanatory,
           trees,
           max_tree_depth=None,
           random_vars=None,
           sample_size=100,
           min_leaf_size=None,
           prediction_type="train",
           features_to_predict=None,
           validation=10,
           importance_tbl=False,
           exp_var_matching=None,
           output_name=None,
           gis=None):
    """
    The 'forest' method is a forest-based classification and regression
    task that creates models and generates predictions using an adaptation of
    Leo Breiman's random forest algorithm, which is a supervised machine
    learning method. Predictions can be performed for both categorical
    variables (classification) and continuous variables (regression).
    Explanatory variables can take the form of fields in the attribute
    table of the training features. In addition to validation of model
    performance based on the training data, predictions can be made to
    another feature dataset.

    The following are examples:

        + Given data on the occurrence of seagrass, as well as a number of environmental explanatory
          variables represented as attributes, which have been enriched using a multi-variable grid
          to calculate distances to upstream factories and major ports, future seagrass occurrence can
          be predicted based on future projections for those same environmental explanatory variables.
        + Suppose you have crop yield data at hundreds of farms across the country along with other
          attributes at each of those farms (number of employees, acreage, and so on). Using these
          pieces of data, you can provide a set of features representing farms where you don't have
          crop yield (but you do have all of the other variables), and make a prediction about crop
          yield.
        + Housing values can be predicted based on the prices of houses that have been sold in the
          current year. The sale price of homes sold along with information about the number of
          bedrooms, distance to schools, proximity to major highways, average income, and crime counts
          can be used to predict sale prices of similar homes.


    **Forest Based Classification and Regression is available at ArcGIS Enterprise 10.7.**


    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  Required FeatureSet. The table, point, line or polygon features
                                 used to train the model.
    --------------------------   ---------------------------------------------------------------
    var_prediction               Required dict. The variable from the input_layer parameter
                                 containing the values to be used to train the model, and a
                                 boolean denoting if it's categorical. This field contains known
                                 (training) values of the variable that will be used to predict
                                 at unknown locations.
    --------------------------   ---------------------------------------------------------------
    var_explanatory              Required List. A list of fields representing the explanatory
                                 variables and a Boolean value denoting whether the fields are
                                 categorical. The explanatory variables help predict the value
                                 or category of the `var_prediction` parameter. Use the
                                 categorical parameter for any variables that represent classes
                                 or categories (such as land cover or presence or absence).
                                 Specify the variable as true for any that represent classes or
                                 categories such as land cover or presence or absence and false
                                 if the variable is continuous.
    --------------------------   ---------------------------------------------------------------
    trees                        Required int. The number of trees to create in the forest model.
                                 More trees will generally result in more accurate model
                                 prediction, but the model will take longer to calculate.
    --------------------------   ---------------------------------------------------------------
    max_tree_depth               Optional int. The maximum number of splits that will be made
                                 down a tree. Using a large maximum depth, more splits will be
                                 created, which may increase the chances of overfitting the
                                 model. The default is data driven and depends on the number of
                                 trees created and the number of variables included.
    --------------------------   ---------------------------------------------------------------
    random_vars                  Optional Int. Specifies the number of explanatory variables
                                 used to create each decision tree. Each of the decision trees in
                                 the forest is created using a random subset of the explanatory
                                 variables specified. Increasing the number of variables used in
                                 each decision tree will increase the chances of overfitting
                                 your model, particularly if there are one or two dominant
                                 variables. A common practice is to use the square root of the
                                 total number of explanatory variables (fields, distances, and
                                 rasters combined) if var_prediction is numeric, or to divide
                                 the total number of explanatory variables (fields, distances,
                                 and rasters combined) by 3 if var_prediction is categorical.
    --------------------------   ---------------------------------------------------------------
    sample_size                  Optional int. Specifies the percentage of the input_layer used
                                 for each decision tree. The default is 100 percent of the data.
                                 Samples for each tree are taken randomly from two-thirds of the
                                 data specified.
    --------------------------   ---------------------------------------------------------------
    min_leaf_size                Optional int. The minimum number of observations required to
                                 keep a leaf (that is the terminal node on a tree without
                                 further splits). The default minimum for regression is 5 and
                                 the default for classification is 1. For very large data,
                                 increasing these numbers will decrease the run time of the
                                 tool.
    --------------------------   ---------------------------------------------------------------
    prediction_type              Optional string. Specifies the operation mode of the tool. The
                                 tool can be run to train a model to only assess performance, or
                                 to train a model and predict features. Prediction types are as follows:

                                    + Train - This is the default. A model will be trained, but
                                      no predictions will be generated. Use this option to
                                      assess the accuracy of your model before generating
                                      predictions. This option will output model diagnostics in
                                      the messages window and a chart of variable importance.
                                    + TrainAndPredict - Predictions or classifications will be
                                      generated for features. Explanatory variables must be
                                      provided for both the training features and the features
                                      to be predicted. The output of this option will be a
                                      feature service, model diagnostics, and an optional
                                      table of variable importance.
    --------------------------   ---------------------------------------------------------------
    features_to_predict          Optional. The features for which values of the prediction
                                 variable will be generated. Explanatory variables matching
                                 those used to train the model must be provided for these
                                 features. Used when prediction_type is TrainAndPredict.
    --------------------------   ---------------------------------------------------------------
    validation                   Optional int. Specifies the percentage (between 10 percent
                                 and 50 percent) of ``input_layer`` features to reserve as the
                                 test dataset for validation. The model will be trained without
                                 this random subset of data, and the observed values for those
                                 features will be compared to the predicted values. The default
                                 is 10 percent.
    --------------------------   ---------------------------------------------------------------
    importance_tbl               Optional boolean. Specifies whether an output table will be
                                 generated that contains information describing the importance
                                 of each explanatory variable used in the resulting model.
    --------------------------   ---------------------------------------------------------------
    exp_var_matching             A list of dictionaries pairing each explanatory field with a
                                 boolean denoting whether the field is categorical. The
                                 explanatory variables help predict the value or category of
                                 ``var_prediction``. Set ``categorical`` to true for variables
                                 that represent classes or categories (such as landcover or
                                 presence/absence) and to false for continuous variables.

                                 Syntax: [{"fieldName":"<explanatory field name>", "categorical":true}, ...]

                                    + fieldName is the name of a field in the ``input_layer``
                                      used to predict ``var_prediction``.
                                    + categorical is one of: true or false. A string field
                                      should always be true, and a continuous value should
                                      always be false.
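
                                 Example (illustrative field names):
                                 [{"fieldName":"landcover", "categorical":true},
                                 {"fieldName":"slope", "categorical":false}]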
    --------------------------   ---------------------------------------------------------------
    output_name                  Optional string. The task will create a feature service of the
                                 results. You define the name of the service.
    --------------------------   ---------------------------------------------------------------
    gis                          Optional GIS. The GIS on which this tool runs. If not
                                 specified, the active GIS is used.
    ==========================   ===============================================================

    :returns:
       Output feature layer item
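
    A minimal usage sketch (assuming the enclosing function is exposed as
    ``forest_based_classification_and_regression``; the layer and field
    names are illustrative):

    .. code-block:: python

        # Train a model and predict sales at every store location.
        result = forest_based_classification_and_regression(
            input_layer=stores_lyr,
            var_prediction={"fieldName": "sales", "categorical": False},
            exp_var_matching=[{"fieldName": "population", "categorical": False},
                              {"fieldName": "landcover", "categorical": True}],
            prediction_type="TrainAndPredict",
            output_name="predicted_sales")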


    """
    allowed_prediction_types = {
        'train': "Train",
        'trainandpredict': 'TrainAndPredict'
    }
    if str(prediction_type).lower() not in allowed_prediction_types:
        raise ValueError("Invalid prediction_type: {}".format(prediction_type))
    else:
        prediction_type = allowed_prediction_types[str(prediction_type).lower()]

    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis

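    # This tool is only available on newer portals; on older releases
    # (gis.version < [7]) the function returns None instead of calling the
    # service.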
    if gis.version < [7]:
        return None
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Forest Based Regression_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(
        gis, output_name, output_service_name,
        'Forest Based Classification And Regression')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inFeatures"),
        "prediction_type": (str, "predictionType"),
        "features_to_predict": (_FeatureSet, "featuresToPredict"),
        "var_prediction": (dict, "variablePredict"),
        "var_explanatory": (list, "explanatoryVariables"),
        "exp_var_matching": (list, "explanatoryVariableMatching"),
        "return_importance_table": (bool, "returnVariableOfImportanceTable"),
        "trees": (int, "numberOfTrees"),
        "max_tree_depth": (int, "maximumTreeDepth"),
        "min_leaf_size": (int, "minimumLeafSize"),
        "sample_size": (int, "sampleSize"),
        "random_vars": (int, "randomVariables"),
        "validation": (float, "percentageForValidation"),
        "output_name": (str, "outputTrainedName"),
        "context": (str, "context"),
        "importance_tbl": (bool, "createVariableOfImportanceTable"),
        "output_trained": (_FeatureSet, "outputTrained"),
        "output_predicted": (_FeatureSet, "outputPredicted"),
        "variable_of_importance": (_FeatureSet, "variableOfImportance"),
    }
    return_values = [{
        "name": 'output_trained',
        "display_name": "Output Features",
        "type": _FeatureSet
    }, {
        "name": "output_predicted",
        "display_name": "Output Predicted",
        "type": _FeatureSet
    }, {
        "name": "variable_of_importance",
        "display_name": "Variable of Importance",
        "type": _FeatureSet
    }]
    if features_to_predict is None and prediction_type == 'TrainAndPredict':
        # Predict on the training features themselves. Update params, not
        # kwargs, since params is what is sent to the GP tool.
        params["features_to_predict"] = input_layer
    try:
        res = _execute_gp_tool(gis, "ForestBasedClassificationAndRegression",
                               params, param_db, return_values, _use_async,
                               url, True)
        return output_service
    except:
        output_service.delete()
        raise

Example #15
0
def find_similar_locations(input_layer,
                           search_layer,
                           analysis_fields,
                           most_or_least_similar="MostSimilar",
                           match_method="AttributeValues",
                           number_of_results=10,
                           append_fields=None,
                           output_name=None,
                           gis=None,
                           context=None,
                           future=False,
                           return_tuple=False):
    """
    .. image:: _static/images/find_similar_locations/find_similar_locations.png 

    The ``find_similar_locations`` task measures the similarity of candidate locations to one or more reference locations.

    Based on criteria you specify, ``find_similar_locations`` can answer questions such as the following:

        * Which of your stores are most similar to your top performers with regard to customer profiles?
        * Based on characteristics of villages hardest hit by the disease, which other villages are high risk?

    To answer questions such as these, you provide the reference locations (the ``input_layer`` parameter),
    the candidate locations (the ``search_layer`` parameter), and the fields representing the criteria
    you want to match. For example, the ``input_layer`` might be a layer containing your top performing stores
    or the villages hardest hit by the disease. The ``search_layer`` contains your candidate locations to search.
    This might be all of your stores or all other villages. Finally, you supply a list of fields to use for
    measuring similarity. The ``find_similar_locations`` task will rank all of the candidate locations by how
    closely they match your reference locations across all of the fields you have selected.
 
    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  Required layer. The ``input_layer`` contains one or more reference locations 
                                 against which features in the ``search_layer`` will be evaluated for similarity. 
                                 For example, the ``input_layer`` might contain your top performing stores or the 
                                 villages hardest hit by a disease. See :ref:`Feature Input<FeatureInput>`.
                                  
                                 It is not uncommon for ``input_layer`` and ``search_layer`` to be the same feature service. 
                                 For example, the feature service contains locations of all stores, one of which 
                                 is your top performing store. If you want to rank the remaining stores from most 
                                 to least similar to your top performing store, you can provide a filter for both 
                                 ``input_layer`` and ``search_layer``. The filter on ``input_layer`` would select the top performing 
                                 store, while the filter on ``search_layer`` would select all stores except for the top 
                                 performing store. You can use the optional filter parameter to specify reference locations.

                                 If there is more than one reference location, similarity will be based on averages 
                                 for the fields you specify in the ``analysis_fields`` parameter. For example, if there 
                                 are two reference locations and you are interested in matching population, the task 
                                 will look for candidate locations in ``search_layer`` with populations that are most 
                                 like the average population for both reference locations. If the values for the 
                                 reference locations are 100 and 102, for example, the task will look for candidate 
                                 locations with populations near 101. Consequently, you will want
                                 to choose reference locations that have similar values for the
                                 fields you specify. If, for example, the population value for one
                                 reference location is 100 and for the other is 100,000, the tool
                                 will look for candidate locations with population values near the
                                 average of those two values: 50,050. Notice that this averaged
                                 value is nothing like the population of either reference location.
    --------------------------   ---------------------------------------------------------------  
    search_layer                 Required layer. The layer containing candidate locations that will be 
                                 evaluated against the reference locations.  See :ref:`Feature Input<FeatureInput>`.
    --------------------------   ---------------------------------------------------------------      
    analysis_fields              Required string. A comma-separated list of fields whose values
                                 are used to determine similarity. They must be numeric fields,
                                 and the fields must exist on both the ``input_layer`` and the
                                 ``search_layer``. Depending on the ``match_method`` selected,
                                 the task will find features that are most similar based on
                                 values or profiles of the fields.
    --------------------------   ---------------------------------------------------------------      
    most_or_least_similar        Optional string. The features you want to be returned. You can search for 
                                 features that are either most similar or least similar to the ``input_layer``, 
                                 or search both the most and least similar.
                                 
                                 Choice list: ['MostSimilar', 'LeastSimilar', 'Both']

                                 The default value is 'MostSimilar'.
    --------------------------   ---------------------------------------------------------------
    match_method                 Optional string. The method you select determines how matching is determined. 

                                 Choice list: ['AttributeValues', 'AttributeProfiles']

                                    * The ``AttributeValues`` method uses the squared differences of standardized values. 
                                    * The ``AttributeProfiles`` method uses cosine similarity mathematics to compare the profile 
                                      of standardized values. Using ``AttributeProfiles`` requires the use of at least two analysis fields.   

                                 The default value is 'AttributeValues'.
    --------------------------   ---------------------------------------------------------------    
    number_of_results            Optional integer. The number of ranked candidate locations
                                 output to ``similar_result_layer``. If ``number_of_results`` is
                                 not set, 10 locations will be returned. The maximum number of
                                 results is 10,000.

                                 The default value is 10.
    --------------------------   ---------------------------------------------------------------   
    append_fields                Optional string. The fields from the ``search_layer`` to append
                                 to the output. By default, all fields from the search layer
                                 are appended.
    --------------------------   ---------------------------------------------------------------      
    output_name                  Optional string. The task will create a feature service of the results. 
                                 You define the name of the service.
    --------------------------   ---------------------------------------------------------------      
    gis                          Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
    --------------------------   ---------------------------------------------------------------      
    context                      Optional dict. The context parameter contains additional settings that affect task execution. For this task, there are four settings:

                                 #. Extent (``extent``) - A bounding box that defines the analysis area. Only those features that intersect the bounding box will be analyzed.
                                 #. Processing spatial reference (``processSR``) - The features will be projected into this coordinate system for analysis.
                                 #. Output spatial reference (``outSR``) - The features will be projected into this coordinate system after the analysis to be saved. The output spatial reference for the spatiotemporal big data store is always WGS84.
                                 #. Data store (``dataStore``) - Results will be saved to the specified data store. The default is the spatiotemporal big data store.
    --------------------------   ---------------------------------------------------------------      
    future                       Optional boolean. If 'True', a GPJob is returned instead of results. The GPJob can be queried on the status of the execution.

                                 The default value is 'False'.
    --------------------------   ---------------------------------------------------------------      
    return_tuple                 Optional boolean. If 'True', a named tuple with multiple output keys is returned.
                                 
                                 The default value is 'False'. 
    ==========================   ===============================================================

    :returns: named tuple with the following keys if ``return_tuple`` is set to 'True':

      "output" : feature layer

      "process_info" : list

    else returns a feature layer of the results.

    .. code-block:: python

            # Usage Example: To find potential retail locations based on the current top locations and their attributes.
            
            similar_location_result = find_similar_locations(input_layer=stores_layer,
                                                             search_layer=locations,
                                                             analysis_fields="median_income, population, nearest_competitor",
                                                             most_or_least_similar="MostSimilar", 
                                                             match_method="AttributeValues", 
                                                             number_of_results=50, 
                                                             output_name="similar_locations")
    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Similar Locations_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Find Similar Locations')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    if context is not None:
        params["context"] = context
    else:
        _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "search_layer": (_FeatureSet, "searchLayer"),
        "analysis_fields": (str, "analysisFields"),
        "most_or_least_similar": (str, "mostOrLeastSimilar"),
        "match_method": (str, "matchMethod"),
        "number_of_results": (int, "numberOfResults"),
        "append_fields": (str, "appendFields"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "return_tuple": (bool, "returnTuple"),
        "output": (_FeatureSet, "Output Features"),
        "process_info": (list, "processInfo")
    }
    return_values = [{
        "name": "output",
        "display_name": "Output Features",
        "type": _FeatureSet
    }, {
        "name": "process_info",
        "display_name": "Process Information",
        "type": list
    }]
    try:
        if future:
            gpjob = _execute_gp_tool(gis,
                                     "FindSimilarLocations",
                                     params,
                                     param_db,
                                     return_values,
                                     _use_async,
                                     url,
                                     True,
                                     future=future)
            return GAJob(gpjob=gpjob, return_service=output_service)
        res = _execute_gp_tool(gis,
                               "FindSimilarLocations",
                               params,
                               param_db,
                               return_values,
                               _use_async,
                               url,
                               True,
                               future=future)

        if return_tuple:
            return res
        else:
            return output_service
    except:
        output_service.delete()
        raise
Example #16
0
def detect_incidents(input_layer,
                     track_fields,
                     start_condition_expression,
                     end_condition_expression=None,
                     output_mode="AllFeatures",
                     time_boundary_split=None,
                     time_split_unit=None,
                     time_reference=None,
                     output_name=None,
                     gis=None,
                     context=None,
                     future=False):
    """

    .. image:: _static/images/detect_incidents/detect_incidents.png 

    The ``detect_incidents`` task works with a time-enabled layer of points,
    lines, areas, or tables that represents an instant in time. Using
    sequentially ordered features, called tracks, this tool determines
    which features are incidents of interest. Incidents are determined by
    conditions that you specify. First, the tool determines which features
    belong to a track using one or more fields. Using the time at each
    feature, the tracks are ordered sequentially and the incident condition
    is applied. Features that meet the starting incident condition are
    marked as an incident. You can optionally apply an ending incident
    condition; when the end condition is 'True', the feature is no longer
    an incident. The results include the original features with new
    columns identifying the incident name and indicating which features
    meet the incident condition. You can return all original features,
    only the features that are incidents, or all of the features within
    tracks where at least one incident occurred.

    For example, suppose you have GPS measurements of hurricanes every 10
    minutes. Each GPS measurement records the hurricane's name, location,
    time of recording, and wind speed. Using these fields, you could create
    an incident where any measurement with a wind speed greater than 208
    km/h is an incident titled Catastrophic. By not setting an end
    condition, the incident would end if the feature no longer meets the
    start condition (wind speed slows down to less than 208).

    Using another example, suppose you were monitoring concentrations of a
    chemical in your local water supply using a field called
    contanimateLevel. You know that the recommended levels are less than
    0.01 mg/L, and dangerous levels are above 0.03 mg/L. To detect
    incidents where a value above 0.03 mg/L is an incident that remains an
    incident until contamination levels return to normal, you create an
    incident using a start condition of contanimateLevel > 0.03 and an end
    condition of contanimateLevel < 0.01. This will mark any sequence where
    values exceed 0.03 mg/L until they return to a value less than 0.01.
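
    For the water-quality scenario above, the corresponding arguments would
    look like the following (a sketch; the layer variable and track field
    name are illustrative):

    .. code-block:: python

        incidents = detect_incidents(input_layer=water_lyr,
                                     track_fields="stationID",
                                     start_condition_expression="$feature.contanimateLevel > 0.03",
                                     end_condition_expression="$feature.contanimateLevel < 0.01",
                                     output_name="contamination_incidents")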

    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  Required layer. The table, point, line or polygon features 
                                 containing potential incidents. See :ref:`Feature Input<FeatureInput>`.
    --------------------------   ---------------------------------------------------------------
    track_fields                 Required string. The fields used to identify distinct tracks.
                                 There can be multiple ``track_fields``.
    --------------------------   ---------------------------------------------------------------
    start_condition_expression   Required string. The condition used to identify incidents. If there
                                 is no ``end_condition_expression`` specified, any feature
                                 that meets this condition is an incident. If there
                                 is an end condition, any feature that meets the
                                 ``start_condition_expression`` and does not meet the
                                 ``end_condition_expression`` is an incident.
                                 The expressions are Arcade expressions.
    --------------------------   ---------------------------------------------------------------
    end_condition_expression     Optional string. The condition used to identify the end of an
                                 incident. If no end condition is specified, an incident ends
                                 as soon as a feature no longer meets the
                                 ``start_condition_expression``. If an end condition is
                                 specified, a feature that meets the
                                 ``start_condition_expression`` remains an incident until it
                                 meets the ``end_condition_expression``. This is an Arcade
                                 expression.
    --------------------------   ---------------------------------------------------------------
    output_mode                  Optional string. Determines which features are returned. 

                                 Choice list: ['AllFeatures', 'Incidents']

                                 - ``AllFeatures`` - All of the input features are returned.
                                 - ``Incidents`` - Only features that were found to be incidents
                                   are returned.

                                 The default value is 'AllFeatures'.
    --------------------------   ---------------------------------------------------------------
    time_boundary_split          Optional integer. A time boundary used when detecting
                                 incidents. A time boundary allows you to analyze values within
                                 a defined time span. For example, if you use a time boundary
                                 of 1 day, starting on January 1st, 1980, tracks will be
                                 analyzed one day at a time. The time boundary parameter was
                                 introduced in ArcGIS Enterprise 10.7.

                                 The ``time_boundary_split`` parameter defines the scale of the
                                 time boundary. In the case above, this would be 1. See the
                                 portal documentation for this tool to learn more, and see the
                                 example pairing with ``time_split_unit`` below.
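
                                 Example (a one-day boundary):
                                 ``time_boundary_split=1``, ``time_split_unit='Days'``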
    --------------------------   ---------------------------------------------------------------
    time_split_unit              Optional string. The unit of time for ``time_boundary_split``.
                                 It is required when ``time_boundary_split`` is specified.

                                 Choice list: ['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds'].
    --------------------------   ---------------------------------------------------------------
    time_reference               Optional datetime.datetime. The date and time from which the
                                 analysis will begin.
    --------------------------   ---------------------------------------------------------------
    output_name                  Optional string. The task will create a feature service of the
                                 results. You define the name of the service.
    --------------------------   ---------------------------------------------------------------
    gis                          Optional GIS. The GIS on which this tool runs. If not
                                 specified, the active GIS is used.
    --------------------------   ---------------------------------------------------------------
    context                      Optional dict. The context parameter contains additional settings that affect task execution. For this task, there are four settings:

                                 #. Extent (``extent``) - A bounding box that defines the analysis area. Only those features that intersect the bounding box will be analyzed.
                                 #. Processing spatial reference (``processSR``) - The features will be projected into this coordinate system for analysis.
                                 #. Output spatial reference (``outSR``) - The features will be projected into this coordinate system after the analysis to be saved. The output spatial reference for the spatiotemporal big data store is always WGS84.
                                 #. Data store (``dataStore``) - Results will be saved to the specified data store. The default is the spatiotemporal big data store.
    --------------------------   ---------------------------------------------------------------
    future                       Optional boolean. If 'True', a GPJob is returned instead of
                                 results. The GPJob can be queried on the status of the execution.

                                 The default value is 'False'.
    ==========================   ===============================================================

    :returns: result_layer : Output Features as feature layer collection item.

    .. code-block:: python

            # Usage Example: This example finds when and where snowplows were moving slower than 10 miles per hour by calculating the mean of a moving window of five speed values.
            
            arcgis.env.verbose = True # set environment
            arcgis.env.defaultAggregations = True # set environment

            delay_incidents = detect_incidents(input_layer=snowplows,
                                               track_fields="plowID, dayOfYear",
                                               start_condition_expression='Mean($track.field["speed"].window(-5, 0)) < 10',
                                               output_name="Slow_Plow_Incidents")
    """
    kwargs = locals()
    tool_name = "DetectIncidents"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {"f": "json"}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Detect_Incidents_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Detect Track Incidents')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    if context is not None:
        params["context"] = context
    else:
        _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "track_fields": (str, "trackFields"),
        "start_condition_expression": (str, "startConditionExpression"),
        "end_condition_expression": (str, "endConditionExpression"),
        "output_mode": (str, "outputMode"),
        "time_boundary_split": (int, "timeBoundarySplit"),
        "time_split_unit": (str, "timeBoundarySplitUnit"),
        "time_reference": (datetime.datetime, "timeBoundaryReference"),
        "output_name": (str, "outputName"),
        "output": (_FeatureSet, "output"),
        "context": (str, "context")
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]
    try:
        if future:
            gpjob = _execute_gp_tool(gis,
                                     tool_name,
                                     params,
                                     param_db,
                                     return_values,
                                     _use_async,
                                     url,
                                     True,
                                     future=future)
            return GAJob(gpjob=gpjob, return_service=output_service)
        _execute_gp_tool(gis,
                         tool_name,
                         params,
                         param_db,
                         return_values,
                         _use_async,
                         url,
                         True,
                         future=future)
        return output_service
    except:
        output_service.delete()
        raise

Example #17
0
def geocode_locations(input_layer,
                      country=None,
                      category=None,
                      include_attributes=True,
                      locator_parameters=None,
                      output_name=None,
                      geocode_service=None,
                      geocode_parameters=None,
                      gis=None,
                      context=None,
                      future=False):
    """
    .. image:: _static/images/geocode_locations/geocode_locations.png 

    The ``geocode_locations`` task geocodes a table from a big data file share. 
    The task uses a geocode utility service configured with your portal. If you 
    do not have a geocode utility service configured, talk to your administrator. 
    `Learn more about configuring a locator 
    service <https://enterprise.arcgis.com/en/portal/latest/administer/windows/configure-portal-to-geocode-addresses.htm>`_.

    When preparing to use the Geocode Locations task, be sure to review `Best Practices
    for geocoding with GeoAnalytics Server <https://enterprise.arcgis.com/en/portal/latest/use/geoanalytics-geocoding-best-practices.htm>`_.

    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  Required layer. The tabular input that will be geocoded. See :ref:`Feature Input<FeatureInput>`.
    --------------------------   ---------------------------------------------------------------
    country                      Optional string. If all your data is in one country, this helps
                                 improve performance for locators that accept that variable.
    --------------------------   ---------------------------------------------------------------
    category                     Optional string. Enter a category for more precise geocoding
                                 results, if applicable. Some geocoding services do not support
                                 category, and the available options depend on your geocode service.
    --------------------------   ---------------------------------------------------------------
    include_attributes           Optional boolean. A Boolean value to return the output fields
                                 from the geocoding service in the results. To output all
                                 available output fields, set this value to 'True'. Setting the
                                 value to 'False' will return your original data with geocode
                                 coordinates only. Some geocoding services do not support output
                                 fields, and the available options depend on your geocode service.
    --------------------------   ---------------------------------------------------------------
    locator_parameters           Optional dict. Additional parameters specific to your locator.
    --------------------------   ---------------------------------------------------------------
    output_name                  Optional string. The task will create a feature service of the
                                 results. You define the name of the service.
    --------------------------   ---------------------------------------------------------------
    geocode_service              Optional string or Geocoder. The URL of the geocode service 
                                 that you want to geocode your addresses against. The URL must end in 
                                 geocodeServer and allow batch requests. The geocode service must be 
                                 configured to allow for batch geocoding. For more information, 
                                 see `Configuring batch geocoding <https://enterprise.arcgis.com/en/portal/latest/administer/windows/configure-portal-to-geocode-addresses.htm>`_
    --------------------------   ---------------------------------------------------------------
    geocode_parameters           Optional dict. This includes parameters that help parse
                                 the input data, as well as the field lengths and a field
                                 mapping. This value is the output from the AnalyzeGeocodeInput
                                 tool available on your server designated to geocode. It is
                                 important to inspect the field mapping closely and adjust it
                                 accordingly before submitting your job; otherwise your
                                 geocoding results may not be accurate. It is recommended to
                                 use the output from AnalyzeGeocodeInput and modify the field
                                 mapping instead of constructing this JSON by hand.

                                 **Values**

                                 **field_info** - A list of triples with the field names of your input
                                 data, the field type (usually TEXT), and the allowed length
                                 (usually 255).
                                 Example: [['ObjectID', 'TEXT', 255], ['Address', 'TEXT', 255],
                                          ['Region', 'TEXT', 255], ['Postal', 'TEXT', 255]]

                                 **header_row_exists** - Enter true or false.

                                 **column_names** - Submit the column names of your data if your data
                                 does not have a header row.

                                 **field_mapping** - Field mapping between each input field and
                                 candidate fields on the geocoding service.
                                 Example: [['ObjectID', 'OBJECTID'], ['Address', 'Address'],
                                          ['Region', 'Region'], ['Postal', 'Postal']]
    --------------------------   ---------------------------------------------------------------
    gis                          Optional GIS. The GIS on which this tool runs. If not
                                 specified, the active GIS is used.
    --------------------------   ---------------------------------------------------------------
    context                      Optional dict. Context contains additional settings that affect task execution. 
                                 For this task, there are three settings:

                                 Processing spatial reference (``processSR``) - The features will be projected into this coordinate system for analysis.
                                 Output spatial reference (``outSR``) - The features will be projected into this coordinate system after the analysis to be saved. The output spatial reference for the spatiotemporal big data store is always WGS84.
                                 Data store (``dataStore``) - Results will be saved to the specified data store. The default is the spatiotemporal big data store.
    --------------------------   ---------------------------------------------------------------
    future                       Optional boolean. If True, a GPJob is returned instead of
                                 results. The GPJob can be queried on the status of the execution.
    ==========================   ===============================================================


    :returns: Feature Layer

    .. code-block:: python

            # Usage Example: To geocode a big data file share of mailing addresses in the United States Northwest.

            geocode_server = "https://mymachine.domain.com/server/rest/services/USALocator/GeocodeServer"
            geo_parameters = {"field_info": "[('ObjectID', 'TEXT', 255), ('Street', 'TEXT', 255), ('City', 'TEXT', 255), ('Region', 'TEXT', 255), ('State', 'TEXT', 255)]", "column_names": "", "file_type": "table", "header_row_exists": "true", "field_mapping": "[[\"Street\", \"Street\"], [\"City\", \"City\"], [\"State\", \"State\"], [\"ZIP\", \"ZIP\"]]"}
            geocode_result = find_locations.geocode_locations(input_layer=NW_addresses, 
                                                        output_name="geocoded_NW_USA",
                                                        geocode_service=geocode_server,
                                                        geocode_parameters = geo_parameters)

    """
    from arcgis.features.layer import Layer
    from arcgis.gis import Item
    from arcgis.geocoding._functions import Geocoder
    kwargs = locals()
    tool_name = "GeocodeLocations"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {"f": "json"}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value
    if output_name is None:
        output_service_name = 'Geocoding_Results_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

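    # Normalize the accepted input types (URL string, Item, Layer, or a
    # {'url': ...} dict) into the dictionary form the GP service expects.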
    if isinstance(input_layer, str):
        input_layer = {'url': input_layer}
    elif isinstance(input_layer, Item):
        input_layer = input_layer.layers[0]._lyr_dict
        if 'type' in input_layer:
            input_layer.pop('type')
    elif isinstance(input_layer, Layer):
        input_layer = input_layer._lyr_dict
        if 'type' in input_layer:
            input_layer.pop('type')
    elif isinstance(input_layer, dict) and \
         not "url" in input_layer:
        raise ValueError("Invalid Input: input_layer dictionary" + \
                         " must have format {'url' : <url>}")
    elif isinstance(input_layer, dict) and "url" in input_layer:
        pass
    else:
        raise ValueError("Invalid input_layer input. Please pass an Item, " + \
                         "Big DataStore Layer or Big DataStore URL to geocode.")

    if geocode_service is None:
        geocode_service_url = None
        for service in gis.properties.helperServices.geocode:
            # Use the first geocoder configured for batch geocoding.
            if 'batch' in service and service['batch'] == True:
                geocode_service_url = service["url"]
                break
        if geocode_service_url is None:
            raise ValueError("A geocoder with batch enabled must be configured" + \
                             " with this portal to use this service.")
        params['geocode_service_url'] = geocode_service_url
    elif isinstance(geocode_service, Geocoder):
        geocode_service_url = geocode_service.url
        params['geocode_service_url'] = geocode_service_url
    elif isinstance(geocode_service, str):
        geocode_service_url = geocode_service
        params['geocode_service_url'] = geocode_service_url
    else:
        raise ValueError("geocode_service_url must be a string or Geocoder")

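    # No geocode parameters were supplied; derive them by running the
    # portal's AnalyzeGeocodeInput tool against the input table, as
    # recommended in the documentation above.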
    if geocode_parameters is None:
        from arcgis.geoprocessing._tool import Toolbox
        analyze_geocode_url = gis.properties.helperServices.asyncGeocode.url
        tbx = Toolbox(url=analyze_geocode_url, gis=gis)
        geocode_parameters = tbx.analyze_geocode_input(
            input_table=input_layer, geocode_service_url=geocode_service_url)
        params['geocode_parameters'] = geocode_parameters
    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Geocoded Locations')
    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "geocode_service_url": (str, "geocodeServiceURL"),
        "geocode_parameters": (str, "geocodeParameters"),
        "country": (str, "sourceCountry"),
        "category": (str, "category"),
        "include_attributes": (bool, "includeAttributes"),
        "locator_parameters": (str, "locatorParameters"),
        "output_name": (str, "outputName"),
        "output": (_FeatureSet, "output"),
        "context": (str, "context")
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]
    try:
        if future:
            gpjob = _execute_gp_tool(gis,
                                     tool_name,
                                     params,
                                     param_db,
                                     return_values,
                                     _use_async,
                                     url,
                                     True,
                                     future=future)
            return GAJob(gpjob=gpjob, return_service=output_service)
        res = _execute_gp_tool(gis,
                               tool_name,
                               params,
                               param_db,
                               return_values,
                               _use_async,
                               url,
                               True,
                               future=future)
        return output_service
    except:
        output_service.delete()
        raise
def enrich_from_grid(input_layer,
                     grid_layer,
                     enrichment_attributes=None,
                     output_name=None,
                     gis=None,
                     context=None,
                     future=False):
    """
    .. image:: _static/images/enrich_from_grid/enrich_from_grid.png 

    The Enrich From Multi-Variable Grid task joins attributes from a multivariable grid to a point layer. 
    The multivariable grid must be created using the ``build_multivariable_grid`` task. Metadata from the 
    multivariable grid is used to efficiently enrich the input point features, making it faster than the 
    Join Features task. Attributes in the multivariable grid are joined to the input point features when 
    the features intersect the grid.

    The attributes in the multivariable grid can be used as explanatory variables when modeling spatial 
    relationships with your input point features, and this task allows you to join those attributes to 
    the point features quickly.

    .. note::
        Only available at ArcGIS Enterprise 10.7 and later.

    ======================  ===============================================================
    **Argument**            **Description**
    ----------------------  ---------------------------------------------------------------
    input_layer             Required layer. The point features that will be enriched
                            by the multi-variable grid. See :ref:`Feature Input<FeatureInput>`.
    ----------------------  ---------------------------------------------------------------
    grid_layer              Required layer. The multivariable grid layer created using the Build Multi-Variable Grid task. 
                            See :ref:`Feature Input<FeatureInput>`.
    ----------------------  ---------------------------------------------------------------
    enrichment_attributes   Optional string. A list of fields in the multi-variable grid
                            that will be joined to the input point features. If the
                            attributes are not provided, all fields in the multi-variable
                            grid will be joined to the input point features.
    ----------------------  ---------------------------------------------------------------
    output_name             Optional string. The task will create a feature service of the
                            results. You define the name of the service.
    ----------------------  ---------------------------------------------------------------
    gis                     Optional GIS. The GIS object where the analysis will take place.
    ----------------------  ---------------------------------------------------------------
    context                 Optional dict. The context parameter contains additional settings that affect task execution. For this task, there are five settings:

                            #. Extent (``extent``) - A bounding box that defines the analysis area. Only those features that intersect the bounding box will be analyzed.
                            #. Processing spatial reference (``processSR``) - The features will be projected into this coordinate system for analysis.
                            #. Output spatial reference (``outSR``) - The features will be projected into this coordinate system after the analysis to be saved. The output spatial reference for the spatiotemporal big data store is always WGS84.
                            #. Data store (``dataStore``) - Results will be saved to the specified data store. The default is the spatiotemporal big data store.
                            #. Default aggregation styles (``defaultAggregationStyles``) - If set to 'True', results will have square, hexagon, and triangle aggregation styles enabled on results map services.
    ----------------------  ---------------------------------------------------------------
    future                  Optional boolean. If 'True', a GPJob is returned instead of
                            results. The GPJob can be queried on the status of the execution.

                            The default value is 'False'.
    ======================  ===============================================================

    :returns: result_layer : Output Features as feature layer item.

    .. code-block:: python

            # Usage Example: To enrich a layer of crime data with a multivariable grid containing demographic information.

            enrich_result = enrich_from_grid(input_layer=crime_lyr, 
                                             grid_layer=mvg_layer,
                                             output_name="chicago_crimes_enriched")

            
    """
    kwargs = locals()
    tool_name = "EnrichFromMultiVariableGrid"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {
        "f": "json",
    }
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Enrich_Grid_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Enrich Grid Layers')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    if context is not None:
        params["context"] = context
    else:
        _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputFeatures"),
        "grid_layer": (_FeatureSet, "gridLayer"),
        "enrichment_attributes": (str, "enrichAttributes"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "output"),
    }

    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]

    try:
        if future:
            gpjob = _execute_gp_tool(gis,
                                     tool_name,
                                     params,
                                     param_db,
                                     return_values,
                                     _use_async,
                                     url,
                                     True,
                                     future=future)
            return GAJob(gpjob=gpjob, return_service=output_service)
        _execute_gp_tool(gis,
                         tool_name,
                         params,
                         param_db,
                         return_values,
                         _use_async,
                         url,
                         True,
                         future=future)
        return output_service
    except:
        output_service.delete()
        raise
Example #19
0
def trace_downstream(input_points, point_id_field=None, source_database='Finest', generalize=False,
                     gis=None):
    """
    
    .. image:: _static/images/trace_downstream/trace_downstream.png   

    The ``trace_downstream`` method delineates the downstream path from a specified location. 
    Esri-curated elevation data is used to create an output polyline delineating the flow path 
    downstream from the specified input location. This method accesses a service using multiple 
    source databases which are available for different geographic areas and at different 
    spatial scales.
    
    ==================     ====================================================================
    **Argument**           **Description**
    ------------------     --------------------------------------------------------------------
    input_points           Required FeatureSet or Spatially Enabled DataFrame. Points
                           delineating the starting locations from which the downstream
                           paths will be calculated. See :ref:`Feature Input<FeatureInput>`.
    ------------------     --------------------------------------------------------------------
    point_id_field         Optional string. Field used to identify the feature from the source data. This is 
                           useful for relating the results back to the original source data.
    ------------------     --------------------------------------------------------------------
    source_database        Optional string. Keyword indicating the source data that will be
                           used in the analysis. This keyword is an approximation of the
                           spatial resolution of the digital elevation model used to build
                           the foundation hydrologic database. Since many elevation sources
                           are distributed with units of arc seconds, this keyword is an
                           approximation in meters for easier understanding.


                           - Finest: Finest resolution available at each location from all
                             possible data sources.
                           - 10m: The hydrologic source was built from 1/3 arc second -
                             approximately 10 meter resolution, elevation data.
                           - 30m: The hydrologic source was built from 1 arc second -
                             approximately 30 meter resolution, elevation data.
                           - 90m: The hydrologic source was built from 3 arc second -
                             approximately 90 meter resolution, elevation data.

                           The default value is 'Finest'.
    ------------------     --------------------------------------------------------------------
    generalize             Optional boolean. Determines if the output downstream trace lines will be smoothed 
                           into simpler lines.

                           The default value is False.
    ------------------     --------------------------------------------------------------------
    gis                    Optional GIS. If not provided, a GIS object logged into an
                           active portal with elevation helper services defined must
                           already exist in the active Python session.
    ==================     ====================================================================

    :return: FeatureSet

    .. code-block:: python

        # USAGE EXAMPLE: To trace the downstream path from the outlet points.
        path = trace_downstream(input_points=fs,
                                source_database='Finest',
                                generalize=False)
    """
    kwargs = locals()

    param_db = {
        "input_points": (FeatureSet, "InputPoints"),
        "point_id_field": (str, 'PointIDField'),
        "source_database": (str, 'SourceDatabase'),
        "generalize": (str, 'Generalize'),
        "output_trace_line": (FeatureSet, "Output Trace Line"),
    }

    return_values = [
        {"name": "output_trace_line", "display_name": "Output Trace Line", "type": FeatureSet},
    ]

    # use helper function to evaluate the input points and convert them, if necessary, to a FeatureSet
    input_fs = _evaluate_spatial_input(input_points)

    if input_fs.geometry_type != 'esriGeometryPoint':
        raise Exception('input_points FeatureSet must have esriGeometryPoint geometry, not {}'.format(input_fs.geometry_type))

    input_fields = input_fs.fields
    if point_id_field and len(input_fields) and point_id_field not in [f['name'] for f in input_fields]:
        input_fields_str = ','.join(f['name'] for f in input_fields)
        raise Exception('The provided point_id_field {} does not appear to be in the input_points FeatureSet fields - {}'.format(point_id_field, input_fields_str))

    if source_database not in ['Finest', '10m', '30m', '90m']:
        raise Exception('source_database must be one of "Finest", "10m", "30m", or "90m", not {}.'.format(source_database))

    if gis is None and arcgis.env.active_gis is None:
        raise Exception('GIS must be defined either by directly passing in a GIS object created using credentials, or one must already be created in the active Python session.')
    elif gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.hydrology.url

    return _execute_gp_tool(gis, "TraceDownstream", kwargs, param_db, return_values, True, url)
Exemple #20
def merge_layers(input_layer,
                 merge_layer,
                 merge_attributes=None,
                 output_name=None,
                 gis=None):
    """
    The Merge Layers task combines two feature layers to create a single output layer. The tool
    requires that both layers have the same geometry type (tabular, point, line, or polygon). If
    time is enabled on one layer, the other must also be time enabled and have the same time type
    (instant or interval). The result will always contain all fields from the input layer. All
    fields from the merge layer will be included by default, or you can specify custom merge rules
    to define the resulting schema. For example:

    - I have three layers for England, Wales, and Scotland and I want a single layer of Great
      Britain. I can use Merge Layers to combine the areas and maintain all fields from each area.
    - I have two layers containing parcel information for contiguous townships. I want to join them
      together into a single layer, keeping only the fields that have the same name and type in the
      two layers.

    Only available at **ArcGIS Enterprise 10.7** and later.

    ================  ===============================================================
    **Argument**      **Description**
    ----------------  ---------------------------------------------------------------
    input_layer       Required FeatureLayer. The point, line or polygon features.
    ----------------  ---------------------------------------------------------------
    merge_layer       Required FeatureLayer. The point, line, or polygon features to
                      merge with the input_layer. The merge_layer must contain the
                      same geometry type (tabular, point, line, or polygon) and the
                      same time type (none, instant, or interval) as the input_layer.
                      All fields in the merge_layer will be included in the result
                      layer by default or you can define merge_attributes to
                      customize the resulting schema.
    ----------------  ---------------------------------------------------------------
    merge_attributes  Optional list. Defines how the fields in mergeLayer will be
                      modified. By default, all fields from both inputs will be
                      included in the output layer.

                      If a field exists in one layer but not the other, the output
                      layer will still contain the field. The output field will
                      contain null values for the input features that did not have the
                      field. For example, if the input_layer contains a field named
                      TYPE but the merge_layer does not contain TYPE, the output will
                      contain TYPE, but its values will be null for all the features
                      copied from the merge_layer.

                      You can control how fields in the merge_layer are written to the
                      output layer using the following merge types that operate on a
                      specified merge_layer field:

                      + Remove - The field in the merge_layer will be removed from the output layer.
                      + Rename - The field in the merge_layer will be renamed in the output layer. You cannot rename a field in the merge_layer to a field in the inputLayer. If you want to make field names equivalent, use Match.
                      + Match - A field in the merge_layer is made equivalent to a field in the input_layer specified by mergeValue. For example, the input_layer has a field named CODE and the merge_layer has a field named STATUS. You can match STATUS to CODE, and the output will contain the CODE field with values of the STATUS field used for features copied from the merge_layer. Type casting is supported (for example, double to integer, integer to string) except for string to numeric.
                      REST web example:

                      Syntax: This example matches Average_Sales to Mean_Sales,
                              removes Bonus, and renames Field4 to Errors.

                      .. code-block:: python

                          [{
                              "mergeLayerField": "Mean_Sales",
                              "mergeType": "Match",
                              "mergeValue": "Average_Sales"
                          },
                          {
                              "mergeLayerField": "Bonus",
                              "mergeType": "Remove"
                          },
                          {
                              "mergeLayerField": "Field4",
                              "mergeType": "Rename",
                              "mergeValue": "Errors"
                          }]

    ----------------  ---------------------------------------------------------------
    output_name       Optional string. The task will create a feature service of the results. You define the name of the service.
    ----------------  ---------------------------------------------------------------
    gis               Optional GIS. The GIS object where the analysis will take place.
    ================  ===============================================================

    :returns: FeatureLayer
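
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): combine two parcel layers into
        # one service; `parcels_a` and `parcels_b` are assumed to be existing
        # FeatureLayers, and the field name "Bonus" is hypothetical.
        merged = merge_layers(input_layer=parcels_a,
                              merge_layer=parcels_b,
                              merge_attributes=[{"mergeLayerField": "Bonus",
                                                 "mergeType": "Remove"}],
                              output_name="Parcels_Merged")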
    """
    kwargs = locals()
    tool_name = "MergeLayers"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {
        "f": "json",
    }
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value
        elif key == 'merge_attributes':
            params[key] = []
    if output_name is None:
        output_service_name = 'Merge_Layers_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Merge Layers')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "merge_layer": (_FeatureSet, "mergeLayer"),
        "merge_attributes": (list, "mergingAttributes"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "output"),
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]
    try:
        _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise

Exemple #21
def clip_layer(input_layer, clip_layer, output_name=None, gis=None):
    """
    Clips features from one layer to the extent of a boundary layer. Use this tool to cut out a piece
    of one feature class using one or more of the features in another feature class as a cookie
    cutter. This is particularly useful for creating a new feature layer - also referred to as a study
    area or area of interest (AOI) - that contains a geographic subset of the features in another,
    larger feature class.

    Only available at **ArcGIS Enterprise 10.7** and later.

    ================  ===============================================================
    **Argument**      **Description**
    ----------------  ---------------------------------------------------------------
    input_layer       required FeatureLayer. The point, line or polygon features.
    ----------------  ---------------------------------------------------------------
    clip_layer        required FeatureLayer. The features that will be clipping the input_layer features.
    ----------------  ---------------------------------------------------------------
    output_name       optional string. The task will create a feature service of the results. You define the name of the service.
    ----------------  ---------------------------------------------------------------
    gis               optional GIS. The GIS object where the analysis will take place.
    ================  ===============================================================

    :returns: FeatureLayer
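
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): clip parcels to a study area;
        # `parcels_lyr` and `study_area_lyr` are assumed existing FeatureLayers.
        clipped = clip_layer(input_layer=parcels_lyr,
                             clip_layer=study_area_lyr,
                             output_name="Parcels_AOI")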

    """
    kwargs = locals()
    tool_name = "ClipLayer"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {
        "f": "json",
    }
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value
    if output_name is None:
        output_service_name = 'Clip_Layers_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Overlay Layers')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "clip_layer": (_FeatureSet, "clipLayer"),
        "outputType": (str, 'outputType'),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "output"),
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]
    try:
        _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise

def glr(input_layer,
        var_dependent,
        var_explanatory,
        regression_family="Continuous",
        features_to_predict=None,
        gen_coeff_table=False,
        exp_var_matching=None,
        dep_mapping=None,
        output_name=None,
        gis=None):
    """

    This tool performs Generalized Linear Regression (glr) to generate
    predictions or to model a dependent variable's relationship to a set of
    explanatory variables. This tool can be used to fit continuous
    (Gaussian/OLS), binary (logistic), and count (Poisson) models.

    The following are examples of the tool's utility:

        + What demographic characteristics contribute to high rates of public transportation usage?
        + Is there a positive relationship between vandalism and burglary?
        + Which variables effectively predict 911 call volume? Given future projections, what is the expected demand for emergency response resources?
        + What variables affect low birth rates?

    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  Required FeatureSet. The layer containing the dependent and
                                 independent variables.
    --------------------------   ---------------------------------------------------------------
    var_dependent                Required String. The numeric field containing the observed
                                 values you want to model.
    --------------------------   ---------------------------------------------------------------
    var_explanatory              Required String. One or more fields representing independent
                                 explanatory variables in your regression model.
    --------------------------   ---------------------------------------------------------------
    regression_family            Optional String. This field specifies the type of data you are
                                 modeling. The default is 'Continuous'.

                                 regression_family is one of the following:

                                    + Continuous - The dependent_variable is continuous. The
                                                   model used is Gaussian, and the tool performs
                                                   ordinary least squares regression.
                                    + Binary - The dependent_variable represents presence or
                                               absence. Values must be 0 (absence) or 1
                                               (presence), or mapped to 0 and 1 using the
                                               dep_mapping parameter.
                                    + Count - The dependent_variable is discrete and represents
                                              events, such as crime counts, disease incidents,
                                              or traffic accidents. The model used is Poisson
                                              regression.
    --------------------------   ---------------------------------------------------------------
    features_to_predict          Optional FeatureSet. A layer containing features representing
                                 locations where estimates should be computed. Each feature in
                                 this dataset should contain values for all the explanatory
                                 variables specified. The dependent variable for these features
                                 will be estimated using the model calibrated for the input
                                 layer data.

                                 Syntax: As described in Feature input, this parameter can be
                                         one of the following:

                                    + A URL to a feature service layer with an optional filter
                                      to select specific features
                                    + A URL to a big data catalog service layer with an
                                      optional filter to select specific features
                                    + A feature collection
    --------------------------   ---------------------------------------------------------------
    gen_coeff_table              Optional Boolean. Determines if a table with coefficient values
                                 will be returned. By default, the coefficient table is not
                                 returned.
    --------------------------   ---------------------------------------------------------------
    exp_var_matching             Optional List. A list of the var_explanatory fields specified
                                 from the input_layer and their corresponding fields from the
                                 features_to_predict. By default, if an explanatory variable is
                                 not mapped, it will match to a field with the same name in the
                                 features_to_predict. This parameter is only used if there is a
                                 features_to_predict input. You do not need to use it if the
                                 names and types of the fields match between your two input
                                 datasets.

                                 Syntax: [{"predictionLayerField":"<field name>",
                                          "trainingLayerField": "<field name>"},...]

                                    + predictionLayerField is the name of a field specified in
                                      the var_explanatory parameter.
                                    + trainingLayerField is the field that will match to the
                                      field in the var_explanatory parameter.

    --------------------------   ---------------------------------------------------------------
    dep_mapping                  Optional List. A list representing the values used to map to 0
                                 (absence) and 1 (presence) for binary regression.

                                 Syntax: [{"value0":"<false value>"},{"value1":"<true value>"}]

                                    + value0 is the string that will be used to represent 0
                                      (absence values).
                                    + value1 is the string that will be used to represent 1
                                      (presence values).

    --------------------------   ---------------------------------------------------------------
    output_name                  Optional String. The task will create a feature service of the
                                 results. You define the name of the service.
    --------------------------   ---------------------------------------------------------------
    gis                          Optional GIS, the GIS on which this tool runs. If not
                                 specified, the active GIS is used.
    ==========================   ===============================================================

    :returns:
       Output feature layer item
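
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): model 911 call volume from
        # hypothetical explanatory fields; `calls_lyr` is assumed to be an
        # existing layer in the active GIS.
        results = glr(input_layer=calls_lyr,
                      var_dependent="CallVolume",
                      var_explanatory="Pop, MedianIncome, Jobs",
                      regression_family="Count",
                      gen_coeff_table=True,
                      output_name="GLR_911_Calls")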


    """

    _allowed_regression_family = {
        "continuous": "Continuous",
        "binary": "Binary",
        "count": "Count"
    }
    kwargs = locals()

    if regression_family.lower() in _allowed_regression_family:
        regression_family = _allowed_regression_family[regression_family.lower()]
        kwargs['regression_family'] = regression_family
    else:
        raise ValueError("Invalid regression_family.")

    gis = _arcgis.env.active_gis if gis is None else gis

    if gis.version < [7]:
        # The GLR geoprocessing task is not available on older portals.
        return None
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'GLR_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Generalized Linear Regression')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "regression_family": (str, "regressionFamily"),
        "gen_coeff_table": (bool, "generateCoefficientTable"),
        "exp_var_matching": (list, "explanatoryVariableMatching"),
        "var_dependent": (list, "dependentVariable"),
        "var_explanatory": (list, "explanatoryVariables"),
        "features_to_predict": (_FeatureSet, "featuresToPredict"),
        "dep_mapping": (list, "dependentMapping"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "output"),
        "output_predicted": (_FeatureSet, "outputPredicted"),
        "coefficient_table": (_FeatureSet, "coefficientTable")
    }
    return_values = [
        {
            "name": 'output',
            "display_name": "Output Features",
            "type": _FeatureSet
        },
        {
            "name": "output_predicted",
            "display_name": "Output Predicted",
            "type": _FeatureSet
        },
        {
            "name": "coefficient_table",
            "display_name": "Coefficient Table",
            "type": _FeatureSet
        },
        #{"name" : "variable_of_importance", "display_name" : "Variable of Importance", "type" : _FeatureSet}
    ]

    try:
        res = _execute_gp_tool(gis, "GeneralizedLinearRegression", params,
                               param_db, return_values, _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise

Exemple #23
def append_data(input_layer, append_layer, field_mapping=None, gis=None):
    """
    Only available at ArcGIS Enterprise 10.6.1 and later.

    The Append Data task appends tabular, point, line, or polygon data to an existing layer.
    The input layer must be a hosted feature layer. The tool will add the appended data as
    rows to the input layer. No new output layer is created.

    ================  ===============================================================
    **Argument**      **Description**
    ----------------  ---------------------------------------------------------------
    input_layer       required FeatureLayer. The table, point, line or polygon features.
    ----------------  ---------------------------------------------------------------
    append_layer      required FeatureLayer. The table, point, line, or polygon features
                      to be appended to the input_layer. To append geometry, the
                      append_layer must have the same geometry type as the
                      input_layer. If the geometry types are not the same, the
                      append_layer geometry will be removed and all other matching
                      fields will be appended. The geometry of the input_layer will
                      always be maintained.
    ----------------  ---------------------------------------------------------------
    field_mapping     Optional list. Defines how the fields in append_layer are
                      appended to the input_layer.

                      The following are set by default:

                        - All append_layer fields that match input_layer schema will be appended.
                        - Fields that exist in the input_layer and not in the append_layer will be appended with null values.
                        - Fields that exist in the append_layer and not in the input_layer will not be appended.

                      Optionally choose how input_layer fields will be appended from the following:

                      - AppendField - Matches the input_layer field with an append_layer field of a different name. Field types must match.
                      - Expression - Calculates values for the resulting field. Values are calculated using Arcade expressions. To assign null values, use 'null'.
    ----------------  ---------------------------------------------------------------
    gis               optional GIS, the GIS on which this tool runs. If not
                      specified, the active GIS is used.
    ================  ===============================================================

    :returns: boolean
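
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): append newly collected features
        # to a hosted layer; `inspections_lyr` and `new_inspections_lyr` are
        # assumed existing FeatureLayers with matching schemas.
        success = append_data(input_layer=inspections_lyr,
                              append_layer=new_inspections_lyr)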

    """
    kwargs = locals()
    tool_name = "AppendData"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {"f": "json"}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "append_layer": (_FeatureSet, "appendLayer"),
        "field_mapping": (str, "fieldMapping"),
        "context": (str, "context")
    }
    return_values = []
    _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                     _use_async, url, True)
    return True
def viewshed(input_points: FeatureSet = {'exceededTransferLimit': False,
                                         'spatialReference': {'latestWkid': 3857, 'wkid': 102100},
                                         'geometryType': 'esriGeometryPoint',
                                         'fields': [{'name': 'OID', 'type': 'esriFieldTypeOID', 'alias': 'OID'},
                                                    {'name': 'offseta', 'type': 'esriFieldTypeDouble',
                                                     'alias': 'offseta'},
                                                    {'name': 'offsetb', 'type': 'esriFieldTypeDouble',
                                                     'alias': 'offsetb'}], 'displayFieldName': '', 'features': []},
             maximum_distance: float = None,
             maximum_distance_units: str = """Meters""",
             dem_resolution: str = None,
             observer_height: float = None,
             observer_height_units: str = """Meters""",
             surface_offset: float = None,
             surface_offset_units: str = """Meters""",
             generalize_viewshed_polygons: bool = True,
             gis=None) -> FeatureSet:
    """
    .. image:: _static/images/elevation_viewshed/elevation_viewshed.png         

    The ``viewshed`` method is used to identify visible areas based on observer locations you provide as well as ArcGIS Online Elevation data.

    ===============================    =========================================================
    **Parameter**                      **Description**
    -------------------------------    ---------------------------------------------------------
    input_points                       Required FeatureSet. The point features to use as the observer locations. See :ref:`Feature Input<FeatureInput>`.
    -------------------------------    ---------------------------------------------------------
    maximum_distance                   Optional float. This is a cutoff distance where the computation of visible areas stops. 
                                       Beyond this distance, it is unknown whether the analysis points and the other objects can see each other.

                                       It is useful for modeling current weather conditions or a given time of day, such as dusk. Large values increase computation time.

                                       Unless specified, a default maximum distance will be computed based on the resolution and extent of the source DEM. 
                                       The allowed maximum value is 50 kilometers.

                                       Use ``maximum_distance_units`` to set the units for ``maximum_distance``.
    -------------------------------    ---------------------------------------------------------
    maximum_distance_units             Optional string. The units for the ``maximum_distance`` parameter.
                                
                                       Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles'].
                                 
                                       The default is 'Meters'.
    -------------------------------    ---------------------------------------------------------
    dem_resolution                     Optional string. The approximate spatial resolution (cell size) of the source elevation data used for the calculation.
                                       The resolution values are an approximation of the spatial resolution of the digital elevation model. 
                                       While many elevation sources are distributed in units of arc seconds, the keyword is an approximation of those resolutions in meters for easier understanding.

                                       Choice list:[' ', 'FINEST', '10m', '30m', '90m'].

                                       The default is '90m'.
    -------------------------------    ---------------------------------------------------------
    observer_height                    Optional float. This is the height above the ground of the observer locations.

                                       The default is 1.75 meters, which is approximately the average height of a person. 
                                       If you are looking from an elevated location, such as an observation tower or a tall building, use that height instead.

                                       Use ``observer_height_units`` to set the units for ``observer_height``.
    -------------------------------    ---------------------------------------------------------
    observer_height_units              Optional string. The units for the ``observer_height`` parameter.

                                       Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
    -------------------------------    ---------------------------------------------------------
    surface_offset                     Optional float. The height above the surface of the object you are trying to see.

                                       The default value is 0.0. If you are trying to see buildings or wind turbines, use their height here.
    -------------------------------    ---------------------------------------------------------
    surface_offset_units               Optional string. The units for the ``surface_offset`` parameter.
                                 
                                       Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
    -------------------------------    ---------------------------------------------------------
    generalize_viewshed_polygons       Optional boolean. Determines whether or not the viewshed polygons are to be generalized.

                                       The viewshed calculation is based on a raster elevation model that creates a result with 
                                       stair-stepped edges. To create a more pleasing appearance and improve performance, the 
                                       default behavior is to generalize the polygons. The generalization process smooths the 
                                       boundary of the visible areas and may remove some single-cell visible areas.
    ===============================    =========================================================

    :returns: output_viewshed - Output Viewshed as a FeatureSet (polygons of visible areas for a given set of input observation points).

    .. code-block:: python

        # USAGE EXAMPLE: To identify visible areas from the Esri headquarters office.
        visible_windfarms = viewshed(input_points=hq_fs,
                                     maximum_distance=200,
                                     maximum_distance_units='Meters',
                                     observer_height=6,
                                     observer_height_units='Feet',
                                     surface_offset=100,
                                     surface_offset_units='Meters',
                                     generalize_viewshed_polygons=True)
    """
    kwargs = locals()

    param_db = {
        "input_points": (FeatureSet, "InputPoints"),
        "maximum_distance": (float, "MaximumDistance"),
        "maximum_distance_units": (str, "MaximumDistanceUnits"),
        "dem_resolution": (str, "DEMResolution"),
        "observer_height": (float, "ObserverHeight"),
        "observer_height_units": (str, "ObserverHeightUnits"),
        "surface_offset": (float, "SurfaceOffset"),
        "surface_offset_units": (str, "SurfaceOffsetUnits"),
        "generalize_viewshed_polygons": (bool, "GeneralizeViewshedPolygons"),
        "output_viewshed": (FeatureSet, "Output Viewshed"),
    }
    return_values = [
        {"name": "output_viewshed", "display_name": "Output Viewshed", "type": FeatureSet},
    ]

    if gis is None:
        gis = arcgis.env.active_gis

    url = gis.properties.helperServices.elevation.url

    return _execute_gp_tool(gis, "Viewshed", kwargs, param_db, return_values, _use_async, url)
Exemple #25
def copy_to_data_store(input_layer, output_name=None, gis=None):
    """

    Copies an input feature layer or table to an ArcGIS Data Store and creates a layer in your web GIS.

    For example:

    * Copy a collection of .csv files in a big data file share to the spatiotemporal data store for visualization.

    * Copy the features in the current map extent that are stored in the spatiotemporal data store to the relational data store.

    This tool takes an input layer and copies it to the ArcGIS Data Store, storing the result in your relational or spatiotemporal data store.

    For example, you could copy features that are stored in a big data file share to a relational data store and specify that only features within the current map extent will be copied. This would create a hosted feature service with only those features that were within the specified map extent.

    Parameters:

       input_layer: Input Layer (feature layer). Required parameter.

       output_name: Output Layer Name (str). Optional parameter. If not
          specified, a name is generated for the output service.

       gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


    Returns:
       output - Output Layer as a feature layer collection item
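
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): copy a big data file share
        # layer into the data store; `bdfs_lyr` is an assumed existing layer.
        copied_item = copy_to_data_store(input_layer=bdfs_lyr,
                                         output_name="Copied_Features")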


    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Data Store Copy_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Copy To Data Store')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Layer"),
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Layer",
            "type": _FeatureSet
        },
    ]
    try:
        _execute_gp_tool(gis, "CopyToDataStore", params, param_db,
                         return_values, _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
def find_hot_spots(point_layer,
                   bin_size=5,
                   bin_size_unit="Miles",
                   neighborhood_distance=5,
                   neighborhood_distance_unit="Miles",
                   time_step_interval=None,
                   time_step_interval_unit=None,
                   time_step_alignment=None,
                   time_step_reference=None,
                   output_name=None,
                   gis=None):
    """

    Parameters:

       point_layer: Input Points (FeatureSet). Required parameter.

       bin_size: Bin Size (float). Optional parameter.

       bin_size_unit: Bin Size Unit (str). Optional parameter.
          Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']

       neighborhood_distance: Neighborhood Distance (float). Optional parameter.

       neighborhood_distance_unit: Neighborhood Distance Unit (str). Optional parameter.
          Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']

       time_step_interval: Time Step Interval (int). Optional parameter.

       time_step_interval_unit: Time Step Interval Unit (str). Optional parameter.
          Choice list:['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds']

       time_step_alignment: Time Step Alignment (str). Optional parameter.
          Choice list:['EndTime', 'StartTime', 'ReferenceTime']

       time_step_reference: Time Step Reference (_datetime). Optional parameter.

       output_name: Output Features Name (str). Optional parameter.

       gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


    Returns:
       output - Output Features as a feature layer collection item
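
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): find hot spots of crime points,
        # binned at 1 mile and compared to neighbors within 10 miles.
        # `crime_lyr` is an assumed existing point layer in the active GIS.
        hot_spots = find_hot_spots(point_layer=crime_lyr,
                                   bin_size=1,
                                   bin_size_unit='Miles',
                                   neighborhood_distance=10,
                                   neighborhood_distance_unit='Miles',
                                   output_name="Crime_Hot_Spots")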


    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Hotspot Analysis_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Find Hotspots')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "point_layer": (_FeatureSet, "pointLayer"),
        "bin_size": (float, "binSize"),
        "bin_size_unit": (str, "binSizeUnit"),
        "neighborhood_distance": (float, "neighborhoodDistance"),
        "neighborhood_distance_unit": (str, "neighborhoodDistanceUnit"),
        "time_step_interval": (int, "timeStepInterval"),
        "time_step_interval_unit": (str, "timeStepIntervalUnit"),
        "time_step_alignment": (str, "timeStepAlignment"),
        "time_step_reference": (_datetime, "timeStepReference"),
        #"cell_size" : (int, "cellSize"),
        #"cell_size_units": (str, "cellSizeUnits"),
        #"shape_type" : (str, "shapeType"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Features"),
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]

    try:
        _execute_gp_tool(gis, "FindHotSpots", params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
def find_point_clusters(input_layer,
                        method,
                        min_feature_clusters,
                        search_distance=None,
                        distance_unit=None,
                        output_name=None,
                        gis=None):
    """
    This tool extracts clusters from your input point features and identifies any surrounding noise.

    For example, a nongovernmental organization is studying a particular pest-borne disease. It has
    a point dataset representing households in a study area, some of which are infested, and some of
    which are not. By using the Find Point Clusters tool, an analyst can determine clusters of
    infested households to help pinpoint an area to begin treatment and extermination of pests.

    ==========================   ===============================================================
    **Argument**                 **Description**
    --------------------------   ---------------------------------------------------------------
    input_layer                  required FeatureSet. The point features from which clusters
                                 will be found.
    --------------------------   ---------------------------------------------------------------
    method                       required String. The algorithm used for cluster analysis. This
                                 parameter must be specified as DBSCAN or HDBSCAN.
    --------------------------   ---------------------------------------------------------------
    min_feature_clusters         required Integer. The minimum number of features that must be
                                 found within a cluster's search range for it to be considered
                                 a cluster.
    --------------------------   ---------------------------------------------------------------
    search_distance              optional Float. The distance to search between points to form
                                 a cluster. This is required when method is DBSCAN.
    --------------------------   ---------------------------------------------------------------
    distance_unit                optional String. The `search_distance` units.
    --------------------------   ---------------------------------------------------------------
    output_name                  optional string, The task will create a feature service of the
                                 results. You define the name of the service.
    --------------------------   ---------------------------------------------------------------
    gis                          optional GIS, the GIS on which this tool runs. If not
                                 specified, the active GIS is used.
    ==========================   ===============================================================

    :returns:
       Output feature layer item
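
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): cluster infested households
        # with DBSCAN; `households_lyr` is an assumed existing point layer.
        clusters = find_point_clusters(input_layer=households_lyr,
                                       method='DBSCAN',
                                       min_feature_clusters=10,
                                       search_distance=100,
                                       distance_unit='Meters',
                                       output_name="Infestation_Clusters")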

    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Find Point Clusters_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Find Point Clusters')

    # Register the pre-created output service with the GP task; every other
    # GeoAnalytics wrapper in this module sets this, and without it the task
    # does not write to output_service.
    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "method": (str, "clusterMethod"),
        "min_feature_clusters": (int, "minFeaturesCluster"),
        "distance_unit": (str, "searchDistanceUnit"),
        "search_distance": (float, "searchDistance"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Features"),
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]

    try:
        _execute_gp_tool(gis, "FindPointClusters", params, param_db,
                         return_values, _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise

Exemple #28
def enrich_from_grid(input_layer,
                     grid_layer,
                     enrichment_attributes=None,
                     output_name=None,
                     gis=None):
    """
    The Enrich From Multi-Variable Grid task joins attributes from a multi-variable grid to a point
    layer. The multi-variable grid must be created using the Build Multi-Variable Grid task.
    Metadata from the multi-variable grid is used to efficiently enrich the input point features,
    making it faster than the Join Features task. Attributes in the multi-variable grid are joined
    to the input point features when the features intersect the grid.

    The attributes in the multi-variable grid can be used as explanatory variables when modeling
    spatial relationships with your input point features, and this task allows you to join those
    attributes to the point features quickly.

    Only available at ArcGIS Enterprise 10.7 and later.

    ======================  ===============================================================
    **Argument**            **Description**
    ----------------------  ---------------------------------------------------------------
    input_layer             required FeatureLayer. The point features that will be enriched
                            by the multi-variable grid.
    ----------------------  ---------------------------------------------------------------
    grid_layer              required FeatureLayer. The multi-variable grid layer.
    ----------------------  ---------------------------------------------------------------
    enrichment_attributes   optional String. A list of fields in the multi-variable grid
                            that will be joined to the input point features. If the
                            attributes are not provided, all fields in the multi-variable
                            grid will be joined to the input point features.
    ----------------------  ---------------------------------------------------------------
    output_name             optional string. The task will create a feature service of the
                            results. You define the name of the service.
    ----------------------  ---------------------------------------------------------------
    gis                     optional GIS. The GIS object where the analysis will take place.
    ======================  ===============================================================

    :returns: FeatureLayer
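
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): join grid attributes to points.
        # `points_lyr` and `mvgrid_lyr` are assumed existing layers; the grid
        # layer must come from the Build Multi-Variable Grid task.
        enriched = enrich_from_grid(input_layer=points_lyr,
                                    grid_layer=mvgrid_lyr,
                                    output_name="Enriched_Points")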

    """
    kwargs = locals()
    tool_name = "EnrichFromMultiVariableGrid"
    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url
    params = {
        "f": "json",
    }
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Enrich_Grid_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Enrich Grid Layers')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputFeatures"),
        "grid_layer": (_FeatureSet, "gridLayer"),
        "enrichment_attributes": (str, "enrichAttributes"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "output"),
    }

    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]

    try:
        _execute_gp_tool(gis, tool_name, params, param_db, return_values,
                         _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
def create_buffers(
    input_layer,
    distance = 1,
    distance_unit = "Miles",
    field = None,
    method = """Planar""",
    dissolve_option = """None""",
    dissolve_fields = None,
    summary_fields = None,
    multipart = False,
    output_name = None,
    context = None,
    gis = None):
    """

    A buffer is an area that covers a given distance from a point, line, or polygon feature.

    Buffers are typically used to create areas that can be further analyzed using other tools. For example, if the question is "What buildings are within 1 mile of the school?", the answer can be found by creating a 1-mile buffer around the school and overlaying the buffer with the layer containing building footprints. The end result is a layer of those buildings within 1 mile of the school.

    For example:

    * Using linear river features, buffer each river by 50 times the width of the river to determine a proposed riparian boundary.

    * Given areas representing countries, buffer each country by 200 nautical miles to determine the maritime boundary.



Parameters:

   input_layer: Input Features (_FeatureSet). Required parameter.

   distance: Buffer Distance (float). Optional parameter.

   distance_unit: Buffer Distance Unit (str). Optional parameter.
      Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']

   field: Buffer Distance Field (str). Optional parameter.

   method: Method (str). Required parameter.
      Choice list:['Geodesic', 'Planar']

   dissolve_option: Dissolve Option (str). Optional parameter.
      Choice list:['All', 'List', 'None']

   dissolve_fields: Dissolve Fields (str). Optional parameter.

   summary_fields: Summary Statistics (str). Optional parameter.

   multipart: Allow Multipart Geometries (bool). Optional parameter.

   output_name: Output Features Name (str). Optional parameter. If not specified, a name is generated for the output service.

   gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


Returns:
   output - Output Features as a feature layer collection item
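
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): buffer schools by 1 mile and
        # dissolve overlaps; `schools_lyr` is an assumed existing point layer.
        buffers = create_buffers(input_layer=schools_lyr,
                                 distance=1,
                                 distance_unit='Miles',
                                 dissolve_option='All',
                                 output_name="School_Buffers")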


    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    if isinstance(input_layer, FeatureCollection) and \
       'layers' in input_layer.properties and \
       len(input_layer.properties.layers) > 0:
        input_layer = _FeatureSet.from_dict(
            featureset_dict=input_layer._lazy_properties.layers[0].featureSet)

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Create Buffers Analysis_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name, output_service_name, 'Create Buffers')

    params['output_name'] = _json.dumps({
        "serviceProperties": {"name" : output_name, "serviceUrl" : output_service.url},
        "itemProperties": {"itemId" : output_service.itemid}})

    _set_context(params)
    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "distance": (float, "distance"),
        "distance_unit": (str, "distanceUnit"),
        "field": (str, "field"),
        "method": (str, "method"),
        "dissolve_option": (str, "dissolveOption"),
        "dissolve_fields": (str, "dissolveFields"),
        "summary_fields": (str, "summaryFields"),
        "multipart": (bool, "multipart"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Features"),
    }
    return_values = [
        {"name": "output", "display_name": "Output Features", "type": _FeatureSet},
    ]

    try:
        _execute_gp_tool(gis, "CreateBuffers", params, param_db, return_values, _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise
def calculate_density(input_layer,
                      fields=None,
                      weight="""Uniform""",
                      bin_type="""Square""",
                      bin_size=None,
                      bin_size_unit=None,
                      time_step_interval=None,
                      time_step_interval_unit=None,
                      time_step_repeat_interval=None,
                      time_step_repeat_interval_unit=None,
                      time_step_reference=None,
                      radius=None,
                      radius_unit=None,
                      area_units="""SquareKilometers""",
                      output_name=None,
                      gis=None):
    """




Parameters:

   input_layer: Input Points (Feature layer). Required parameter.

   fields: Population Field (str). Optional parameter.

   weight: Weight (str). Required parameter.
      Choice list:['Uniform', 'Kernel']

   bin_type: Output Bin Type (str). Required parameter.
      Choice list:['Square', 'Hexagon']

   bin_size: Output Bin Size (float). Required parameter.

   bin_size_unit: Output Bin Size Unit (str). Required parameter.
      Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']

   time_step_interval: Time Step Interval (int). Optional parameter.

   time_step_interval_unit: Time Step Interval Unit (str). Optional parameter.
      Choice list:['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds']

   time_step_repeat_interval: Time Step Repeat Interval (int). Optional parameter.

   time_step_repeat_interval_unit: Time Step Repeat Interval Unit (str). Optional parameter.
      Choice list:['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds']

   time_step_reference: Time Step Reference (_datetime). Optional parameter.

   radius: Radius (float). Required parameter.

   radius_unit: Radius Unit (str). Required parameter.
      Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']

   area_units: Area Unit Scale Factor (str). Optional parameter.
      Choice list:['SquareMeters', 'SquareKilometers', 'Hectares', 'SquareFeet', 'SquareYards', 'SquareMiles', 'Acres']

   output_name: Output Features Name (str). Optional parameter. If not specified, a name is generated for the output service.

   gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.


Returns:
   output - Output Features as a feature layer collection item
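
    .. code-block:: python

        # USAGE EXAMPLE (illustrative sketch): density of incident points in
        # 1-kilometer bins with a 2-kilometer kernel radius; `incidents_lyr`
        # is an assumed existing point layer in the active GIS.
        density = calculate_density(input_layer=incidents_lyr,
                                    weight='Kernel',
                                    bin_type='Hexagon',
                                    bin_size=1,
                                    bin_size_unit='Kilometers',
                                    radius=2,
                                    radius_unit='Kilometers',
                                    area_units='SquareKilometers',
                                    output_name="Incident_Density")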


    """
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Calculate Density Analysis_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    output_service = _create_output_service(gis, output_name,
                                            output_service_name,
                                            'Calculate Density')

    params['output_name'] = _json.dumps({
        "serviceProperties": {
            "name": output_name,
            "serviceUrl": output_service.url
        },
        "itemProperties": {
            "itemId": output_service.itemid
        }
    })

    _set_context(params)

    param_db = {
        "input_layer": (_FeatureSet, "inputLayer"),
        "fields": (str, "fields"),
        "weight": (str, "weight"),
        "bin_type": (str, "binType"),
        "bin_size": (float, "binSize"),
        "bin_size_unit": (str, "binSizeUnit"),
        "time_step_interval": (int, "timeStepInterval"),
        "time_step_interval_unit": (str, "timeStepIntervalUnit"),
        "time_step_repeat_interval": (int, "timeStepRepeatInterval"),
        "time_step_repeat_interval_unit": (str, "timeStepRepeatIntervalUnit"),
        "time_step_reference": (_datetime, "timeStepReference"),
        "radius": (float, "radius"),
        "radius_unit": (str, "radiusUnit"),
        "area_units": (str, "areaUnits"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Features"),
    }
    return_values = [
        {
            "name": "output",
            "display_name": "Output Features",
            "type": _FeatureSet
        },
    ]

    try:
        _execute_gp_tool(gis, "CalculateDensity", params, param_db,
                         return_values, _use_async, url, True)
        return output_service
    except:
        output_service.delete()
        raise