Example #1
def erase(dataset_path, erase_dataset_path, **kwargs):
    """Erase feature geometry where it overlaps erase-dataset geometry.

    Args:
        dataset_path (str): Path of the dataset.
        erase_dataset_path (str): Path of the dataset defining the erase-area.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        erase_where_sql (str): SQL where-clause for erase-dataset subselection.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('erase_where_sql')
    kwargs.setdefault('tolerance')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Erase features in %s where overlapping %s.",
        dataset_path,
        erase_dataset_path,
    )
    meta = {'dataset': arcobj.dataset_metadata(dataset_path)}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql']),
        'erase': arcobj.DatasetView(erase_dataset_path,
                                    kwargs['erase_where_sql']),
    }
    temp_output_path = unique_path('output')
    with view['dataset'], view['erase']:
        arcpy.analysis.Erase(
            in_features=view['dataset'].name,
            erase_features=view['erase'].name,
            out_feature_class=temp_output_path,
            cluster_tolerance=kwargs['tolerance'],
        )
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with session:
        delete(dataset_path,
               dataset_where_sql=kwargs['dataset_where_sql'],
               log_level=None)
        insert_from_path(dataset_path,
                         insert_dataset_path=temp_output_path,
                         log_level=None)
    dataset.delete(temp_output_path, log_level=None)
    log("End: Erase.")
    return dataset_path
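A hypothetical call for illustration (the geodatabase paths and where-clause are invented):

# Erase water-body areas out of the residential parcels only.
erase(
    'C:/data/work.gdb/parcels',
    erase_dataset_path='C:/data/work.gdb/water_bodies',
    dataset_where_sql="ZONING = 'RES'",
    use_edit_session=True,
)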
Example #2
def keep_by_location(dataset_path, location_dataset_path, **kwargs):
    """Keep features where geometry overlaps location-dataset geometry.

    Args:
        dataset_path (str): Path of the dataset.
        location_dataset_path (str): Path of location-dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        location_where_sql (str): SQL where-clause for location-dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        collections.Counter: Counts for each feature action.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('location_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Keep features in %s where overlapping %s.",
        dataset_path,
        location_dataset_path,
    )
    meta = {'dataset': arcobj.dataset_metadata(dataset_path)}
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    view = {
        'dataset':
        arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql']),
        'location':
        arcobj.DatasetView(location_dataset_path,
                           kwargs['location_where_sql']),
    }
    with session, view['dataset'], view['location']:
        arcpy.management.SelectLayerByLocation(
            in_layer=view['dataset'].name,
            overlap_type='intersect',
            select_features=view['location'].name,
            selection_type='new_selection',
        )
        arcpy.management.SelectLayerByLocation(
            in_layer=view['dataset'].name, selection_type='switch_selection')
        feature_count = delete(view['dataset'].name, log_level=None)
    for key in ['deleted', 'unchanged']:
        log("%s features %s.", feature_count[key], key)
    log("End: Keep.")
    return feature_count
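A hypothetical call showing the returned Counter (paths invented):

counts = keep_by_location(
    'C:/data/work.gdb/addresses',
    location_dataset_path='C:/data/work.gdb/city_limits',
)
# Counter keys mirror the logged feature actions.
print(counts['deleted'], counts['unchanged'])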
Example #3
def closest_facility_route(
    dataset_path,
    id_field_name,
    facility_path,
    facility_id_field_name,
    network_path,
    cost_attribute,
    **kwargs
):
    """Generate route info dictionaries for dataset features's closest facility.

    Args:
        dataset_path (str): Path of the dataset.
        id_field_name (str): Name of the dataset ID field.
        facility_path (str): Path of the facilities dataset.
        facility_id_field_name (str): Name of the facility ID field.
        network_path (str): Path of the network dataset.
        cost_attribute (str): Name of the network cost attribute to use.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        facility_where_sql (str): SQL where-clause for facility subselection.
        max_cost (float): Maximum travel cost the search will attempt, in the cost
            attribute's units.
        restriction_attributes (iter): Collection of network restriction attribute
            names to use.
        travel_from_facility (bool): Flag to indicate performing the analysis
            travelling from (True) or to (False) the facility. Default is False.
        log_level (str): Level to log the function at. Default is 'info'.

    Yields:
        dict: The next feature's analysis result details.
            Dictionary keys: 'dataset_id', 'facility_id', 'cost', 'geometry'.
            The 'cost' value (float) is in the units of the cost_attribute.
            The 'geometry' value (arcpy.Geometry) is in the dataset's spatial
            reference.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('facility_where_sql')
    kwargs.setdefault('max_cost')
    kwargs.setdefault('restriction_attributes')
    kwargs.setdefault('travel_from_facility', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Generate closest facility in %s to locations in %s.",
        facility_path,
        dataset_path,
    )
    meta = {
        'id_field': {
            'dataset': arcobj.field_metadata(dataset_path, id_field_name),
            'facility': arcobj.field_metadata(facility_path, facility_id_field_name),
        }
    }
    keys = {
        'cursor': [
            'FacilityID', 'IncidentID', 'total_{}'.format(cost_attribute), 'shape@'
        ]
    }
    view = {
        'dataset': arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql']),
        'facility': arcobj.DatasetView(facility_path, kwargs['facility_where_sql']),
    }
    with arcobj.ArcExtension('Network'):
        arcpy.na.MakeClosestFacilityLayer(
            in_network_dataset=network_path,
            out_network_analysis_layer='closest',
            impedance_attribute=cost_attribute,
            travel_from_to=(
                'travel_from' if kwargs['travel_from_facility'] else 'travel_to'
            ),
            default_cutoff=kwargs['max_cost'],
            UTurn_policy='allow_dead_ends_and_intersections_only',
            restriction_attribute_name=kwargs['restriction_attributes'],
            hierarchy='no_hierarchy',
            output_path_shape='true_lines_with_measures',
        )
        # Load facilities.
        with view['facility']:
            arcpy.na.AddFieldToAnalysisLayer(
                in_network_analysis_layer='closest',
                sub_layer='Facilities',
                field_name='facility_id',
                field_type=meta['id_field']['facility']['type'],
                field_precision=meta['id_field']['facility']['precision'],
                field_scale=meta['id_field']['facility']['scale'],
                field_length=meta['id_field']['facility']['length'],
                field_is_nullable=True,
            )
            arcpy.na.AddLocations(
                in_network_analysis_layer='closest',
                sub_layer='Facilities',
                in_table=view['facility'].name,
                field_mappings='facility_id {} #'.format(facility_id_field_name),
                append=False,
                exclude_restricted_elements=True,
            )
        facility_oid_id = attributes.id_map(
            'closest/Facilities', id_field_names='oid@', field_names='facility_id'
        )
        # Load dataset locations.
        with view['dataset']:
            arcpy.na.AddFieldToAnalysisLayer(
                in_network_analysis_layer='closest',
                sub_layer='Incidents',
                field_name='dataset_id',
                field_type=meta['id_field']['dataset']['type'],
                field_precision=meta['id_field']['dataset']['precision'],
                field_scale=meta['id_field']['dataset']['scale'],
                field_length=meta['id_field']['dataset']['length'],
                field_is_nullable=True,
            )
            arcpy.na.AddLocations(
                in_network_analysis_layer='closest',
                sub_layer='Incidents',
                in_table=view['dataset'].name,
                field_mappings='dataset_id {} #'.format(id_field_name),
                append=False,
                snap_to_position_along_network=False,
                exclude_restricted_elements=True,
            )
        dataset_oid_id = attributes.id_map(
            'closest/Incidents', id_field_names='oid@', field_names='dataset_id'
        )
        arcpy.na.Solve(
            in_network_analysis_layer='closest',
            ignore_invalids=True,
            terminate_on_solve_error=True,
        )
    cursor = arcpy.da.SearchCursor('closest/Routes', field_names=keys['cursor'])
    with cursor:
        for row in cursor:
            feat = dict(zip(keys['cursor'], row))
            yield {
                'dataset_id': dataset_oid_id[feat['IncidentID']],
                'facility_id': facility_oid_id[feat['FacilityID']],
                'cost': feat['total_' + cost_attribute],
                'geometry': feat['shape@'],
            }

    dataset.delete('closest', log_level=None)
    log("End: Generate.")
Example #4
def generate_service_rings(
    dataset_path,
    output_path,
    network_path,
    cost_attribute,
    ring_width,
    max_distance,
    **kwargs
):
    """Create facility service ring features using a network dataset.

    Args:
        dataset_path (str): Path of the dataset.
        output_path (str): Path of the output service rings dataset.
        network_path (str): Path of the network dataset.
        cost_attribute (str): Name of the network cost attribute to use.
        ring_width (float): Distance a service ring represents in travel, in the
            dataset's units.
        max_distance (float): Distance in travel from the facility the outer ring will
            extend to, in the dataset's units.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        id_field_name (str): Name of facility ID field.
        restriction_attributes (iter): Collection of network restriction attribute
            names to use.
        travel_from_facility (bool): Flag to indicate performing the analysis
            travelling from (True) or to (False) the facility. Default is False.
        detailed_features (bool): Flag to generate high-detail features. Default is
            False.
        overlap_facilities (bool): Flag to overlap different facility service areas.
            Default is True.
        trim_value (float): Distance from the network features at which to trim
            service areas.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the output service rings dataset.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('id_field_name')
    kwargs.setdefault('restriction_attributes')
    kwargs.setdefault('travel_from_facility', False)
    kwargs.setdefault('detailed_features', False)
    kwargs.setdefault('overlap_facilities', True)
    kwargs.setdefault('trim_value')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Generate service rings for %s.", dataset_path)
    # trim_value assumes meters if not input as linear_unit string.
    if kwargs['trim_value'] is not None:
        trim_value = arcobj.linear_unit_string(kwargs['trim_value'], dataset_path)
    else:
        trim_value = None
    view = {'dataset': arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql'])}
    with arcobj.ArcExtension('Network'):
        arcpy.na.MakeServiceAreaLayer(
            in_network_dataset=network_path,
            out_network_analysis_layer='service_area',
            impedance_attribute=cost_attribute,
            travel_from_to=(
                'travel_from' if kwargs['travel_from_facility'] else 'travel_to'
            ),
            default_break_values=(
                ' '.join(
                    str(x) for x in range(ring_width, max_distance + 1, ring_width)
                )
            ),
            polygon_type=(
                'detailed_polys' if kwargs['detailed_features'] else 'simple_polys'
            ),
            merge=('no_merge' if kwargs['overlap_facilities'] else 'no_overlap'),
            nesting_type='rings',
            UTurn_policy='allow_dead_ends_and_intersections_only',
            restriction_attribute_name=kwargs['restriction_attributes'],
            polygon_trim=(True if trim_value else False),
            poly_trim_value=trim_value,
            hierarchy='no_hierarchy',
        )
        with view['dataset']:
            arcpy.na.AddLocations(
                in_network_analysis_layer="service_area",
                sub_layer="Facilities",
                in_table=view['dataset'].name,
                field_mappings='Name {} #'.format(kwargs['id_field_name']),
                search_tolerance=max_distance,
                match_type='match_to_closest',
                append='clear',
                snap_to_position_along_network='no_snap',
                exclude_restricted_elements=True,
            )
        arcpy.na.Solve(
            in_network_analysis_layer="service_area",
            ignore_invalids=True,
            terminate_on_solve_error=True,
        )
    dataset.copy('service_area/Polygons', output_path, log_level=None)
    dataset.delete('service_area', log_level=None)
    if kwargs['id_field_name']:
        meta = {
            'id_field': arcobj.field_metadata(dataset_path, kwargs['id_field_name'])
        }
        dataset.add_field_from_metadata(output_path, meta['id_field'], log_level=None)
        attributes.update_by_function(
            output_path,
            meta['id_field']['name'],
            function=TYPE_ID_FUNCTION_MAP[meta['id_field']['type']],
            field_as_first_arg=False,
            arg_field_names=['Name'],
            log_level=None,
        )
    log("End: Generate.")
    return output_path
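A hypothetical call producing 5-, 10-, and 15-minute service rings (paths, cost attribute, and ID field invented; note ring_width and max_distance must be integers to satisfy the range() call above):

generate_service_rings(
    'C:/data/work.gdb/clinics',
    output_path='C:/data/work.gdb/clinic_rings',
    network_path='C:/data/network.gdb/streets_nd',
    cost_attribute='DriveMinutes',
    ring_width=5,
    max_distance=15,
    id_field_name='clinic_id',
)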
Example #5
def delete(dataset_path, **kwargs):
    """Delete features in the dataset.

    Args:
        dataset_path (str): Path of the dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        collections.Counter: Counts for each feature action.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    if kwargs['dataset_where_sql']:
        log(
            "Start: Delete features from %s where `%s`.",
            dataset_path,
            kwargs['dataset_where_sql'],
        )
    else:
        log("Start: Delete features from %s.", dataset_path)
    meta = {'dataset': arcobj.dataset_metadata(dataset_path)}
    truncate_error_codes = [
        # "Only supports Geodatabase tables and feature classes."
        'ERROR 000187',
        # "Operation not supported on a versioned table."
        'ERROR 001259',
        # "Operation not supported on table {table name}."
        'ERROR 001260',
        # Operation not supported on a feature class in a controller dataset.
        'ERROR 001395',
    ]
    feature_count = Counter()
    # Can use (faster) truncate when no sub-selection or edit session.
    run_truncate = (kwargs['dataset_where_sql'] is None
                    and kwargs['use_edit_session'] is False)
    if run_truncate:
        feature_count['deleted'] = dataset.feature_count(dataset_path)
        feature_count['unchanged'] = 0
        try:
            arcpy.management.TruncateTable(in_table=dataset_path)
        except arcpy.ExecuteError:
            # Avoid arcpy.GetReturnCode(); error code position inconsistent.
            # Search messages for 'ERROR ######' instead.
            if any(code in arcpy.GetMessages()
                   for code in truncate_error_codes):
                LOG.debug("Truncate unsupported; will try deleting rows.")
                run_truncate = False
            else:
                raise

    if not run_truncate:
        view = {
            'dataset':
            arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql'])
        }
        session = arcobj.Editor(meta['dataset']['workspace_path'],
                                kwargs['use_edit_session'])
        with view['dataset'], session:
            feature_count['deleted'] = view['dataset'].count
            arcpy.management.DeleteRows(in_rows=view['dataset'].name)
        feature_count['unchanged'] = dataset.feature_count(dataset_path)
    for key in ['deleted', 'unchanged']:
        log("%s features %s.", feature_count[key], key)
    log("End: Delete.")
    return feature_count
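Two hypothetical calls (path and where-clause invented): the first is eligible for the fast TruncateTable path; the second must fall back to DeleteRows because of the subselection.

delete('C:/data/work.gdb/scratch_table')
delete('C:/data/work.gdb/parcels', dataset_where_sql="STATUS = 'RETIRED'")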
Example #6
def update_from_path(dataset_path,
                     update_dataset_path,
                     id_field_names,
                     field_names=None,
                     **kwargs):
    """Update features in dataset from another dataset.

    Args:
        dataset_path (str): Path of the dataset.
        update_dataset_path (str): Path of dataset to update features from.
        id_field_names (iter, str): Name(s) of the ID field/key(s).
        field_names (iter): Collection of field names/keys to check & update. Listed
            fields must be present in both datasets. If field_names is None, all
            fields shared by both datasets will be used.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection. WARNING:
            defining this has major effects: filtered features will not be considered
            for updating or deletion, and duplicates in the update features will be
            inserted as if novel.
        update_where_sql (str): SQL where-clause for update-dataset subselection.
        subset_where_sqls (iter): Collection of SQL where-clauses for performing the
            update between the datasets in subsets.
        delete_missing_features (bool): True if update should delete features missing
            from update_features, False otherwise. Default is True.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            True.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        collections.Counter: Counts for each feature action.

    """
    for key in ['dataset_where_sql', 'update_where_sql']:
        kwargs.setdefault(key)
        if not kwargs[key]:
            kwargs[key] = "1=1"
    kwargs.setdefault('subset_where_sqls', ["1=1"])
    kwargs.setdefault('delete_missing_features', True)
    kwargs.setdefault('use_edit_session', True)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Update features in %s from %s.", dataset_path,
        update_dataset_path)
    meta = {
        'dataset': arcobj.dataset_metadata(dataset_path),
        'update': arcobj.dataset_metadata(update_dataset_path),
    }
    if field_names is None:
        field_names = (
            set(name.lower()
                for name in meta['dataset']['field_names_tokenized'])
            & set(name.lower()
                  for name in meta['update']['field_names_tokenized']))
    else:
        field_names = set(name.lower() for name in field_names)
    # OIDs & area/length "fields" have no business being part of an update.
    for key in ['oid@', 'shape@area', 'shape@length']:
        field_names.discard(key)
    keys = {
        'id': list(contain(id_field_names)),
        'attr': list(contain(field_names))
    }
    keys['row'] = keys['id'] + keys['attr']
    feature_count = Counter()
    for kwargs['subset_where_sql'] in contain(kwargs['subset_where_sqls']):
        if kwargs['subset_where_sql'] != "1=1":
            log("Subset: `%s`", kwargs['subset_where_sql'])
        iters = attributes.as_iters(
            update_dataset_path,
            keys['row'],
            dataset_where_sql=(
                "({update_where_sql}) and ({subset_where_sql})".format(
                    **kwargs)))
        view = arcobj.DatasetView(
            dataset_path,
            dataset_where_sql=(
                "({dataset_where_sql}) and ({subset_where_sql})".format(
                    **kwargs)),
            field_names=keys['row'])
        with view:
            feature_count.update(
                update_from_iters(
                    dataset_path=view.name,
                    update_features=iters,
                    id_field_names=keys['id'],
                    field_names=keys['row'],
                    delete_missing_features=kwargs['delete_missing_features'],
                    use_edit_session=kwargs['use_edit_session'],
                    log_level=None,
                ))
    for key in UPDATE_TYPES:
        log("%s features %s.", feature_count[key], key)
    log("End: Update.")
    return feature_count
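A hypothetical call updating two attributes keyed on a shared ID field (paths and field names invented):

counts = update_from_path(
    'C:/data/work.gdb/parcels',
    update_dataset_path='C:/data/source.gdb/parcels_current',
    id_field_names='parcel_id',
    field_names=['owner_name', 'assessed_value'],
    delete_missing_features=False,
)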
Example #7
def insert_from_path(dataset_path,
                     insert_dataset_path,
                     field_names=None,
                     **kwargs):
    """Insert features into dataset from another dataset.

    Args:
        dataset_path (str): Path of the dataset.
        insert_dataset_path (str): Path of dataset to insert features from.
        field_names (iter): Collection of field names to insert. Listed fields must be
            present in both datasets. If field_names is None, all fields shared by
            both datasets will be inserted.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        insert_where_sql (str): SQL where-clause for insert-dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        collections.Counter: Counts for each feature action.

    """
    kwargs.setdefault('insert_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Insert features into %s from %s.", dataset_path,
        insert_dataset_path)
    meta = {
        'dataset': arcobj.dataset_metadata(dataset_path),
        'insert': arcobj.dataset_metadata(insert_dataset_path),
    }
    if field_names is None:
        keys = set.intersection(*(set(
            name.lower() for name in _meta['field_names_tokenized'])
                                  for _meta in meta.values()))
    else:
        keys = set(name.lower() for name in contain(field_names))
    # OIDs & area/length "fields" have no business being part of an insert.
    # Geometry itself is handled separately in append function.
    for _meta in meta.values():
        for key in chain(*_meta['field_token'].items()):
            keys.discard(key)
    append_kwargs = {
        'inputs': unique_name('view'),
        'target': dataset_path,
        'schema_type': 'no_test',
        'field_mapping': arcpy.FieldMappings(),
    }
    # Create field maps.
    # ArcGIS Pro's no-test append is case-sensitive (verified 1.0-1.1.1).
    # Avoid this problem by using field mapping.
    # BUG-000090970 - ArcGIS Pro 'No test' field mapping in Append tool does not auto-
    # map to the same field name if naming convention differs.
    for key in keys:
        field_map = arcpy.FieldMap()
        field_map.addInputField(insert_dataset_path, key)
        append_kwargs['field_mapping'].addFieldMap(field_map)
    view = arcobj.DatasetView(
        insert_dataset_path,
        kwargs['insert_where_sql'],
        view_name=append_kwargs['inputs'],
        # Must be nonspatial to append to nonspatial table.
        force_nonspatial=(not meta['dataset']['is_spatial']),
    )
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with view, session:
        arcpy.management.Append(**append_kwargs)
        feature_count = Counter({'inserted': view.count})
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
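A hypothetical call appending a filtered subset of another dataset (paths and where-clause invented):

insert_from_path(
    'C:/data/work.gdb/parcels',
    insert_dataset_path='C:/data/source.gdb/parcels_current',
    insert_where_sql="STATUS = 'NEW'",
)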
Example #8
def eliminate_interior_rings(dataset_path,
                             max_area=None,
                             max_percent_total_area=None,
                             **kwargs):
    """Eliminate interior rings of polygon features.

    Note:
        If no value is provided for either max_area or max_percent_total_area, (nearly)
        all interior rings will be removed. Technically, max_percent_total_area will be
        set to 99.9999.

    Args:
        dataset_path (str): Path of the dataset.
        max_area (float, str): Maximum area of parts to eliminate; parts smaller than
            this are removed. Numeric area will be in dataset's units. String area
            will be formatted as '{number} {unit}'.
        max_percent_total_area (float): Maximum percent of total area for parts to
            eliminate; parts below this share are removed.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Eliminate interior rings in %s.", dataset_path)
    # Only set max_percent_total_area default if neither it nor max_area is defined.
    if all([max_area is None, max_percent_total_area is None]):
        max_percent_total_area = 99.9999
    if all([max_area is not None, max_percent_total_area is not None]):
        condition = 'area_or_percent'
    elif max_area is not None:
        condition = 'area'
    else:
        condition = 'percent'
    meta = {'dataset': arcobj.dataset_metadata(dataset_path)}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    temp_output_path = unique_path('output')
    with view['dataset']:
        arcpy.management.EliminatePolygonPart(
            in_features=view['dataset'].name,
            out_feature_class=temp_output_path,
            condition=condition,
            part_area=max_area,
            part_area_percent=max_percent_total_area,
            part_option='contained_only',
        )
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with session:
        delete(dataset_path,
               dataset_where_sql=kwargs['dataset_where_sql'],
               log_level=None)
        insert_from_path(dataset_path,
                         insert_dataset_path=temp_output_path,
                         log_level=None)
    dataset.delete(temp_output_path, log_level=None)
    log("End: Eliminate.")
    return dataset_path
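A hypothetical call using the '{number} {unit}' string form of max_area (path invented; the unit keyword assumes arcpy's areal-unit string syntax):

eliminate_interior_rings('C:/data/work.gdb/lakes', max_area='100 SquareMeters')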
Example #9
def dissolve(dataset_path,
             dissolve_field_names=None,
             multipart=True,
             **kwargs):
    """Dissolve geometry of features that share values in given fields.

    Args:
        dataset_path (str): Path of the dataset.
        dissolve_field_names (iter): Iterable of field names to dissolve on.
        multipart (bool): Flag to allow multipart features in output.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        unsplit_lines (bool): Flag to merge line features when endpoints meet without
            crossing features. Default is False.
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('unsplit_lines', False)
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Dissolve features in %s on fields: %s.",
        dataset_path,
        dissolve_field_names,
    )
    meta = {
        'dataset': arcobj.dataset_metadata(dataset_path),
        'orig_tolerance': arcpy.env.XYTolerance,
    }
    keys = {'dissolve': tuple(contain(dissolve_field_names))}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    temp_output_path = unique_path('output')
    with view['dataset']:
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = kwargs['tolerance']
        arcpy.management.Dissolve(
            in_features=view['dataset'].name,
            out_feature_class=temp_output_path,
            dissolve_field=keys['dissolve'],
            multi_part=multipart,
            unsplit_lines=kwargs['unsplit_lines'],
        )
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = meta['orig_tolerance']
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with session:
        delete(dataset_path,
               dataset_where_sql=kwargs['dataset_where_sql'],
               log_level=None)
        insert_from_path(dataset_path,
                         insert_dataset_path=temp_output_path,
                         log_level=None)
    dataset.delete(temp_output_path, log_level=None)
    log("End: Dissolve.")
    return dataset_path
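A hypothetical call dissolving a street centerline dataset on two fields (path and field names invented):

dissolve(
    'C:/data/work.gdb/streets',
    dissolve_field_names=['street_name', 'speed_limit'],
    multipart=False,
    unsplit_lines=True,
)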
Example #10
def id_near_info_map(dataset_path,
                     dataset_id_field_name,
                     near_dataset_path,
                     near_id_field_name,
                     max_near_distance=None,
                     **kwargs):
    """Return mapping dictionary of feature IDs/near-feature info.

    Args:
        dataset_path (str): Path of the dataset.
        dataset_id_field_name (str): Name of ID field.
        near_dataset_path (str): Path of the near-dataset.
        near_id_field_name (str): Name of the near ID field.
        max_near_distance (float): Maximum distance to search for near-features, in
            units of the dataset's spatial reference.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        near_where_sql (str): SQL where-clause for near-dataset subselection.
        near_rank (int): Nearness rank of the feature to map info for. Default is 1.

    Returns:
        dict: Mapping of the dataset ID to a near-feature info dictionary.
            Info dictionary keys: 'id', 'near_id', 'rank', 'distance',
            'angle', 'near_x', 'near_y'.
            'distance' value (float) will match linear unit of the dataset's
            spatial reference.
            'angle' value (float) is in decimal degrees.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('near_where_sql')
    kwargs.setdefault('near_rank', 1)
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql']),
        'near': arcobj.DatasetView(near_dataset_path,
                                   kwargs['near_where_sql']),
    }
    with view['dataset'], view['near']:
        temp_near_path = unique_path('near')
        arcpy.analysis.GenerateNearTable(
            in_features=view['dataset'].name,
            near_features=view['near'].name,
            out_table=temp_near_path,
            search_radius=max_near_distance,
            location=True,
            angle=True,
            closest=False,
            closest_count=kwargs['near_rank'],
        )
        oid_id_map = attributes.id_map(view['dataset'].name, 'oid@',
                                       dataset_id_field_name)
        near_oid_id_map = attributes.id_map(view['near'].name, 'oid@',
                                            near_id_field_name)
    field_names = [
        'in_fid', 'near_fid', 'near_dist', 'near_angle', 'near_x', 'near_y',
        'near_rank'
    ]
    near_info_map = {}
    for near_info in attributes.as_dicts(temp_near_path, field_names):
        if near_info['near_rank'] == kwargs['near_rank']:
            _id = oid_id_map[near_info['in_fid']]
            near_info_map[_id] = {
                'id': _id,
                'near_id': near_oid_id_map[near_info['near_fid']],
                'rank': near_info['near_rank'],
                'distance': near_info['near_dist'],
                'angle': near_info['near_angle'],
                'near_x': near_info['near_x'],
                'near_y': near_info['near_y'],
            }
    dataset.delete(temp_near_path, log_level=None)
    return near_info_map
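A hypothetical call mapping each hydrant to its nearest water main within 50 dataset units (paths and field names invented):

near_info = id_near_info_map(
    'C:/data/work.gdb/hydrants',
    dataset_id_field_name='hydrant_id',
    near_dataset_path='C:/data/work.gdb/water_mains',
    near_id_field_name='main_id',
    max_near_distance=50.0,
)
for _id, info in near_info.items():
    print(_id, info['near_id'], round(info['distance'], 1))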
Example #11
def union(dataset_path, field_name, union_dataset_path, union_field_name,
          **kwargs):
    """Assign union attribute to features, splitting where necessary.

    Note:
        This function has a 'chunking' loop routine in order to avoid an unhelpful
        output error that occurs when the inputs are rather large. For some reason the
        union will 'succeed' with an empty output warning, but not create an output
        dataset. Running the union against smaller sets of data generally avoids
        this conundrum.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the dataset's field to assign to.
        union_dataset_path (str): Path of the union dataset.
        union_field_name (str): Name of union dataset's field with values to assign.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        chunk_size (int): Number of features to process per loop. Default is 4096.
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        union_where_sql (str): SQL where-clause for the union dataset subselection.
        replacement_value: Value to replace overlay field values with.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('chunk_size', 4096)
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('union_where_sql')
    kwargs.setdefault('tolerance')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Union-set attributes in %s on %s by overlay values in %s on %s.",
        field_name,
        dataset_path,
        union_field_name,
        union_dataset_path,
    )
    if kwargs.get('replacement_value') is not None:
        update_function = lambda x: kwargs['replacement_value'] if x else None
    else:
        # Union puts empty string when identity feature not present.
        # Fix to null (replacement value function does this inherently).
        update_function = (lambda x: None if x == '' else x)
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    # Create a temporary copy of the union dataset.
    temp_union = arcobj.TempDatasetCopy(union_dataset_path,
                                        kwargs['union_where_sql'],
                                        field_names=[union_field_name])
    with view['dataset'], temp_union:
        # Avoid field name collisions with neutral field name.
        temp_union.field_name = dataset.rename_field(
            temp_union.path,
            union_field_name,
            new_field_name=unique_name(union_field_name),
            log_level=None,
        )
        for view['chunk'] in view['dataset'].as_chunks(kwargs['chunk_size']):
            temp_output_path = unique_path('output')
            arcpy.analysis.Union(
                in_features=[view['chunk'].name, temp_union.path],
                out_feature_class=temp_output_path,
                join_attributes='all',
                cluster_tolerance=kwargs['tolerance'],
                gaps=False,
            )
            # Clean up bad or null geometry created in processing.
            arcpy.management.RepairGeometry(temp_output_path)
            # Push union (or replacement) value from temp to update field.
            attributes.update_by_function(
                temp_output_path,
                field_name,
                update_function,
                field_as_first_arg=False,
                arg_field_names=[temp_union.field_name],
                log_level=None,
            )
            # Replace original chunk features with new features.
            features.delete(view['chunk'].name, log_level=None)
            features.insert_from_path(dataset_path,
                                      temp_output_path,
                                      log_level=None)
            dataset.delete(temp_output_path, log_level=None)
    log("End: Union.")
    return dataset_path
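A hypothetical call flagging parcels wherever they overlap a floodplain, using replacement_value to assign a constant instead of the overlay value (paths, fields, and value invented):

union(
    'C:/data/work.gdb/parcels',
    field_name='flood_flag',
    union_dataset_path='C:/data/work.gdb/floodplain',
    union_field_name='zone_code',
    replacement_value='FLOOD',
)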
Example #12
def overlay(dataset_path, field_name, overlay_dataset_path, overlay_field_name,
            **kwargs):
    """Assign overlay attribute to features, splitting where necessary.

    Note:
        Only one overlay flag at a time can be used. If multiple are set to True, the
        first one referenced in the code will be used. If no overlay flags are set, the
        operation will perform a basic intersection check, and the result will be at
        the whim of the geoprocessing environment's merge rule for the update field.

        This function has a 'chunking' loop routine in order to avoid an unhelpful
        output error that occurs when the inputs are rather large. For some reason the
        spatial join will 'succeed' with an empty output warning, but not create an
        output dataset. Running the join against smaller sets of data generally
        avoids this conundrum.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the dataset's field to assign to.
        overlay_dataset_path (str): Path of the overlay dataset.
        overlay_field_name (str): Name of overlay dataset's field with values to
            assign.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        chunk_size (int): Number of features to process per loop. Default is 4096.
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        overlay_central_coincident (bool): Flag to overlay the centrally-coincident
            value. Default is False.
        overlay_most_coincident (bool): Flag to overlay the most coincident value.
            Default is False.
        overlay_where_sql (str): SQL where-clause for the overlay dataset subselection.
        replacement_value: Value to replace overlay field values with.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('chunk_size', 4096)
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('overlay_central_coincident', False)
    kwargs.setdefault('overlay_most_coincident', False)
    kwargs.setdefault('overlay_where_sql')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Overlay-set attributes in %s on %s by overlay values in %s on %s.",
        field_name,
        dataset_path,
        overlay_field_name,
        overlay_dataset_path,
    )
    # Check flags & set details for spatial join call.
    join_kwargs = {
        'join_operation': 'join_one_to_many',
        'join_type': 'keep_all'
    }
    if kwargs['overlay_central_coincident']:
        join_kwargs['match_option'] = 'have_their_center_in'
    elif kwargs['overlay_most_coincident']:
        raise NotImplementedError("overlay_most_coincident not yet implemented.")
    else:
        join_kwargs['match_option'] = 'intersect'
    if kwargs.get('replacement_value') is not None:
        update_function = lambda x: kwargs['replacement_value'] if x else None
    else:
        update_function = (lambda x: x)
    meta = {'orig_tolerance': arcpy.env.XYTolerance}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    # Create temporary copy of overlay dataset.
    temp_overlay = arcobj.TempDatasetCopy(
        overlay_dataset_path,
        kwargs['overlay_where_sql'],
        field_names=[overlay_field_name],
    )
    with view['dataset'], temp_overlay:
        # Avoid field name collisions with neutral field name.
        temp_overlay.field_name = dataset.rename_field(
            temp_overlay.path,
            overlay_field_name,
            new_field_name=unique_name(overlay_field_name),
            log_level=None,
        )
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = kwargs['tolerance']
        for view['chunk'] in view['dataset'].as_chunks(kwargs['chunk_size']):
            temp_output_path = unique_path('output')
            arcpy.analysis.SpatialJoin(target_features=view['chunk'].name,
                                       join_features=temp_overlay.path,
                                       out_feature_class=temp_output_path,
                                       **join_kwargs)
            # Clean up bad or null geometry created in processing.
            arcpy.management.RepairGeometry(temp_output_path)
            # Push joined (or replacement) value from temp to update field.
            attributes.update_by_function(
                temp_output_path,
                field_name,
                update_function,
                field_as_first_arg=False,
                arg_field_names=[temp_overlay.field_name],
                log_level=None,
            )
            # Replace original chunk features with new features.
            features.delete(view['chunk'].name, log_level=None)
            features.insert_from_path(dataset_path,
                                      temp_output_path,
                                      log_level=None)
            dataset.delete(temp_output_path, log_level=None)
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = meta['orig_tolerance']
    log("End: Overlay.")
    return dataset_path
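A hypothetical call assigning each parcel the name of the city whose boundary contains its center (paths and field names invented):

overlay(
    'C:/data/work.gdb/parcels',
    field_name='city_name',
    overlay_dataset_path='C:/data/work.gdb/city_limits',
    overlay_field_name='name',
    overlay_central_coincident=True,
)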