Example 1
def keep_by_location(
    dataset_path: Union[Path, str],
    *,
    location_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    location_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Keep features where geometry overlaps location-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        location_path: Path to location-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        location_where_sql: SQL where-clause for location-dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each keep-state.
    """
    dataset_path = Path(dataset_path)
    location_path = Path(location_path)
    LOG.log(
        log_level,
        "Start: Keep features in `%s` where location overlaps `%s`.",
        dataset_path,
        location_path,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    location_view = DatasetView(location_path,
                                dataset_where_sql=location_where_sql)
    with session, view, location_view:
        arcpy.management.SelectLayerByLocation(
            in_layer=view.name,
            overlap_type="INTERSECT",
            select_features=location_view.name,
            selection_type="NEW_SELECTION",
        )
        arcpy.management.SelectLayerByLocation(
            in_layer=view.name, selection_type="SWITCH_SELECTION")
        states["deleted"] = delete(view.name,
                                   log_level=logging.DEBUG)["deleted"]
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Keep.")
    return states
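
A minimal usage sketch for `keep_by_location` (not from the source), assuming an ArcGIS Pro Python session where this module is importable as `features` (hypothetical name) and the geodatabase paths exist:

import logging

from features import keep_by_location  # Hypothetical module name.

# Keep only active tax lots that intersect the floodplain; all others are deleted.
counts = keep_by_location(
    "C:/data/parcels.gdb/tax_lots",  # Hypothetical path.
    location_path="C:/data/hazards.gdb/floodplain",  # Hypothetical path.
    dataset_where_sql="STATUS = 'ACTIVE'",
    use_edit_session=True,
    log_level=logging.INFO,
)
print(counts["deleted"])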
Example 2
def erase_features(
    dataset_path: Union[Path, str],
    *,
    erase_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    erase_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    log_level: int = logging.INFO,
) -> Counter:
    """Erase feature geometry where it overlaps erase-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        erase_path: Path to erase-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        erase_where_sql: SQL where-clause for erase-dataset subselection.
        output_path: Path to output dataset.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    erase_path = Path(erase_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Erase features in `%s` where overlapping features in `%s`.",
        dataset_path,
        erase_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    erase_view = DatasetView(erase_path,
                             field_names=[],
                             dataset_where_sql=erase_where_sql)
    with view, erase_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Erase(
            in_features=view.name,
            erase_features=erase_view.name,
            out_feature_class=str(output_path),
        )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Erase.")
    return states
Example 3
def delete(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Delete features in dataset.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each delete-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Delete features from `%s`.", dataset_path)
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view, session:
        states["deleted"] = view.count
        arcpy.management.DeleteRows(in_rows=view.name)
        states["remaining"] = dataset.feature_count(dataset_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Delete.")
    return states
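
A minimal sketch of a conditional cleanup with `delete` (not from the source), again assuming the hypothetical `features` module; the where-clause limits deletion to a subselection:

from features import delete  # Hypothetical module name.

counts = delete(
    "C:/data/work.gdb/staging_points",  # Hypothetical path.
    dataset_where_sql="IMPORT_BATCH = 42",
    use_edit_session=True,
)
print(counts["deleted"], counts["remaining"])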
Example 4
def adjacent_neighbors_map(
    dataset_path: Union[Path, str],
    *,
    id_field_names: Iterable[str],
    dataset_where_sql: Optional[str] = None,
    exclude_overlap: bool = False,
    include_corner: bool = False,
) -> Dict[Union[Tuple[Any], Any], Set[Union[Tuple[Any], Any]]]:
    """Return mapping of feature ID to set of adjacent feature IDs.

    Notes:
        Only works for polygon geometries.
        If id_field_names only has one field name, feature IDs in the mapping will be
            the single value of that field, not a tuple.

    Args:
        dataset_path: Path to dataset.
        id_field_names: Names of the feature ID fields.
        dataset_where_sql: SQL where-clause for dataset subselection.
        exclude_overlap: Exclude features that overlap, but do not have adjacent edges
            or nodes if True.
        include_corner: Include features that have adjacent corner nodes, but no
            adjacent edges if True.
    """
    dataset_path = Path(dataset_path)
    id_field_names = list(id_field_names)
    # Lowercase to avoid casing mismatch with the lowercased row keys below.
    id_field_names = [name.lower() for name in id_field_names]
    view = DatasetView(dataset_path,
                       field_names=id_field_names,
                       dataset_where_sql=dataset_where_sql)
    with view:
        temp_neighbor_path = unique_path("neighbor")
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.PolygonNeighbors(
            in_features=view.name,
            out_table=str(temp_neighbor_path),
            in_fields=id_field_names,
            area_overlap=not exclude_overlap,
            both_sides=True,
        )
    adjacent_neighbors = {}
    for row in features.as_dicts(temp_neighbor_path):
        # Lowercase to avoid casing mismatch.
        row = {key.lower(): val for key, val in row.items()}
        if len(id_field_names) == 1:
            source_id = row[f"src_{id_field_names[0]}"]
            neighbor_id = row[f"nbr_{id_field_names[0]}"]
        else:
            source_id = tuple(row[f"src_{name}"] for name in id_field_names)
            neighbor_id = tuple(row[f"nbr_{name}"] for name in id_field_names)
        if source_id not in adjacent_neighbors:
            adjacent_neighbors[source_id] = set()
        if not include_corner and not row["length"] and not row["area"]:
            continue

        adjacent_neighbors[source_id].add(neighbor_id)
    dataset.delete(temp_neighbor_path)
    return adjacent_neighbors
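
A usage sketch (not from the source) highlighting the single-field behavior noted in the docstring: with one ID field, mapping keys and set members are bare values rather than tuples. Module name and paths are hypothetical:

from features import adjacent_neighbors_map  # Hypothetical module name.

neighbors = adjacent_neighbors_map(
    "C:/data/admin.gdb/counties",  # Hypothetical path.
    id_field_names=["county_code"],
    include_corner=True,
)
# e.g. {"001": {"003", "039"}, "003": {"001"}, ...}
for county, adjacent in neighbors.items():
    print(county, "->", sorted(adjacent))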
Example 5
def clip(
    dataset_path: Union[Path, str],
    *,
    clip_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    clip_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    log_level: int = logging.INFO,
) -> Counter:
    """Clip feature geometry where it overlaps clip-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        clip_path: Path to clip-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        clip_where_sql: SQL where-clause for clip-dataset subselection.
        output_path: Path to output dataset.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    clip_path = Path(clip_path)
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Clip features in `%s` where overlapping `%s`.",
        dataset_path,
        clip_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    clip_view = DatasetView(clip_path, dataset_where_sql=clip_where_sql)
    with view, clip_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Clip(
            in_features=view.name,
            clip_features=clip_view.name,
            out_feature_class=str(output_path),
        )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Clip.")
    return states
Example 6
def update_by_expression(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    expression: str,
    expression_type: str = "Python",
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Field:
    """Update attribute values using a (single) code-expression.

    Wraps arcpy.management.CalculateField.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        expression: String expression to evaluate values from.
        expression_type: Type of code the expression represents. Allowed values:
            "Arcade", "Python", "Python3", and "SQL". Case-insensitive.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Field metadata instance for field with updated attributes.

    Raises:
        AttributeError: If given expression type is invalid.
    """
    dataset_path = Path(dataset_path)
    if expression_type.upper() not in ["ARCADE", "PYTHON", "PYTHON3", "SQL"]:
        raise AttributeError("Invalid expression_type")

    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by %s expression `%s`.",
        dataset_path,
        field_name,
        expression_type,
        expression,
    )
    if expression_type.upper() == "PYTHON":
        expression_type = "PYTHON3"
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with session, view:
        arcpy.management.CalculateField(
            in_table=view.name,
            field=field_name,
            expression=expression,
            expression_type=expression_type,
        )
    LOG.log(log_level, "End: Update.")
    return Field(dataset_path, field_name)
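
A minimal sketch (not from the source) of a CalculateField-style expression, where `!field!` tokens reference row values; "Python" is upgraded to "PYTHON3" internally. Module name and path are hypothetical:

from features import update_by_expression  # Hypothetical module name.

field = update_by_expression(
    "C:/data/roads.gdb/streets",  # Hypothetical path.
    field_name="name",
    expression="!name!.title()",
    expression_type="Python",
    dataset_where_sql="name IS NOT NULL",
)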
Example 7
def table_to_points(
    dataset_path: Union[Path, str],
    *,
    x_field_name: str,
    y_field_name: str,
    z_field_name: Optional[str] = None,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    spatial_reference_item: SpatialReferenceSourceItem = 4326,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert coordinate table to a new point dataset.

    Args:
        dataset_path: Path to dataset.
        x_field_name: Name of field with x-coordinate.
        y_field_name: Name of field with y-coordinate.
        z_field_name: Name of field with z-coordinate.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the spatial reference of the output
            geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert table rows `%s` to points in output `%s`.",
        dataset_path,
        output_path,
    )
    layer_name = unique_name()
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    # View must exist while the event layer is made & copied.
    with view:
        arcpy.management.MakeXYEventLayer(
            table=view.name,
            out_layer=layer_name,
            in_x_field=x_field_name,
            in_y_field=y_field_name,
            in_z_field=z_field_name,
            spatial_reference=SpatialReference(spatial_reference_item).object,
        )
        dataset.copy(layer_name, output_path=output_path, log_level=logging.DEBUG)
        arcpy.management.Delete(layer_name)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
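
A usage sketch (not from the source), assuming a table of WGS84 coordinates; module name and paths are hypothetical:

from features import table_to_points  # Hypothetical module name.

counts = table_to_points(
    "C:/data/field_work.gdb/gps_log",  # Hypothetical path.
    x_field_name="longitude",
    y_field_name="latitude",
    output_path="C:/data/field_work.gdb/gps_points",
    spatial_reference_item=4326,  # Unprojected WGS84.
)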
Example 8
def lines_to_vertex_points(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    endpoints_only: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from lines to points at every vertex.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        endpoints_only: Output points should include line endpoints only if True.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert lines in `%s` to vertex points in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        arcpy.management.FeatureVerticesToPoints(
            in_features=view.name,
            # ArcPy2.8.0: Convert Path to str.
            out_feature_class=str(output_path),
            point_location="ALL" if not endpoints_only else "BOTH_ENDS",
        )
    dataset.delete_field(output_path,
                         field_name="ORIG_FID",
                         log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
Example 9
def planarize(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Planarize feature geometry into lines.

    Note:
        This method does not make topological linework. However, it does carry all
        attributes with it, rather than just an ID attribute.

        Since this method breaks the new line geometry at intersections, it can be
        useful for breaking line features that cross.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Planarize geometry in `%s` to lines in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.FeatureToLine(in_features=view.name,
                                       out_feature_class=str(output_path),
                                       attributes=True)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Planarize.")
    return states
Example 10
def split_lines_at_vertices(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Split lines into smaller lines between vertices.

    The original datasets can be lines or polygons. Polygons will be split along their
    rings.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Split line geometry in `%s` into lines between vertices in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.SplitLine(in_features=view.name,
                                   out_feature_class=str(output_path))
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Split.")
    return states
Example 11
def points_to_thiessen_polygons(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from points to Thiessen polygons.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert points in `%s` to Thiessen polygons in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.CreateThiessenPolygons(
            in_features=view.name,
            out_feature_class=str(output_path),
            fields_to_copy="ALL",
        )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
Example 12
def buffer(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    distance: Union[float, int],
    log_level: int = logging.INFO,
) -> Counter:
    """Buffer features a given distance & (optionally) dissolve on given fields.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        distance: Distance to buffer from feature, in the units of the dataset.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(log_level, "Start: Buffer features in `%s`.", dataset_path)
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Buffer(
            in_features=view.name,
            out_feature_class=str(output_path),
            buffer_distance_or_field=distance,
        )
    for field_name in ["BUFF_DIST", "ORIG_FID"]:
        dataset.delete_field(output_path, field_name=field_name)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Buffer.")
    return states
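
A usage sketch (not from the source); the distance is in the dataset's linear unit (e.g. feet for a state-plane dataset). Module name and paths are hypothetical:

from features import buffer  # Hypothetical module name.

counts = buffer(
    "C:/data/utilities.gdb/hydrants",  # Hypothetical path.
    output_path="C:/data/utilities.gdb/hydrant_service",
    distance=500,
)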
Example 13
def polygons_to_lines(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    id_field_name: Optional[str] = None,
    make_topological: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from polygons to lines.

    Note:
        If `make_topological` is set to True, shared outlines will be a single, separate
        feature. Note that one cannot pass attributes to a topological transformation
        (as the values would not apply to all adjacent features).

        If an ID field name is specified, the output dataset will identify the input
        features that defined each line feature with the name & values from the
        provided field. This option is ignored if the output is non-topological lines,
        as the field passes over with the rest of the attributes.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        id_field_name: Name of ID field to apply on topological lines.
        make_topological: Make line output topological, or merged where lines overlap.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert polgyons in `%s` to lines in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.PolygonToLine(
            in_features=view.name,
            out_feature_class=str(output_path),
            neighbor_option=("IDENTIFY_NEIGHBORS"
                             if make_topological else "IGNORE_NEIGHBORS"),
        )
    if make_topological:
        _dataset = Dataset(dataset_path)
        for side in ["left", "right"]:
            oid_key = f"{side.upper()}_FID"
            if id_field_name:
                id_field = next(
                    _field for _field in _dataset.fields
                    if _field.name.lower() == id_field_name.lower())
                id_field.name = f"{side.upper()}_{id_field_name}"
                # Cannot create an OID-type field, so force to long.
                if id_field.type.upper() == "OID":
                    id_field.type = "LONG"
                dataset.add_field(output_path,
                                  log_level=logging.DEBUG,
                                  **id_field.field_as_dict)
                attributes.update_by_joined_value(
                    output_path,
                    field_name=id_field.name,
                    key_field_names=[oid_key],
                    join_dataset_path=dataset_path,
                    join_field_name=id_field_name,
                    join_key_field_names=[_dataset.oid_field_name],
                    log_level=logging.DEBUG,
                )
            dataset.delete_field(output_path,
                                 field_name=oid_key,
                                 log_level=logging.DEBUG)
    else:
        dataset.delete_field(output_path,
                             field_name="ORIG_FID",
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
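
A usage sketch (not from the source) of the topological branch: one line per shared boundary, with LEFT_/RIGHT_ ID fields populated from the given field on the adjacent polygons. Module name and paths are hypothetical:

from features import polygons_to_lines  # Hypothetical module name.

counts = polygons_to_lines(
    "C:/data/planning.gdb/zones",  # Hypothetical path.
    output_path="C:/data/planning.gdb/zone_boundaries",
    id_field_name="zone_id",
    make_topological=True,
)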
Example 14
def spatial_join_by_center(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    join_path: Union[Path, str],
    join_field_name: str,
    dataset_where_sql: Optional[str] = None,
    join_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    replacement_value: Optional[Any] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Spatially-join attributes by their center.

    Notes:
        Features joined with multiple join-features will be duplicated.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field to place joined values.
        join_path: Path to join-dataset.
        join_field_name: Name of join-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        join_where_sql: SQL where-clause for the join-dataset subselection.
        output_path: Path to output dataset.
        replacement_value: Value to replace a present join-field value with. If set to
            None, no replacement will occur.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    join_path = Path(join_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Spatially-join attributes in `%s.%s` by features/values in `%s.%s`.",
        dataset_path,
        field_name,
        join_path,
        join_field_name,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    # Do not include any field names - we do not want them added to output.
    join_view = DatasetView(join_path,
                            field_names=[],
                            dataset_where_sql=join_where_sql)
    with view, join_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.SpatialJoin(
            target_features=view.name,
            join_features=join_view.name,
            out_feature_class=str(output_path),
            join_operation="JOIN_ONE_TO_MANY",
            join_type="KEEP_ALL",
            match_option="HAVE_THEIR_CENTER_IN",
        )
    if replacement_value is not None:
        attributes.update_by_value(
            output_path,
            field_name,
            value=replacement_value,
            dataset_where_sql="JOIN_FID <> -1",
            log_level=logging.DEBUG,
        )
    else:
        attributes.update_by_joined_value(
            output_path,
            field_name,
            key_field_names=["JOIN_FID"],
            join_dataset_path=join_path,
            join_field_name=join_field_name,
            join_key_field_names=["OID@"],
            dataset_where_sql="JOIN_FID <> -1",
            join_dataset_where_sql=join_where_sql,
            log_level=logging.DEBUG,
        )
    attributes.update_by_value(
        output_path,
        field_name,
        value=None,
        dataset_where_sql="JOIN_FID = -1",
        log_level=logging.DEBUG,
    )
    for name in ["Join_Count", "TARGET_FID", "JOIN_FID"]:
        dataset.delete_field(output_path,
                             field_name=name,
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Join.")
    return states
Example 15
def dissolve_features(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    dissolve_field_names: Optional[Iterable[str]] = None,
    all_fields_in_output: bool = False,
    allow_multipart: bool = True,
    unsplit_lines: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Dissolve feature geometry that share value in given fields.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        dissolve_field_names: Names of fields to base dissolve on.
        all_fields_in_output: All fields in the dataset will persist in the output
            dataset if True. Otherwise, only the dissolve fields will persist.
            Non-dissolve fields will have default values.
        allow_multipart: Allow multipart features in output if True.
        unsplit_lines: Merge line features when endpoints meet without crossing features
            if True.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    if dissolve_field_names is not None:
        dissolve_field_names = list(dissolve_field_names)
    LOG.log(
        log_level,
        "Start: Dissolve features in `%s` on fields `%s`.",
        dataset_path,
        dissolve_field_names,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(
        dataset_path,
        field_names=dissolve_field_names,
        dataset_where_sql=dataset_where_sql,
    )
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.Dissolve(
            in_features=view.name,
            out_feature_class=str(output_path),
            dissolve_field=dissolve_field_names,
            multi_part=allow_multipart,
            unsplit_lines=unsplit_lines,
        )
    if all_fields_in_output:
        for _field in Dataset(dataset_path).user_fields:
            # Cannot add a non-nullable field to existing features.
            _field.is_nullable = True
            dataset.add_field(
                output_path,
                exist_ok=True,
                log_level=logging.DEBUG,
                **_field.field_as_dict,
            )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Dissolve.")
    return states
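
A usage sketch (not from the source), merging adjacent lots that share an owner into single-part features. Module name and paths are hypothetical:

from features import dissolve_features  # Hypothetical module name.

counts = dissolve_features(
    "C:/data/planning.gdb/tax_lots",  # Hypothetical path.
    output_path="C:/data/planning.gdb/ownership_blocks",
    dissolve_field_names=["owner_id"],
    allow_multipart=False,
)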
Example 16
def generate_service_rings(
    dataset_path: Union[Path, str],
    *,
    id_field_name: str,
    network_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    cost_attribute: str,
    detailed_features: bool = False,
    max_distance: Union[float, int],
    overlap_facilities: bool = True,
    restriction_attributes: Optional[Iterable[str]] = None,
    ring_width: Union[float, int],
    travel_from_facility: bool = False,
    trim_value: Optional[Union[float, int]] = None,
    log_level: int = logging.INFO,
) -> Dataset:
    """Create facility service ring features using a network dataset.

    Args:
        dataset_path: Path to dataset.
        id_field_name: Name of dataset ID field.
        network_path: Path to network dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        cost_attribute: Name of network cost attribute to use.
        detailed_features: Generate high-detail features if True.
        max_distance: Distance in travel from the facility the outer ring will extend
            to, in the units of the dataset.
        overlap_facilities: Allow different facility service areas to overlap if True.
        restriction_attributes: Names of network restriction attributes to use in
            analysis.
        ring_width: Distance a service ring represents in travel, in the units of the
            dataset.
        travel_from_facility: Perform the analysis travelling from the facility if True,
            rather than toward the facility.
        trim_value: Distance from network features to trim service areas at, in units
            of the dataset.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for output dataset.
    """
    dataset_path = Path(dataset_path)
    network_path = Path(network_path)
    output_path = Path(output_path)
    LOG.log(log_level, "Start: Generate service rings for `%s`.", dataset_path)
    # `trim_value` assumes meters if not input as linear unit string.
    if trim_value is not None:
        trim_value = f"{trim_value} {SpatialReference(dataset_path).linear_unit}"
    # ArcPy2.8.0: Convert Path to str.
    arcpy.na.MakeServiceAreaLayer(
        in_network_dataset=str(network_path),
        out_network_analysis_layer="service_area",
        impedance_attribute=cost_attribute,
        travel_from_to="TRAVEL_FROM" if travel_from_facility else "TRAVEL_TO",
        default_break_values=(" ".join(
            str(x) for x in range(ring_width, max_distance + 1, ring_width))),
        polygon_type="DETAILED_POLYS" if detailed_features else "SIMPLE_POLYS",
        merge="NO_MERGE" if overlap_facilities else "NO_OVERLAP",
        nesting_type="RINGS",
        UTurn_policy="ALLOW_DEAD_ENDS_AND_INTERSECTIONS_ONLY",
        restriction_attribute_name=restriction_attributes,
        polygon_trim=trim_value is not None,
        poly_trim_value=trim_value,
        hierarchy="NO_HIERARCHY",
    )
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        arcpy.na.AddLocations(
            in_network_analysis_layer="service_area",
            sub_layer="Facilities",
            in_table=view.name,
            field_mappings=f"Name {id_field_name} #",
            search_tolerance=max_distance,
            match_type="MATCH_TO_CLOSEST",
            append="CLEAR",
            snap_to_position_along_network="NO_SNAP",
            exclude_restricted_elements=True,
        )
    arcpy.na.Solve(
        in_network_analysis_layer="service_area",
        ignore_invalids=True,
        terminate_on_solve_error=True,
    )
    dataset.copy("service_area/Polygons",
                 output_path=output_path,
                 log_level=logging.DEBUG)
    arcpy.management.Delete("service_area")
    id_field = Field(dataset_path, id_field_name)
    dataset.add_field(output_path,
                      log_level=logging.DEBUG,
                      **id_field.field_as_dict)
    attributes.update_by_function(
        output_path,
        field_name=id_field.name,
        function=TYPE_ID_FUNCTION_MAP[id_field.type.lower()],
        field_as_first_arg=False,
        arg_field_names=["Name"],
        log_level=logging.DEBUG,
    )
    LOG.log(log_level, "End: Generate.")
    return Dataset(output_path)
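
A minimal sketch (not from the source), assuming a network dataset is available: with ring_width=1000 and max_distance=3000 the break values become "1000 2000 3000", i.e. three concentric rings per facility. Module name, paths, and the cost attribute are hypothetical:

from features import generate_service_rings  # Hypothetical module name.

rings = generate_service_rings(
    "C:/data/facilities.gdb/fire_stations",  # Hypothetical path.
    id_field_name="station_id",
    network_path="C:/data/streets.gdb/street_network",  # Hypothetical path.
    output_path="C:/data/facilities.gdb/response_rings",
    cost_attribute="Length",  # Hypothetical network cost attribute.
    max_distance=3000,
    ring_width=1000,
)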
Example 17
def update_by_central_overlay(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    overlay_dataset_path: Union[Path, str],
    overlay_field_name: str,
    dataset_where_sql: Optional[str] = None,
    overlay_where_sql: Optional[str] = None,
    replacement_value: Optional[Any] = None,
    tolerance: Optional[float] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by finding the central overlay feature value.

    Notes:
        Since only one value will be selected in the overlay, operations with multiple
        overlaying features will respect the geoprocessing environment merge rule. This
        rule generally defaults to the value of the "first" feature.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        overlay_dataset_path: Path to overlay-dataset.
        overlay_field_name: Name of overlay-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        overlay_where_sql: SQL where-clause for overlay-dataset subselection.
        replacement_value: Value to replace a present overlay-field value with. If set
            to None, no replacement will occur.
        tolerance: Tolerance for coincidence, in units of the dataset. If set to None,
            will use the default tolerance for the workspace of the dataset.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    overlay_dataset_path = Path(overlay_dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by central-overlay value in `%s.%s`.",
        dataset_path,
        field_name,
        overlay_dataset_path,
        overlay_field_name,
    )
    original_tolerance = arcpy.env.XYTolerance
    # Do *not* include any fields here (avoids name collisions in temporary output).
    view = DatasetView(dataset_path,
                       field_names=[],
                       dataset_where_sql=dataset_where_sql)
    overlay_view = DatasetView(
        overlay_dataset_path,
        field_names=[overlay_field_name],
        dataset_where_sql=overlay_where_sql,
    )
    with view, overlay_view:
        temp_output_path = unique_path("output")
        if tolerance is not None:
            arcpy.env.XYTolerance = tolerance
        arcpy.analysis.SpatialJoin(
            target_features=view.name,
            join_features=overlay_view.name,
            # ArcPy2.8.0: Convert to str.
            out_feature_class=str(temp_output_path),
            join_operation="JOIN_ONE_TO_ONE",
            join_type="KEEP_ALL",
            match_option="HAVE_THEIR_CENTER_IN",
        )
    arcpy.env.XYTolerance = original_tolerance
    if replacement_value is not None:
        update_by_function(
            temp_output_path,
            overlay_field_name,
            function=lambda x: replacement_value if x else None,
            log_level=logging.DEBUG,
        )
    states = update_by_joined_value(
        dataset_path,
        field_name,
        key_field_names=["OID@"],
        join_dataset_path=temp_output_path,
        join_field_name=overlay_field_name,
        join_key_field_names=["TARGET_FID"],
        dataset_where_sql=dataset_where_sql,
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    # ArcPy2.8.0: Convert to str.
    arcpy.management.Delete(str(temp_output_path))
    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
Example 18
def update_by_overlay_count(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    overlay_dataset_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    overlay_where_sql: Optional[str] = None,
    tolerance: Optional[float] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by count of overlay features.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        overlay_dataset_path: Path to overlay-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        overlay_where_sql: SQL where-clause for overlay-dataset subselection.
        tolerance: Tolerance for coincidence, in units of the dataset. If set to None,
            will use the default tolerance for the workspace of the dataset.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.

    Raises:
        RuntimeError: If attribute cannot be updated.
    """
    dataset_path = Path(dataset_path)
    overlay_dataset_path = Path(overlay_dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by overlay feature counts from `%s`.",
        dataset_path,
        field_name,
        overlay_dataset_path,
    )
    original_tolerance = arcpy.env.XYTolerance
    view = DatasetView(dataset_path,
                       field_names=[],
                       dataset_where_sql=dataset_where_sql)
    overlay_view = DatasetView(
        overlay_dataset_path,
        field_names=[],
        dataset_where_sql=overlay_where_sql,
    )
    with view, overlay_view:
        if tolerance is not None:
            arcpy.env.XYTolerance = tolerance
        temp_output_path = unique_path("output")
        arcpy.analysis.SpatialJoin(
            target_features=view.name,
            join_features=overlay_view.name,
            # ArcPy2.8.0: Convert to str.
            out_feature_class=str(temp_output_path),
            join_operation="JOIN_ONE_TO_ONE",
            join_type="KEEP_COMMON",
            match_option="INTERSECT",
        )
    arcpy.env.XYTolerance = original_tolerance
    cursor = arcpy.da.SearchCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(temp_output_path),
        field_names=["TARGET_FID", "Join_Count"],
    )
    with cursor:
        oid_overlay_count = dict(cursor)
    # ArcPy2.8.0: Convert to str.
    arcpy.management.Delete(str(temp_output_path))
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=["OID@", field_name],
        where_clause=dataset_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for feature in cursor:
            oid = feature[0]
            old_value = feature[1]
            new_value = oid_overlay_count.get(oid, 0)
            if same_value(old_value, new_value):
                states["unchanged"] += 1
            else:
                try:
                    cursor.updateRow([oid, new_value])
                    states["altered"] += 1
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Update cursor failed: Offending value: `{new_value}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
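
A usage sketch (not from the source), writing the number of intersecting address points onto each parcel. Module name and paths are hypothetical:

from features import update_by_overlay_count  # Hypothetical module name.

counts = update_by_overlay_count(
    "C:/data/parcels.gdb/parcels",  # Hypothetical path.
    field_name="address_count",
    overlay_dataset_path="C:/data/parcels.gdb/address_points",
)
print(counts["altered"], counts["unchanged"])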
Example 19
def insert_from_path(
    dataset_path: Union[Path, str],
    field_names: Optional[Iterable[str]] = None,
    *,
    source_path: Union[Path, str],
    source_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Insert features into dataset from another dataset.

    Args:
        dataset_path: Path to dataset.
        field_names: Names of fields for insert. Fields must exist in both datasets. If
            set to None, all user fields present in both datasets will be inserted,
            along with the geometry field (if present).
        source_path: Path to dataset for features to insert.
        source_where_sql: SQL where-clause for source dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each insert-state.
    """
    dataset_path = Path(dataset_path)
    source_path = Path(source_path)
    LOG.log(
        log_level,
        "Start: Insert features into `%s` from `%s`.",
        dataset_path,
        source_path,
    )
    _dataset = Dataset(dataset_path)
    source_dataset = Dataset(source_path)
    if field_names is None:
        field_names = set(name.lower()
                          for name in _dataset.field_names_tokenized) & set(
                              name.lower()
                              for name in source_dataset.field_names_tokenized)
    else:
        field_names = set(name.lower() for name in field_names)
    # OIDs & area/length "fields" have no business being part of an insert.
    # Geometry itself is handled separately in append function.
    for i_dataset in [_dataset, source_dataset]:
        for field_name in chain(*i_dataset.field_name_token.items()):
            field_names.discard(field_name)
            field_names.discard(field_name.lower())
            field_names.discard(field_name.upper())
    field_names = list(field_names)
    # Create field maps.
    # ArcGIS Pro's no-test append is case-sensitive (verified 1.0-1.1.1).
    # Avoid this problem by using field mapping.
    # BUG-000090970 - ArcGIS Pro 'No test' field mapping in Append tool does not auto-
    # map to the same field name if naming convention differs.
    field_mapping = arcpy.FieldMappings()
    for field_name in field_names:
        field_map = arcpy.FieldMap()
        # ArcPy2.8.0: Convert Path to str.
        field_map.addInputField(str(source_path), field_name)
        field_mapping.addFieldMap(field_map)
    session = Editing(_dataset.workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(
        source_path,
        name=unique_name("view"),
        dataset_where_sql=source_where_sql,
        # Must be nonspatial to append to nonspatial table.
        force_nonspatial=(not _dataset.is_spatial),
    )
    with view, session:
        arcpy.management.Append(
            inputs=view.name,
            # ArcPy2.8.0: Convert Path to str.
            target=str(dataset_path),
            schema_type="NO_TEST",
            field_mapping=field_mapping,
        )
        states["inserted"] = view.count
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Insert.")
    return states
Example 20
def nearest_features(
    dataset_path: Union[Path, str],
    *,
    id_field_name: str,
    near_path: Union[Path, str],
    near_id_field_name: str,
    dataset_where_sql: Optional[str] = None,
    near_where_sql: Optional[str] = None,
    max_distance: Optional[Union[float, int]] = None,
    near_rank: int = 1,
) -> Iterator[Dict[str, Any]]:
    """Generate info dictionaries for relationship with Nth-nearest near-feature.

    Args:
        dataset_path: Path to dataset.
        id_field_name: Name of dataset ID field.
        near_path: Path to near-dataset.
        near_id_field_name: Name of the near-dataset ID field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        near_where_sql: SQL where-clause for near-dataset subselection.
        max_distance: Maximum distance to search for near-features, in units of the
            dataset.
        near_rank: Nearness rank of the feature to map info for (Nth-nearest).

    Yields:
        Nearest feature details.
        Keys:
            * dataset_id
            * near_id
            * angle: Angle from dataset feature & near-feature, in decimal degrees.
            * distance: Distance between feature & near-feature, in units of the
                dataset.
    """
    dataset_path = Path(dataset_path)
    near_path = Path(near_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    near_view = DatasetView(near_path, dataset_where_sql=near_where_sql)
    with view, near_view:
        temp_near_path = unique_path("near")
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.GenerateNearTable(
            in_features=view.name,
            near_features=near_view.name,
            out_table=str(temp_near_path),
            search_radius=max_distance,
            angle=True,
            closest=(near_rank == 1),
            closest_count=near_rank,
        )
        oid_id_map = dict(
            features.as_tuples(view.name, field_names=["OID@", id_field_name]))
        near_oid_id_map = dict(
            features.as_tuples(near_view.name,
                               field_names=["OID@", near_id_field_name]))
    _features = features.as_dicts(
        temp_near_path,
        field_names=["IN_FID", "NEAR_FID", "NEAR_ANGLE", "NEAR_DIST"],
        dataset_where_sql=f"NEAR_RANK = {near_rank}"
        if near_rank != 1 else None,
    )
    for feature in _features:
        yield {
            "dataset_id": oid_id_map[feature["IN_FID"]],
            "near_id": near_oid_id_map[feature["NEAR_FID"]],
            "angle": feature["NEAR_ANGLE"],
            "distance": feature["NEAR_DIST"],
        }

    dataset.delete(temp_near_path)
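
A consumption sketch (not from the source): because the temporary near-table is deleted only after the final yield, exhaust the generator (or wrap it in `list`) rather than abandoning it mid-way. Module name and paths are hypothetical:

from features import nearest_features  # Hypothetical module name.

for info in nearest_features(
    "C:/data/transit.gdb/stops",  # Hypothetical path.
    id_field_name="stop_id",
    near_path="C:/data/transit.gdb/stations",  # Hypothetical path.
    near_id_field_name="station_id",
    max_distance=5280,
):
    print(info["dataset_id"], info["near_id"], round(info["distance"], 1))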
Example 21
def union(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    union_path: Union[Path, str],
    union_field_name: str,
    dataset_where_sql: Optional[str] = None,
    union_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    replacement_value: Optional[Any] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Assign union attributes.

    Notes:
        Features with multiple union-features will be split.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field to place union values.
        union_path: Path to union-dataset.
        union_field_name: Name of union-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        union_where_sql: SQL where-clause for the union-dataset subselection.
        output_path: Path to output dataset.
        replacement_value: Value to replace a present union-field value with. If set to
            None, no replacement will occur.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    union_path = Path(union_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Union-set attributes in `%s.%s` by features/values in `%s.%s`.",
        dataset_path,
        field_name,
        union_path,
        union_field_name,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    # Do not include any field names - we do not want them added to output.
    union_view = DatasetView(union_path,
                             field_names=[],
                             dataset_where_sql=union_where_sql)
    with view, union_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Union(
            in_features=[view.name, union_view.name],
            out_feature_class=str(output_path),
            join_attributes="ALL",
        )
    fid_field_names = [
        name for name in Dataset(output_path).field_names
        if name.startswith("FID_")
    ]
    if replacement_value is not None:
        attributes.update_by_value(
            output_path,
            field_name,
            value=replacement_value,
            dataset_where_sql=f"{fid_field_names[-1]} <> -1",
            log_level=logging.DEBUG,
        )
    else:
        attributes.update_by_joined_value(
            output_path,
            field_name,
            key_field_names=[fid_field_names[-1]],
            join_dataset_path=union_path,
            join_field_name=union_field_name,
            join_key_field_names=["OID@"],
            dataset_where_sql=f"{fid_field_names[-1]} <> -1",
            join_dataset_where_sql=union_where_sql,
            log_level=logging.DEBUG,
        )
    attributes.update_by_value(
        output_path,
        field_name,
        value=None,
        dataset_where_sql=f"{fid_field_names[-1]} = -1",
        log_level=logging.DEBUG,
    )
    for name in fid_field_names:
        dataset.delete_field(output_path,
                             field_name=name,
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Union.")
    return states
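
A usage sketch (not from the source), tagging each lot fragment with the flood-zone code it falls in (None where no zone overlaps). Module name and paths are hypothetical:

from features import union  # Hypothetical module name.

counts = union(
    "C:/data/planning.gdb/tax_lots",  # Hypothetical path.
    field_name="flood_zone",
    union_path="C:/data/hazards.gdb/flood_zones",  # Hypothetical path.
    union_field_name="zone_code",
    output_path="C:/data/planning.gdb/tax_lots_flood",
)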
Example 22
def update_by_dominant_overlay(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    overlay_dataset_path: Union[Path, str],
    overlay_field_name: str,
    dataset_where_sql: Optional[str] = None,
    overlay_where_sql: Optional[str] = None,
    include_missing_area: bool = False,
    tolerance: Optional[float] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by finding the dominant overlay feature value.
    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        overlay_dataset_path: Path to overlay-dataset.
        overlay_field_name: Name of overlay-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        overlay_where_sql: SQL where-clause for overlay-dataset subselection.
        include_missing_area: If True, the collective area where no overlay value
            exists (i.e. no overlay geometry, or an overlay value of None) is
            considered a valid candidate for the dominant overlay.
        tolerance: Tolerance for coincidence, in units of the dataset. If set to None,
            will use the default tolerance for the workspace of the dataset.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    overlay_dataset_path = Path(overlay_dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by dominant overlay value in `%s.%s`.",
        dataset_path,
        field_name,
        overlay_dataset_path,
        overlay_field_name,
    )
    original_tolerance = arcpy.env.XYTolerance
    # Do *not* include any fields here (avoids name collisions in temporary output).
    view = DatasetView(dataset_path,
                       field_names=[],
                       dataset_where_sql=dataset_where_sql)
    overlay_view = DatasetView(
        overlay_dataset_path,
        field_names=[overlay_field_name],
        dataset_where_sql=overlay_where_sql,
    )
    with view, overlay_view:
        temp_output_path = unique_path("output")
        if tolerance is not None:
            arcpy.env.XYTolerance = tolerance
        arcpy.analysis.Identity(
            in_features=view.name,
            identity_features=overlay_view.name,
            # ArcPy2.8.0: Convert to str.
            out_feature_class=str(temp_output_path),
            join_attributes="ALL",
        )
    arcpy.env.XYTolerance = original_tolerance
    # Identity makes custom OID field names - in_features OID field comes first.
    oid_field_names = [
        name for name in Dataset(temp_output_path).field_names
        if name.startswith("FID_")
    ]
    oid_value_area = {}
    cursor = arcpy.da.SearchCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(temp_output_path),
        field_names=oid_field_names + [overlay_field_name, "SHAPE@AREA"],
    )
    with cursor:
        for oid, overlay_oid, value, area in cursor:
            # Check for -1 OID (no overlay feature): Identity does not set it to None.
            if overlay_oid == -1:
                value = None
            if value is None and not include_missing_area:
                continue

            if oid not in oid_value_area:
                oid_value_area[oid] = defaultdict(float)
            oid_value_area[oid][value] += area
    # ArcPy2.8.0: Convert to str.
    arcpy.management.Delete(str(temp_output_path))
    oid_dominant_value = {
        oid: max(value_area.items(), key=itemgetter(1))[0]
        for oid, value_area in oid_value_area.items()
    }
    states = update_by_mapping(
        dataset_path,
        field_name,
        mapping=oid_dominant_value,
        key_field_names=["OID@"],
        dataset_where_sql=dataset_where_sql,
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
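
The dominant-value selection above reduces each feature's per-value area tallies to an argmax. A self-contained sketch of that reduction (hypothetical data, no arcpy required):

from operator import itemgetter

# Per-feature area tallies keyed by overlay value, as built from the Identity output.
oid_value_area = {
    1: {"residential": 80.0, "commercial": 20.0},
    2: {None: 60.0, "residential": 40.0},  # None kept only if include_missing_area.
}
oid_dominant_value = {
    oid: max(value_area.items(), key=itemgetter(1))[0]
    for oid, value_area in oid_value_area.items()
}
print(oid_dominant_value)  # {1: 'residential', 2: None}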