Example #1
    def extract(
        self,
        dataset_path: Union[Path, str],
        *,
        field_names: Optional[Iterable[str]] = None,
        dataset_where_sql: Optional[str] = None,
    ) -> TProcedure:
        """Extract features to transform workspace.

        Args:
            dataset_path: Path to dataset.
            field_names: Names of fields to extract. If set to None, all fields will be
                included.
            dataset_where_sql: SQL where-clause for dataset subselection.

        Returns:
            Reference to instance.
        """
        dataset_path = Path(dataset_path)
        LOG.info("Start: Extract `%s`.", dataset_path)
        self.transform_path = self.available_transform_path
        states = Counter()
        states["extracted"] = dataset.copy(
            dataset_path,
            field_names=field_names,
            dataset_where_sql=dataset_where_sql,
            output_path=self.transform_path,
            log_level=logging.DEBUG,
        ).feature_count
        # ArcPy 2.8.0: Workaround for BUG-000091314.
        dataset.remove_all_default_field_values(self.transform_path,
                                                log_level=logging.DEBUG)
        log_entity_states("features", states, logger=LOG)
        LOG.info("End: Extract.")
        return self
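Since `extract` returns the instance, it chains with the `load` step shown in Example #17. A minimal usage sketch, assuming the enclosing ETL class (named `Procedure` here purely for illustration) is constructed elsewhere; the geodatabase paths are hypothetical:

from pathlib import Path

# `Procedure` is a hypothetical stand-in for the class that owns `extract` & `load`.
procedure = Procedure()
procedure.extract(
    Path("C:/data/source.gdb/roads"),
    field_names=["road_name", "road_class"],
    dataset_where_sql="road_class = 'ARTERIAL'",
).load(Path("C:/data/target.gdb/roads"))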
Example #2
def delete(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Delete features in dataset.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each delete-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Delete features from `%s`.", dataset_path)
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view, session:
        states["deleted"] = view.count
        arcpy.management.DeleteRows(in_rows=view.name)
        states["remaining"] = dataset.feature_count(dataset_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Delete.")
    return states
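A short usage sketch for `delete`; the dataset path and where-clause are hypothetical:

from pathlib import Path

# Delete only the features matching the where-clause; a Counter is returned.
states = delete(
    Path("C:/data/example.gdb/roads"),  # Hypothetical dataset.
    dataset_where_sql="status = 'RETIRED'",
)
print(states["deleted"], states["remaining"])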
Example #3
def points_to_multipoints(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from points to multipoints.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert points in `%s` to multipoints in output `%s`.",
        dataset_path,
        output_path,
    )
    _dataset = Dataset(dataset_path)
    # ArcPy2.8.0: Convert Path to str (2x).
    arcpy.management.CreateFeatureclass(
        out_path=str(output_path.parent),
        out_name=output_path.name,
        geometry_type="MULTIPOINT",
        template=str(dataset_path),
        spatial_reference=_dataset.spatial_reference.object,
    )
    field_names = _dataset.user_field_names + ["SHAPE@"]
    # ArcPy2.8.0: Convert Path to str.
    multipoint_cursor = arcpy.da.InsertCursor(in_table=str(output_path),
                                              field_names=field_names)
    # ArcPy2.8.0: Convert Path to str.
    point_cursor = arcpy.da.SearchCursor(
        in_table=str(dataset_path),
        field_names=field_names,
        where_clause=dataset_where_sql,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    with multipoint_cursor, point_cursor:
        for point_feature in point_cursor:
            multipoint_geometry = arcpy.Multipoint(
                point_feature[-1].firstPoint)
            multipoint_feature = point_feature[:-1] + (multipoint_geometry, )
            multipoint_cursor.insertRow(multipoint_feature)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
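A usage sketch; each point feature becomes a one-point multipoint in the (hypothetical) output dataset:

from pathlib import Path

points_to_multipoints(
    Path("C:/data/example.gdb/wells"),  # Hypothetical input.
    output_path=Path("C:/data/output.gdb/wells_multipoint"),
)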
Example #4
def delete_by_id(
    dataset_path: Union[Path, str],
    delete_ids: Iterable[Union[Sequence[Any], Any]],
    id_field_names: Iterable[str],
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Delete features in dataset with given IDs.

    Note:
        There is no guarantee that the ID field(s) are unique.
        Use ArcPy cursor token names for object IDs and geometry objects/properties.

    Args:
        dataset_path: Path to dataset.
        delete_ids: ID sequences for features to delete. If `id_field_names` contains
            only one field, IDs may be provided as single values rather than sequences.
        id_field_names: Names of the feature ID fields.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each delete-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Delete features in `%s` with given IDs.",
            dataset_path)
    id_field_names = list(id_field_names)
    if inspect.isgeneratorfunction(delete_ids):
        delete_ids = delete_ids()
    ids = set()
    for _id in delete_ids:
        if isinstance(_id, Iterable) and not isinstance(_id, str):
            ids.add(tuple(_id))
        else:
            ids.add((_id, ))
    states = Counter()
    if ids:
        # ArcPy2.8.0: Convert Path to str.
        cursor = arcpy.da.UpdateCursor(str(dataset_path),
                                       field_names=id_field_names)
        session = Editing(
            Dataset(dataset_path).workspace_path, use_edit_session)
        with session, cursor:
            for row in cursor:
                _id = tuple(row)
                if _id in ids:
                    cursor.deleteRow()
                    states["deleted"] += 1
    else:
        LOG.log(log_level, "No IDs provided.")
        states["deleted"] = 0
    states["unchanged"] = dataset.feature_count(dataset_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Delete.")
    return states
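A sketch of both ID shapes `delete_by_id` accepts; paths and field names are hypothetical:

from pathlib import Path

parcels = Path("C:/data/example.gdb/parcels")  # Hypothetical dataset.
# Single-field IDs may be passed as bare values...
delete_by_id(parcels, delete_ids=[101, 102, 103], id_field_names=["parcel_id"])
# ...while multi-field IDs are passed as sequences.
delete_by_id(
    parcels,
    delete_ids=[("04", 101), ("04", 102)],
    id_field_names=["district", "parcel_id"],
)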
Example #5
def update_by_value(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    value: Any,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by assigning a given value.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        value: Value to assign.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.

    Raises:
        RuntimeError: If attribute cannot be updated.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by given value.",
        dataset_path,
        field_name,
    )
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=[field_name],
        where_clause=dataset_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for (old_value, ) in cursor:
            if same_value(old_value, value):
                states["unchanged"] += 1
            else:
                try:
                    cursor.updateRow([value])
                    states["altered"] += 1
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Update cursor failed: Offending value: `{value}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
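A usage sketch: flag a subset of rows with a constant value (hypothetical path, field, and where-clause):

from pathlib import Path

states = update_by_value(
    Path("C:/data/example.gdb/roads"),  # Hypothetical dataset.
    "review_flag",
    value="Y",
    dataset_where_sql="last_edited_date IS NULL",
)
# `states` tallies `altered` & `unchanged` attributes.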
Example #6
def densify(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    distance: Union[float, int],
    only_curve_features: bool = False,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Add vertices at a given distance along feature geometry segments.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        distance: Interval at which to add vertices, in units of the dataset.
        only_curve_features: Only densify curve features if True.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each densify-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Densify feature geometry in `%s`.",
            dataset_path)
    _dataset = Dataset(dataset_path)
    # Densify method on geometry object assumes meters if distance not string-with-unit.
    if _dataset.spatial_reference.linear_unit != "Meter":
        distance_unit = getattr(UNIT,
                                _dataset.spatial_reference.linear_unit.lower())
        distance_with_unit = (distance * distance_unit).to(
            UNIT.meter) / UNIT.meter
    else:
        # Dataset units are already meters; use the given distance as-is.
        distance_with_unit = distance
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert Path to str.
        in_table=str(dataset_path),
        field_names=["SHAPE@"],
        where_clause=dataset_where_sql,
    )
    session = Editing(_dataset.workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for (old_geometry, ) in cursor:
            if old_geometry:
                if only_curve_features and not old_geometry.hasCurves:
                    continue

                new_geometry = old_geometry.densify(
                    method="GEODESIC", distance=distance_with_unit)
                cursor.updateRow((new_geometry, ))
                states["densified"] += 1
            else:
                states["unchanged"] += 1
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Densify.")
    return states
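A usage sketch; the distance is in the dataset's linear units (hypothetical paths):

from pathlib import Path

# Add a vertex every 50 dataset-units along each segment.
densify(Path("C:/data/example.gdb/contours"), distance=50)
# Or densify only features with true curves.
densify(Path("C:/data/example.gdb/contours"), distance=50, only_curve_features=True)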
Example #7
def eliminate_interior_rings(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Eliminate interior rings of polygon features.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each ring-eliminate-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Eliminate interior rings in `%s`.",
            dataset_path)
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert Path to str.
        in_table=str(dataset_path),
        field_names=["SHAPE@"],
        where_clause=dataset_where_sql,
    )
    _dataset = Dataset(dataset_path)
    session = Editing(_dataset.workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for (old_geometry, ) in cursor:
            if not any(None in part for part in old_geometry):
                states["unchanged"] += 1
                continue

            parts = arcpy.Array()
            for old_part in old_geometry:
                if None not in old_part:
                    parts.append(old_part)
                else:
                    new_part = arcpy.Array()
                    for point in old_part:
                        if not point:
                            break

                        new_part.append(point)
                    parts.append(new_part)
            new_geometry = arcpy.Polygon(parts,
                                         _dataset.spatial_reference.object)
            cursor.updateRow([new_geometry])
            states["rings eliminated"] += 1
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Eliminate.")
    return states
Example #8
def project(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    spatial_reference_item: SpatialReferenceSourceItem = 4326,
    log_level: int = logging.INFO,
) -> Counter:
    """Project dataset features to a new dataset.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the spatial reference of the output
            geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    spatial_reference = SpatialReference(spatial_reference_item)
    LOG.log(
        log_level,
        "Start: Project `%s` to %s in output `%s`.",
        dataset_path,
        spatial_reference.name,
        output_path,
    )
    _dataset = Dataset(dataset_path)
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    # Project tool ignores view selections, so we create empty output & insert features.
    dataset.create(
        dataset_path=output_path,
        field_metadata_list=_dataset.user_fields,
        geometry_type=_dataset.geometry_type,
        spatial_reference_item=spatial_reference,
        log_level=logging.DEBUG,
    )
    features.insert_from_path(
        output_path,
        field_names=_dataset.user_field_names,
        source_path=dataset_path,
        source_where_sql=dataset_where_sql,
        log_level=logging.DEBUG,
    )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Project.")
    return states
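A usage sketch: reproject to Web Mercator (EPSG:3857) rather than the default WGS84; paths are hypothetical:

from pathlib import Path

project(
    Path("C:/data/example.gdb/parcels"),  # Hypothetical input.
    output_path=Path("C:/data/output.gdb/parcels_web_mercator"),
    spatial_reference_item=3857,
)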
Example #9
def table_to_points(
    dataset_path: Union[Path, str],
    *,
    x_field_name: str,
    y_field_name: str,
    z_field_name: Optional[str] = None,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    spatial_reference_item: SpatialReferenceSourceItem = 4326,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert coordinate table to a new point dataset.

    Args:
        dataset_path: Path to dataset.
        x_field_name: Name of field with x-coordinate.
        y_field_name: Name of field with y-coordinate.
        z_field_name: Name of field with z-coordinate.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the spatial reference of the output
            geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert table rows `%s` to points in output `%s`.",
        dataset_path,
        output_path,
    )
    layer_name = unique_name()
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        arcpy.management.MakeXYEventLayer(
            table=view.name,
            out_layer=layer_name,
            in_x_field=x_field_name,
            in_y_field=y_field_name,
            in_z_field=z_field_name,
            spatial_reference=SpatialReference(spatial_reference_item).object,
        )
        dataset.copy(layer_name, output_path=output_path, log_level=logging.DEBUG)
        arcpy.management.Delete(layer_name)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
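A usage sketch: build a point dataset from longitude/latitude columns (hypothetical paths and field names):

from pathlib import Path

table_to_points(
    Path("C:/data/example.gdb/site_coordinates"),  # Hypothetical table.
    x_field_name="longitude",
    y_field_name="latitude",
    output_path=Path("C:/data/output.gdb/sites"),
    spatial_reference_item=4326,
)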
Example #10
def update_from_dicts(
    dataset_path: Union[Path, str],
    field_names: Iterable[str],
    *,
    id_field_names: Iterable[str],
    source_features: Iterable[dict],
    delete_missing_features: bool = True,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update features in dataset from dictionaries.

    Note:
        There is no guarantee that the ID field(s) are unique.
        Use ArcPy cursor token names for object IDs and geometry objects/properties.

    Args:
        dataset_path: Path to dataset.
        field_names: Names of fields for update. Names must be present keys in
            `source_features` elements.
        id_field_names: Names of the feature ID fields. Names must be present keys in
            `source_features` elements.
        source_features: Features from which to source updates.
        delete_missing_features: True if update should delete features missing
            from `source_features`.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Update features into `%s` from dictionaries.",
            dataset_path)
    field_names = list(field_names)
    id_field_names = list(id_field_names)
    if inspect.isgeneratorfunction(source_features):
        source_features = source_features()
    states = update_from_iters(
        dataset_path,
        field_names=id_field_names + field_names,
        id_field_names=id_field_names,
        source_features=((feature[field_name]
                          for field_name in id_field_names + field_names)
                         for feature in source_features),
        delete_missing_features=delete_missing_features,
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
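A usage sketch; each source dictionary must carry both the ID key(s) and the update keys (all names hypothetical):

from pathlib import Path

source_features = [
    {"facility_id": "A-1", "name": "North Plant", "status": "OPEN"},
    {"facility_id": "B-2", "name": "South Plant", "status": "CLOSED"},
]
update_from_dicts(
    Path("C:/data/example.gdb/facilities"),  # Hypothetical dataset.
    field_names=["name", "status"],
    id_field_names=["facility_id"],
    source_features=source_features,
)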
Example #11
def keep_by_location(
    dataset_path: Union[Path, str],
    *,
    location_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    location_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Keep features where geometry overlaps location-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        location_path: Path to location-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        location_where_sql: SQL where-clause for location-dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each keep-state.
    """
    dataset_path = Path(dataset_path)
    location_path = Path(location_path)
    LOG.log(
        log_level,
        "Start: Keep features in `%s` where location overlaps `%s`.",
        dataset_path,
        location_path,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    location_view = DatasetView(location_path,
                                dataset_where_sql=location_where_sql)
    with session, view, location_view:
        arcpy.management.SelectLayerByLocation(
            in_layer=view.name,
            overlap_type="INTERSECT",
            select_features=location_view.name,
            selection_type="NEW_SELECTION",
        )
        arcpy.management.SelectLayerByLocation(
            in_layer=view.name, selection_type="SWITCH_SELECTION")
        states["deleted"] = delete(view.name,
                                   log_level=logging.DEBUG)["deleted"]
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Keep.")
    return states
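A usage sketch: keep only the features intersecting a boundary dataset (hypothetical paths):

from pathlib import Path

keep_by_location(
    Path("C:/data/example.gdb/addresses"),  # Hypothetical dataset.
    location_path=Path("C:/data/example.gdb/city_boundary"),
)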
Example #12
def erase_features(
    dataset_path: Union[Path, str],
    *,
    erase_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    erase_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    log_level: int = logging.INFO,
) -> Counter:
    """Erase feature geometry where it overlaps erase-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        erase_path: Path to erase-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        erase_where_sql: SQL where-clause for erase-dataset subselection.
        output_path: Path to output dataset.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    erase_path = Path(erase_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Erase features in `%s` where overlapping features in `%s`.",
        dataset_path,
        erase_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    erase_view = DatasetView(erase_path,
                             field_names=[],
                             dataset_where_sql=erase_where_sql)
    with view, erase_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Erase(
            in_features=view.name,
            erase_features=erase_view.name,
            out_feature_class=str(output_path),
        )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Erase.")
    return states
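A usage sketch: punch water-body geometry out of parcels (hypothetical paths):

from pathlib import Path

erase_features(
    Path("C:/data/example.gdb/parcels"),  # Hypothetical input.
    erase_path=Path("C:/data/example.gdb/water_bodies"),
    output_path=Path("C:/data/output.gdb/parcels_dry_land"),
)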
Example #13
def update_by_domain_code(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    code_field_name: str,
    domain_name: str,
    domain_workspace_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values using a coded-values domain.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        code_field_name: Name of field with related domain code.
        domain_name: Name of domain.
        domain_workspace_path: Path of the workspace the domain is in.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by domain code in `%s` using domain `%s`.",
        dataset_path,
        field_name,
        code_field_name,
        domain_name,
    )
    states = update_by_mapping(
        dataset_path,
        field_name,
        mapping=Domain(domain_workspace_path, domain_name).code_description,
        key_field_names=[code_field_name],
        dataset_where_sql=dataset_where_sql,
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
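A usage sketch: write coded-domain descriptions next to their codes (hypothetical paths, fields, and domain name):

from pathlib import Path

update_by_domain_code(
    Path("C:/data/example.gdb/signs"),  # Hypothetical dataset.
    "condition_description",
    code_field_name="condition_code",
    domain_name="SignCondition",
    domain_workspace_path=Path("C:/data/example.gdb"),
)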
Example #14
def clip(
    dataset_path: Union[Path, str],
    *,
    clip_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    clip_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    log_level: int = logging.INFO,
) -> Counter:
    """Clip feature geometry where it overlaps clip-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        clip_path: Path to clip-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        clip_where_sql: SQL where-clause for clip-dataset subselection.
        output_path: Path to output dataset.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    clip_path = Path(clip_path)
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Clip features in `%s` where overlapping `%s`.",
        dataset_path,
        clip_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    clip_view = DatasetView(clip_path, dataset_where_sql=clip_where_sql)
    with view, clip_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Clip(
            in_features=view.name,
            clip_features=clip_view.name,
            out_feature_class=str(output_path),
        )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Clip.")
    return states
Example #15
def lines_to_vertex_points(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    endpoints_only: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from lines to points at every vertex.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        endpoints_only: Output points should include line endpoints only if True.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert lines in `%s` to vertex points in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        arcpy.management.FeatureVerticesToPoints(
            in_features=view.name,
            # ArcPy2.8.0: Convert Path to str.
            out_feature_class=str(output_path),
            point_location="ALL" if not endpoints_only else "BOTH_ENDS",
        )
    dataset.delete_field(output_path,
                         field_name="ORIG_FID",
                         log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
Example #16
def planarize(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Planarize feature geometry into lines.

    Note:
        This method does not make topological linework. However, it does carry all
        attributes with it, rather than just an ID attribute.

        Since this method breaks the new line geometry at intersections, it can be
        useful for splitting line features that cross one another.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Planarize geometry in `%s` to lines in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.FeatureToLine(in_features=view.name,
                                       out_feature_class=str(output_path),
                                       attributes=True)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Planarize.")
    return states
Example #17
    def load(
        self,
        dataset_path: Union[Path, str],
        *,
        preserve_features: bool = False,
        use_edit_session: bool = False,
    ) -> TProcedure:
        """Load features from transform-dataset to load-dataset.

        Args:
            dataset_path: Path to dataset.
            preserve_features: Keep current features in load-dataset if True;
                remove them before adding transform-features if False.
            use_edit_session: True if edits are to be made in an edit session.

        Returns:
            Reference to instance.
        """
        dataset_path = Path(dataset_path)
        LOG.info("Start: Load `%s`.", dataset_path)
        states = Counter()
        # Load to an existing dataset.
        if dataset.is_valid(dataset_path):
            if not preserve_features:
                states["deleted"] = features.delete(
                    dataset_path,
                    use_edit_session=use_edit_session,
                    log_level=logging.DEBUG,
                )["deleted"]
            states["inserted"] = features.insert_from_path(
                dataset_path,
                source_path=self.transform_path,
                use_edit_session=use_edit_session,
                log_level=logging.DEBUG,
            )["inserted"]
        # Load to a new dataset.
        else:
            states["copied"] = dataset.copy(
                self.transform_path,
                output_path=dataset_path,
                log_level=logging.DEBUG).feature_count
        log_entity_states("features", states, logger=LOG)
        LOG.info("End: Load.")
        return self
Example #18
def points_to_thiessen_polygons(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from points to Thiessen polygons.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert points in `%s` to Thiessen polygons in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.CreateThiessenPolygons(
            in_features=view.name,
            out_feature_class=str(output_path),
            fields_to_copy="ALL",
        )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
Example #19
def split_lines_at_vertices(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Split lines into smaller lines between vertices.

    The original dataset can be lines or polygons. Polygons will be split along their
    rings.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Split line geometry in `%s` into lines between vertices in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.SplitLine(in_features=view.name,
                                   out_feature_class=str(output_path))
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Split.")
    return states
Example #20
    def update(
        self,
        dataset_path: Union[Path, str],
        *,
        field_names: Optional[Iterable[str]] = None,
        id_field_names: Iterable[str],
        delete_missing_features: bool = True,
        use_edit_session: bool = False,
    ) -> TProcedure:
        """Update features from transform- to load-dataset.

        Args:
            dataset_path: Path to dataset.
            field_names: Names of fields for update. Fields must exist in both datasets.
                If set to None, all user fields present in both datasets will be
                updated, along with the geometry field (if present).
            id_field_names: Names of the feature ID fields. Fields must exist in both
                datasets.
            delete_missing_features: True if update should delete features missing from
                source dataset.
            use_edit_session: True if edits are to be made in an edit session.

        Returns:
            Reference to instance.
        """
        dataset_path = Path(dataset_path)
        LOG.info("Start: Update `%s`.", dataset_path)
        states = features.update_from_path(
            dataset_path,
            field_names=field_names,
            id_field_names=id_field_names,
            source_path=self.transform_path,
            delete_missing_features=delete_missing_features,
            use_edit_session=use_edit_session,
            log_level=logging.DEBUG,
        )
        log_entity_states("features", states, logger=LOG)
        LOG.info("End: Update.")
        return self
Example #21
def insert_from_iters(
    dataset_path: Union[Path, str],
    field_names: Iterable[str],
    *,
    source_features: Iterable[Sequence[Any]],
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Insert features into dataset from sequences.

    Args:
        dataset_path: Path to dataset.
        field_names: Names of fields for insert. Names must be in the same order as
            their corresponding attributes in `source_features` elements.
        source_features: Features to insert.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each insert-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Insert features into `%s` from sequences.",
            dataset_path)
    field_names = list(field_names)
    if inspect.isgeneratorfunction(source_features):
        source_features = source_features()
    # ArcPy2.8.0: Convert Path to str.
    cursor = arcpy.da.InsertCursor(in_table=str(dataset_path),
                                   field_names=field_names)
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for row in source_features:
            cursor.insertRow(tuple(row))
            states["inserted"] += 1
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Insert.")
    return states
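A usage sketch; each row sequence must match the order of `field_names` (all names hypothetical):

from pathlib import Path

insert_from_iters(
    Path("C:/data/example.gdb/readings"),  # Hypothetical dataset.
    field_names=["station_id", "reading_value"],
    source_features=[("S-01", 4.2), ("S-02", 3.7)],
)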
Example #22
def buffer(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    distance: Union[float, int],
    log_level: int = logging.INFO,
) -> Counter:
    """Buffer features a given distance & (optionally) dissolve on given fields.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        distance: Distance to buffer from feature, in the units of the dataset.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(log_level, "Start: Buffer features in `%s`.", dataset_path)
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Buffer(
            in_features=view.name,
            out_feature_class=str(output_path),
            buffer_distance_or_field=distance,
        )
    for field_name in ["BUFF_DIST", "ORIG_FID"]:
        dataset.delete_field(output_path, field_name=field_name)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Buffer.")
    return states
Example #23
def insert_from_dicts(
    dataset_path: Union[Path, str],
    field_names: Iterable[str],
    *,
    source_features: Iterable[dict],
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Insert features into dataset from dictionaries.

    Args:
        dataset_path: Path to dataset.
        field_names: Names of fields for insert. Names must be present keys in
            `source_features` elements.
        source_features: Features to insert.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each insert-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Insert features into `%s` from dictionaries.",
            dataset_path)
    field_names = list(field_names)
    if inspect.isgeneratorfunction(source_features):
        source_features = source_features()
    states = insert_from_iters(
        dataset_path,
        field_names,
        source_features=((feature[field_name] for field_name in field_names)
                         for feature in source_features),
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Insert.")
    return states
Example #24
def union(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    union_path: Union[Path, str],
    union_field_name: str,
    dataset_where_sql: Optional[str] = None,
    union_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    replacement_value: Optional[Any] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Assign union attributes.

    Notes:
        Features with multiple union-features will be split.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field to place union values.
        union_path: Path to union-dataset.
        union_field_name: Name of union-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        union_where_sql: SQL where-clause for the union-dataset subselection.
        output_path: Path to output dataset.
        replacement_value: Value to replace a present union-field value with. If set to
            None, no replacement will occur.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    union_path = Path(union_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Union-set attributes in `%s.%s` by features/values in `%s.%s`.",
        dataset_path,
        field_name,
        union_path,
        union_field_name,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    # Do not include any field names - we do not want them added to output.
    union_view = DatasetView(union_path,
                             field_names=[],
                             dataset_where_sql=union_where_sql)
    with view, union_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Union(
            in_features=[view.name, union_view.name],
            out_feature_class=str(output_path),
            join_attributes="ALL",
        )
    fid_field_names = [
        name for name in Dataset(output_path).field_names
        if name.startswith("FID_")
    ]
    if replacement_value is not None:
        attributes.update_by_value(
            output_path,
            field_name,
            value=replacement_value,
            dataset_where_sql=f"{fid_field_names[-1]} <> -1",
            log_level=logging.DEBUG,
        )
    else:
        attributes.update_by_joined_value(
            output_path,
            field_name,
            key_field_names=[fid_field_names[-1]],
            join_dataset_path=union_path,
            join_field_name=union_field_name,
            join_key_field_names=["OID@"],
            dataset_where_sql=f"{fid_field_names[-1]} <> -1",
            join_dataset_where_sql=union_where_sql,
            log_level=logging.DEBUG,
        )
    attributes.update_by_value(
        output_path,
        field_name,
        value=None,
        dataset_where_sql=f"{fid_field_names[-1]} = -1",
        log_level=logging.DEBUG,
    )
    for name in fid_field_names:
        dataset.delete_field(output_path,
                             field_name=name,
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Union.")
    return states
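A usage sketch: tag parcels with the flood-zone designation they fall within; parcels spanning multiple zones are split, per the note above (hypothetical paths and fields):

from pathlib import Path

union(
    Path("C:/data/example.gdb/parcels"),  # Hypothetical input.
    field_name="flood_zone",
    union_path=Path("C:/data/example.gdb/flood_zones"),
    union_field_name="zone_designation",
    output_path=Path("C:/data/output.gdb/parcels_flood"),
)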
Example #25
def spatial_join_by_center(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    join_path: Union[Path, str],
    join_field_name: str,
    dataset_where_sql: Optional[str] = None,
    join_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    replacement_value: Optional[Any] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Spatially-join attributes by their center.

    Notes:
        Features joined with multiple join-features will be duplicated.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field to place joined values.
        join_path: Path to join-dataset.
        join_field_name: Name of join-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        join_where_sql: SQL where-clause for the join-dataset subselection.
        output_path: Path to output dataset.
        replacement_value: Value to replace a present join-field value with. If set to
            None, no replacement will occur.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    join_path = Path(join_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Spatially-join attributes in `%s.%s` by features/values in `%s.%s`.",
        dataset_path,
        field_name,
        join_path,
        join_field_name,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    # Do not include any field names - we do not want them added to output.
    join_view = DatasetView(join_path,
                            field_names=[],
                            dataset_where_sql=join_where_sql)
    with view, join_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.SpatialJoin(
            target_features=view.name,
            join_features=join_view.name,
            out_feature_class=str(output_path),
            join_operation="JOIN_ONE_TO_MANY",
            join_type="KEEP_ALL",
            match_option="HAVE_THEIR_CENTER_IN",
        )
    if replacement_value is not None:
        attributes.update_by_value(
            output_path,
            field_name,
            value=replacement_value,
            dataset_where_sql="JOIN_FID <> -1",
            log_level=logging.DEBUG,
        )
    else:
        attributes.update_by_joined_value(
            output_path,
            field_name,
            key_field_names=["JOIN_FID"],
            join_dataset_path=join_path,
            join_field_name=join_field_name,
            join_key_field_names=["OID@"],
            dataset_where_sql="JOIN_FID <> -1",
            join_dataset_where_sql=join_where_sql,
            log_level=logging.DEBUG,
        )
    attributes.update_by_value(
        output_path,
        field_name,
        value=None,
        dataset_where_sql="JOIN_FID = -1",
        log_level=logging.DEBUG,
    )
    for name in ["Join_Count", "TARGET_FID", "JOIN_FID"]:
        dataset.delete_field(output_path,
                             field_name=name,
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Join.")
    return states
Example #26
def update_rows(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    id_field_names: Iterable[str],
    cmp_dataset_path: Union[Path, str],
    cmp_field_name: Optional[str] = None,
    cmp_id_field_names: Optional[Iterable[str]] = None,
    cmp_date: Optional[Union[date, _datetime]] = None,
    date_initiated_field_name: str = "date_initiated",
    date_expired_field_name: str = "date_expired",
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Add field value changes to tracking dataset from comparison dataset.

    Args:
        dataset_path: Path to tracking dataset.
        field_name: Name of field with tracked attribute.
        id_field_names: Names of the feature ID fields.
        cmp_dataset_path: Path to comparison dataset.
        cmp_field_name: Name of field with tracked attribute in comparison dataset. If
            set to None, will assume same as `field_name`.
        cmp_id_field_names: Names of the feature ID fields in comparison dataset. If set
            to None, will assume same as `id_field_names`.
        cmp_date: Date to mark comparison change. If set to None, will set to the date
            of execution.
        date_initiated_field_name: Name of tracking-row-initiated date field.
        date_expired_field_name: Name of tracking-row-expired date field.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    cmp_dataset_path = Path(cmp_dataset_path)
    LOG.log(
        log_level,
        "Start: Update tracking rows in `%s` from `%s`.",
        dataset_path,
        cmp_dataset_path,
    )
    id_field_names = list(id_field_names)
    if cmp_field_name is None:
        cmp_field_name = field_name
    cmp_id_field_names = (id_field_names if cmp_id_field_names is None else
                          list(cmp_id_field_names))
    if cmp_date is None:
        cmp_date = date.today()
    current_where_sql = f"{date_expired_field_name} IS NULL"
    id_current_value = {
        row[:-1]: row[-1]
        for row in features.as_tuples(
            dataset_path,
            field_names=id_field_names + [field_name],
            dataset_where_sql=current_where_sql,
        )
    }
    id_cmp_value = {
        row[:-1]: row[-1]
        for row in features.as_tuples(cmp_dataset_path,
                                      field_names=cmp_id_field_names +
                                      [cmp_field_name])
    }
    changed_ids = set()
    expired_ids = {_id for _id in id_current_value if _id not in id_cmp_value}
    new_rows = []
    for _id, value in id_cmp_value.items():
        if _id not in id_current_value:
            new_rows.append(_id + (value, cmp_date))
        elif not same_value(value, id_current_value[_id]):
            changed_ids.add(_id)
            new_rows.append(_id + (value, cmp_date))
    # ArcPy2.8.0: Convert Path to str.
    cursor = arcpy.da.UpdateCursor(
        in_table=str(dataset_path),
        field_names=id_field_names + [field_name, date_expired_field_name],
        where_clause=current_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for row in cursor:
            _id = tuple(row[:len(id_field_names)])
            if _id in changed_ids or _id in expired_ids:
                cursor.updateRow(_id + (row[-2], cmp_date))
            else:
                states["unchanged"] += 1
    features.insert_from_iters(
        dataset_path,
        field_names=id_field_names + [field_name, date_initiated_field_name],
        source_features=new_rows,
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    states["changed"] = len(changed_ids)
    states["expired"] = len(expired_ids)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
Example #27
def consolidate_rows(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    id_field_names: Iterable[str],
    date_initiated_field_name: str = "date_initiated",
    date_expired_field_name: str = "date_expired",
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Consolidate tracking dataset rows where the value does not actually change.

    Useful for quick-loaded point-in-time values, or for processing hand-altered rows.

    Args:
        dataset_path: Path to tracking dataset.
        field_name: Name of field with tracked attribute.
        id_field_names: Names of the feature ID fields.
        date_initiated_field_name: Name of tracking-row-initiated date field.
        date_expired_field_name: Name of tracking-row-expired date field.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Consolidate tracking rows in `%s`.",
            dataset_path)
    id_field_names = list(id_field_names)
    field_names = id_field_names + [
        date_initiated_field_name,
        date_expired_field_name,
        field_name,
    ]
    id_rows = defaultdict(list)
    for row in features.as_dicts(dataset_path, field_names=field_names):
        _id = tuple(row[name] for name in id_field_names)
        id_rows[_id].append(row)
    for _id in list(id_rows):
        rows = sorted(id_rows[_id], key=itemgetter(date_initiated_field_name))
        for i, row in enumerate(rows):
            if i == 0 or row[date_initiated_field_name] is None:
                continue

            date_initiated = row[date_initiated_field_name]
            value = row[field_name]
            previous_row = rows[i - 1]
            previous_value = previous_row[field_name]
            previous_date_expired = previous_row[date_expired_field_name]
            if same_value(value, previous_value) and same_value(
                    date_initiated, previous_date_expired):
                # Move previous row date initiated to current row & clear from previous.
                row[date_initiated_field_name] = previous_row[
                    date_initiated_field_name]
                previous_row[date_initiated_field_name] = None
        id_rows[_id] = [
            row for row in rows if row[date_initiated_field_name] is not None
        ]
    states = features.update_from_dicts(
        dataset_path,
        field_names=field_names,
        # In tracking dataset, ID is ID + date_initiated.
        id_field_names=id_field_names + [date_initiated_field_name],
        source_features=chain(*id_rows.values()),
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    log_entity_states("tracking rows", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Consolidate.")
    return states
Example #28
def update_node_ids(
    dataset_path: Union[Path, str],
    *,
    from_id_field_name: str,
    to_id_field_name: str,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update node ID values.

    Args:
        dataset_path: Path to the dataset.
        from_id_field_name: Name of from-node ID field.
        to_id_field_name: Name of to-node ID field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Update node IDs in `%s` (from) & `%s` (to) for `%s`.",
        from_id_field_name,
        to_id_field_name,
        dataset_path,
    )
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=["OID@", from_id_field_name, to_id_field_name],
        where_clause=dataset_where_sql,
    )
    oid_node = id_node_map(
        dataset_path,
        from_id_field_name=from_id_field_name,
        to_id_field_name=to_id_field_name,
        update_nodes=True,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for old_feature in cursor:
            oid = old_feature[0]
            new_feature = (oid, oid_node[oid]["from"], oid_node[oid]["to"])
            if same_feature(old_feature, new_feature):
                states["unchanged"] += 1
            else:
                try:
                    cursor.updateRow(new_feature)
                    states["altered"] += 1
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Row failed to update. Offending row: `{new_feature}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
Example #29
def dissolve_features(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    dissolve_field_names: Optional[Iterable[str]] = None,
    all_fields_in_output: bool = False,
    allow_multipart: bool = True,
    unsplit_lines: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Dissolve feature geometry that share value in given fields.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        dissolve_field_names: Names of fields to base dissolve on.
        all_fields_in_output: All fields in the dataset will persist in the output
            dataset if True. Otherwise, only the dissolve fields will persist. Non-
            dissolve fields will be assigned their default values.
        allow_multipart: Allow multipart features in output if True.
        unsplit_lines: Merge line features when endpoints meet without crossing features
            if True.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    if dissolve_field_names is not None:
        dissolve_field_names = list(dissolve_field_names)
    LOG.log(
        log_level,
        "Start: Dissolve features in `%s` on fields `%s`.",
        dataset_path,
        dissolve_field_names,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(
        dataset_path,
        field_names=dissolve_field_names,
        dataset_where_sql=dataset_where_sql,
    )
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.Dissolve(
            in_features=view.name,
            out_feature_class=str(output_path),
            dissolve_field=dissolve_field_names,
            multi_part=allow_multipart,
            unsplit_lines=unsplit_lines,
        )
    if all_fields_in_output:
        for _field in Dataset(dataset_path).user_fields:
            # Cannot add a non-nullable field to existing features.
            _field.is_nullable = True
            dataset.add_field(
                output_path,
                exist_ok=True,
                log_level=logging.DEBUG,
                **_field.field_as_dict,
            )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Dissolve.")
    return states
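A usage sketch: merge parcels sharing an owner into single-part features (hypothetical paths and field):

from pathlib import Path

dissolve_features(
    Path("C:/data/example.gdb/parcels"),  # Hypothetical input.
    output_path=Path("C:/data/output.gdb/parcels_by_owner"),
    dissolve_field_names=["owner_name"],
    allow_multipart=False,
)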
Example #30
def polygons_to_lines(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    id_field_name: Optional[str] = None,
    make_topological: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from polygons to lines.

    Note:
        If `make_topological` is set to True, shared outlines will be a single, separate
        feature. Note that one cannot pass attributes to a topological transformation
        (as the values would not apply to all adjacent features).

        If an ID field name is specified, the output dataset will identify the input
        features that defined each line feature using the name & values from the
        provided field. This option is ignored if the output is non-topological lines,
        as the field will pass over with the rest of the attributes.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        id_field_name: Name of ID field to apply on topological lines.
        make_topological: Make line output topological, or merged where lines overlap.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert polgyons in `%s` to lines in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.PolygonToLine(
            in_features=view.name,
            out_feature_class=str(output_path),
            neighbor_option=("IDENTIFY_NEIGHBORS"
                             if make_topological else "IGNORE_NEIGHBORS"),
        )
    if make_topological:
        _dataset = Dataset(dataset_path)
        for side in ["left", "right"]:
            oid_key = f"{side.upper()}_FID"
            if id_field_name:
                id_field = next(
                    _field for _field in _dataset.fields
                    if _field.name.lower() == id_field_name.lower())
                id_field.name = f"{side.upper()}_{id_field_name}"
                # Cannot create an OID-type field, so force to long.
                if id_field.type.upper() == "OID":
                    id_field.type = "LONG"
                dataset.add_field(output_path,
                                  log_level=logging.DEBUG,
                                  **id_field.field_as_dict)
                attributes.update_by_joined_value(
                    output_path,
                    field_name=id_field.name,
                    key_field_names=[oid_key],
                    join_dataset_path=dataset_path,
                    join_field_name=id_field_name,
                    join_key_field_names=[_dataset.oid_field_name],
                    log_level=logging.DEBUG,
                )
            dataset.delete_field(output_path,
                                 field_name=oid_key,
                                 log_level=logging.DEBUG)
    else:
        dataset.delete_field(output_path,
                             field_name="ORIG_FID",
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
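A usage sketch: topological boundaries tagged with the neighboring zones' IDs via the (hypothetical) `zone_id` field:

from pathlib import Path

polygons_to_lines(
    Path("C:/data/example.gdb/zones"),  # Hypothetical input.
    output_path=Path("C:/data/output.gdb/zone_boundaries"),
    id_field_name="zone_id",
    make_topological=True,
)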