Example #1
def copy(
    dataset_path: Union[Path, str],
    *,
    field_names: Optional[Iterable[str]] = None,
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    overwrite: bool = False,
    schema_only: bool = False,
    log_level: int = logging.INFO,
) -> Dataset:
    """Copy features into a new dataset.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        field_names: Collection of field names to include in output. If set to None, all
            fields will be included.
        dataset_where_sql: SQL where-clause property for dataset subselection.
        overwrite: Overwrite existing dataset at output path if True.
        schema_only: Copy only the schema--omitting data--if True.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for output dataset.

    Raises:
        ValueError: If dataset type not supported.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    _dataset = Dataset(dataset_path)
    if field_names is not None:
        field_names = list(field_names)
    else:
        field_names = _dataset.user_field_names
    LOG.log(log_level, "Start: Copy dataset `%s` to `%s`.", dataset_path,
            output_path)
    view = DatasetView(
        dataset_path,
        field_names=field_names,
        dataset_where_sql=dataset_where_sql if not schema_only else "0 = 1",
    )
    with view:
        if overwrite and arcpy.Exists(output_path):
            delete(output_path, log_level=logging.DEBUG)
        if _dataset.is_spatial:
            # ArcPy2.8.0: Convert to str.
            arcpy.management.CopyFeatures(in_features=view.name,
                                          out_feature_class=str(output_path))
        elif _dataset.is_table:
            # ArcPy2.8.0: Convert to str.
            arcpy.management.CopyRows(in_rows=view.name,
                                      out_table=str(output_path))
        else:
            raise ValueError(f"`{dataset_path}` unsupported dataset type.")

    LOG.log(log_level, "End: Copy.")
    return Dataset(output_path)
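
A minimal usage sketch for `copy` (the geodatabase path and field names are hypothetical):

# Copy a schema-only template of a feature class, replacing any prior output.
copy(
    "C:/data/example.gdb/roads",
    field_names=["name", "speed_limit"],
    output_path="C:/data/example.gdb/roads_template",
    overwrite=True,
    schema_only=True,
)
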
Example #2
    def __init__(
        self,
        dataset_path: Union[Path, str],
        *,
        field_names: Optional[Iterable[str]] = None,
        dataset_where_sql: Optional[str] = None,
        copy_path: Optional[Union[Path, str]] = None,
        force_nonspatial: bool = False,
    ) -> None:
        """Initialize instance.

        Note:
            To make a temp dataset without copying any template rows:
            `dataset_where_sql="0 = 1"`

        Args:
            dataset_path: Path to original dataset.
            copy_path: Path to copy dataset. If set to None, path will be auto-
                generated.
            field_names: Collection of field names to include in copy. If set to None,
                all fields will be included.
            dataset_where_sql: SQL where-clause property for original dataset
                subselection.
            force_nonspatial: Forces view to be nonspatial if True.
        """
        self.copy_path = Path(copy_path) if copy_path else unique_path(
            "TempCopy")
        self.dataset = Dataset(path=dataset_path)
        self.dataset_path = Path(dataset_path)
        self.dataset_where_sql = dataset_where_sql
        self.field_names = (self.dataset.field_names
                            if field_names is None else list(field_names))
        self.is_spatial = self.dataset.is_spatial and not force_nonspatial
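
Following the Note above, a minimal construction sketch (the enclosing class name `TempDatasetCopy` and the path are hypothetical; the copy itself is presumably made when the object is entered as a context manager, as `DatasetView` is in Example #1):

temp = TempDatasetCopy(
    "C:/data/example.gdb/parcels",
    dataset_where_sql="0 = 1",  # Template schema only: no rows copied.
)
temp.copy_path  # Auto-generated unique path, since copy_path was not given.
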
Example #3
    def __init__(
        self,
        dataset_path: Union[Path, str],
        *,
        field_names: Optional[Iterable[str]] = None,
        dataset_where_sql: Optional[str] = None,
        force_nonspatial: bool = False,
        name: Optional[str] = None,
    ) -> None:
        """Initialize instance.

        Args:
            dataset_path: Path to dataset.
            name: Name of view. If set to None, name will be auto-generated.
            field_names: Collection of field names to include in view. If set to None,
                all fields will be included.
            dataset_where_sql: SQL where-clause for dataset subselection.
            force_nonspatial: Forces view to be nonspatial if True.
        """
        self.dataset = Dataset(path=dataset_path)
        self.dataset_path = Path(dataset_path)
        self._dataset_where_sql = dataset_where_sql
        self.field_names = (self.dataset.field_names
                            if field_names is None else list(field_names))
        self.is_spatial = self.dataset.is_spatial and not force_nonspatial
        self.name = name if name else unique_name("View")
Example #4
def delete(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Delete features in dataset.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each delete-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Delete features from `%s`.", dataset_path)
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view, session:
        states["deleted"] = view.count
        arcpy.management.DeleteRows(in_rows=view.name)
        states["remaining"] = dataset.feature_count(dataset_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Delete.")
    return states
Example #5
def points_to_multipoints(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from points to multipoints.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert points in `%s` to multipoints in output `%s`.",
        dataset_path,
        output_path,
    )
    _dataset = Dataset(dataset_path)
    # ArcPy2.8.0: Convert Path to str (2x).
    arcpy.management.CreateFeatureclass(
        out_path=str(output_path.parent),
        out_name=output_path.name,
        geometry_type="MULTIPOINT",
        template=str(dataset_path),
        spatial_reference=_dataset.spatial_reference.object,
    )
    field_names = _dataset.user_field_names + ["SHAPE@"]
    # ArcPy2.8.0: Convert Path to str.
    multipoint_cursor = arcpy.da.InsertCursor(in_table=str(output_path),
                                              field_names=field_names)
    # ArcPy2.8.0: Convert Path to str.
    point_cursor = arcpy.da.SearchCursor(
        in_table=str(dataset_path),
        field_names=field_names,
        where_clause=dataset_where_sql,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    with multipoint_cursor, point_cursor:
        for point_feature in point_cursor:
            multipoint_geometry = arcpy.Multipoint(
                point_feature[-1].firstPoint)
            multipoint_feature = point_feature[:-1] + (multipoint_geometry, )
            multipoint_cursor.insertRow(multipoint_feature)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
Example #6
def delete_by_id(
    dataset_path: Union[Path, str],
    delete_ids: Iterable[Union[Sequence[Any], Any]],
    id_field_names: Iterable[str],
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Delete features in dataset with given IDs.

    Note:
        There is no guarantee that the ID field(s) are unique.
        Use ArcPy cursor token names for object IDs and geometry objects/properties.

    Args:
        dataset_path: Path to dataset.
        delete_ids: ID sequences for features to delete. If `id_field_names` contains
            only one field, IDs may be provided as single values rather than sequences.
        id_field_names: Names of the feature ID fields.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each delete-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Delete features in `%s` with given IDs.",
            dataset_path)
    id_field_names = list(id_field_names)
    if inspect.isgeneratorfunction(delete_ids):
        delete_ids = delete_ids()
    ids = set()
    for _id in delete_ids:
        if isinstance(_id, Iterable) and not isinstance(_id, str):
            ids.add(tuple(_id))
        else:
            ids.add((_id, ))
    states = Counter()
    if ids:
        # ArcPy2.8.0: Convert Path to str.
        cursor = arcpy.da.UpdateCursor(str(dataset_path),
                                       field_names=id_field_names)
        session = Editing(
            Dataset(dataset_path).workspace_path, use_edit_session)
        with session, cursor:
            for row in cursor:
                _id = tuple(row)
                if _id in ids:
                    cursor.deleteRow()
                    states["deleted"] += 1
    else:
        LOG.log(log_level, "No IDs provided.")
        states["deleted"] = 0
    states["unchanged"] = dataset.feature_count(dataset_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Delete.")
    return states
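
A minimal usage sketch for `delete_by_id` (path and field names hypothetical). With a single ID field, IDs may be bare values; with multiple ID fields, each ID is a sequence:

delete_by_id(
    "C:/data/example.gdb/permits",
    delete_ids=["P-1001", "P-1002"],
    id_field_names=["permit_id"],
)
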
Example #7
def update_by_value(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    value: Any,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by assigning a given value.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        value: Value to assign.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.

    Raises:
        RuntimeError: If attribute cannot be updated.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by given value.",
        dataset_path,
        field_name,
    )
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=[field_name],
        where_clause=dataset_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for (old_value, ) in cursor:
            if same_value(old_value, value):
                states["unchanged"] += 1
            else:
                try:
                    cursor.updateRow([value])
                    states["altered"] += 1
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Update cursor failed: Offending value: `{value}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
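
A minimal usage sketch for `update_by_value` (path, field, and where-clause hypothetical):

update_by_value(
    "C:/data/example.gdb/roads",
    "status",
    value="RETIRED",
    dataset_where_sql="closed_date IS NOT NULL",
)
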
Example #8
def update_by_expression(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    expression: str,
    expression_type: str = "Python",
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Field:
    """Update attribute values using a (single) code-expression.

    Wraps arcpy.management.CalculateField.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        expression: String expression to evaluate values from.
        expression_type: Type of code the expression represents. Allowed values:
            "Arcade", "Python", "Python3", and "SQL". Case-insensitive.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Field metadata instance for field with updated attributes.

    Raises:
        AttributeError: If given expression type invalid.
    """
    dataset_path = Path(dataset_path)
    if expression_type.upper() not in ["ARCADE", "PYTHON", "PYTHON3", "SQL"]:
        raise AttributeError("Invalid expression_type")

    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by %s expression `%s`.",
        dataset_path,
        field_name,
        expression_type,
        expression,
    )
    if expression_type.upper() == "PYTHON":
        expression_type = "PYTHON3"
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with session, view:
        arcpy.management.CalculateField(
            in_table=view.name,
            field=field_name,
            expression=expression,
            expression_type=expression_type,
        )
    LOG.log(log_level, "End: Update.")
    return Field(dataset_path, field_name)
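
A minimal usage sketch for `update_by_expression` (path and fields hypothetical); note that "Python" is normalized to "PYTHON3" before the CalculateField call:

update_by_expression(
    "C:/data/example.gdb/roads",
    "name_upper",
    expression="!name!.upper()",
    expression_type="Python",
)
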
Example #9
def eliminate_interior_rings(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Eliminate interior rings of polygon features.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each ring-eliminate-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Eliminate interior rings in `%s`.",
            dataset_path)
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert Path to str.
        in_table=str(dataset_path),
        field_names=["SHAPE@"],
        where_clause=dataset_where_sql,
    )
    _dataset = Dataset(dataset_path)
    session = Editing(_dataset.workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for (old_geometry, ) in cursor:
            if not any(None in part for part in old_geometry):
                states["unchanged"] += 1
                continue

            parts = arcpy.Array()
            for old_part in old_geometry:
                if None not in old_part:
                    parts.append(old_part)
                else:
                    new_part = arcpy.Array()
                    for point in old_part:
                        if not point:
                            break

                        new_part.append(point)
                    parts.append(new_part)
            new_geometry = arcpy.Polygon(parts,
                                         _dataset.spatial_reference.object)
            cursor.updateRow([new_geometry])
            states["rings eliminated"] += 1
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Eliminate.")
    return states
Example #10
def densify(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    distance: Union[float, int],
    only_curve_features: bool = False,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Add vertices at a given distance along feature geometry segments.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        distance: Interval at which to add vertices, in units of the dataset.
        only_curve_features: Only densify curve features if True.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each densify-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Densify feature geometry in `%s`.",
            dataset_path)
    _dataset = Dataset(dataset_path)
    # Densify method on geometry object assumes meters if distance not string-with-unit.
    if _dataset.spatial_reference.linear_unit != "Meter":
        distance_unit = getattr(UNIT,
                                _dataset.spatial_reference.linear_unit.lower())
        distance_with_unit = (distance * distance_unit).to(
            UNIT.meter) / UNIT.meter
    else:
        distance_with_unit = distance
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert Path to str.
        in_table=str(dataset_path),
        field_names=["SHAPE@"],
        where_clause=dataset_where_sql,
    )
    session = Editing(_dataset.workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for (old_geometry, ) in cursor:
            if old_geometry:
                if only_curve_features and not old_geometry.hasCurves:
                    states["unchanged"] += 1
                    continue

                new_geometry = old_geometry.densify(
                    method="GEODESIC", distance=distance_with_unit)
                cursor.updateRow((new_geometry, ))
                states["densified"] += 1
            else:
                states["unchanged"] += 1
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Densify.")
    return states
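
A minimal usage sketch for `densify` (path hypothetical); `distance` is in dataset units and is converted to meters internally when the spatial reference is not meter-based:

densify(
    "C:/data/example.gdb/streams",
    distance=50,
    only_curve_features=True,
)
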
Example #11
def project(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    spatial_reference_item: SpatialReferenceSourceItem = 4326,
    log_level: int = logging.INFO,
) -> Counter:
    """Project dataset features to a new dataset.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the spatial reference of the output
            geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    spatial_reference = SpatialReference(spatial_reference_item)
    LOG.log(
        log_level,
        "Start: Project `%s` to %s in output `%s`.",
        dataset_path,
        spatial_reference.name,
        output_path,
    )
    _dataset = Dataset(dataset_path)
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    # Project tool ignores view selections, so we create empty output & insert features.
    dataset.create(
        dataset_path=output_path,
        field_metadata_list=_dataset.user_fields,
        geometry_type=_dataset.geometry_type,
        spatial_reference_item=spatial_reference,
        log_level=logging.DEBUG,
    )
    features.insert_from_path(
        output_path,
        field_names=_dataset.user_field_names,
        source_path=dataset_path,
        source_where_sql=dataset_where_sql,
        log_level=logging.DEBUG,
    )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Project.")
    return states
Example #12
def remove_all_default_field_values(dataset_path: Union[Path, str],
                                    *,
                                    log_level: int = logging.INFO) -> Dataset:
    """Remove all default field values in dataset.

    Args:
        dataset_path: Path to dataset.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for dataset.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Remove all default field values for dataset `%s`.",
        dataset_path,
    )
    subtype_codes = [
        code
        # ArcPy2.8.0: Convert Path to str.
        for code, _property in arcpy.da.ListSubtypes(str(dataset_path)).items()
        if _property["SubtypeField"]
    ]
    for field in Dataset(dataset_path).fields:
        if field.default_value is None:
            continue

        LOG.log(log_level, "Removing default value for field `%s`.",
                field.name)
        set_default_field_value(
            dataset_path,
            field_name=field.name,
            value=None,
            subtype_codes=subtype_codes,
            log_level=logging.DEBUG,
        )
    LOG.log(log_level, "End: Remove.")
    # Make new Dataset instance to update the field information.
    return Dataset(dataset_path)
Example #13
def keep_by_location(
    dataset_path: Union[Path, str],
    *,
    location_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    location_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Keep features where geometry overlaps location-dataset geometry.

    Args:
        dataset_path: Path to dataset.
        location_path: Path to location-dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        location_where_sql: SQL where-clause for location-dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each keep-state.
    """
    dataset_path = Path(dataset_path)
    location_path = Path(location_path)
    LOG.log(
        log_level,
        "Start: Keep features in `%s` where location overlaps `%s`.",
        dataset_path,
        location_path,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    location_view = DatasetView(location_path,
                                dataset_where_sql=location_where_sql)
    with session, view, location_view:
        arcpy.management.SelectLayerByLocation(
            in_layer=view.name,
            overlap_type="INTERSECT",
            select_features=location_view.name,
            selection_type="NEW_SELECTION",
        )
        arcpy.management.SelectLayerByLocation(
            in_layer=view.name, selection_type="SWITCH_SELECTION")
        states["deleted"] = delete(view.name,
                                   log_level=logging.DEBUG)["deleted"]
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Keep.")
    return states
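
A minimal usage sketch for `keep_by_location` (paths hypothetical); features that do not intersect the location dataset are selected via SWITCH_SELECTION and deleted:

keep_by_location(
    "C:/data/example.gdb/addresses",
    location_path="C:/data/example.gdb/city_limits",
)
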
Example #14
def create(
    dataset_path: Union[Path, str],
    *,
    field_metadata_list: Optional[Iterable[Union[Field, dict]]] = None,
    geometry_type: Optional[str] = None,
    spatial_reference_item: SpatialReferenceSourceItem = 4326,
    log_level: int = logging.INFO,
) -> Dataset:
    """Create new dataset.

    Args:
        dataset_path: Path to dataset.
        field_metadata_list: Collection of field metadata instances or mappings.
        geometry_type: Type of geometry, if a spatial dataset. Will create a nonspatial
            dataset if set to None.
        spatial_reference_item: Item from which the spatial reference of the output
            geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for created dataset.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Create dataset `%s`.", dataset_path)
    if geometry_type:
        if spatial_reference_item is None:
            spatial_reference_item = 4326
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.CreateFeatureclass(
            out_path=str(dataset_path.parent),
            out_name=dataset_path.name,
            geometry_type=geometry_type,
            has_z="DISABLED",
            spatial_reference=SpatialReference(spatial_reference_item).object,
        )
    else:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.CreateTable(
            out_path=str(dataset_path.parent),
            out_name=dataset_path.name,
        )
    if field_metadata_list:
        for field_metadata in field_metadata_list:
            if isinstance(field_metadata, Field):
                field_metadata = field_metadata.field_as_dict
            add_field(dataset_path, log_level=logging.DEBUG, **field_metadata)
    LOG.log(log_level, "End: Create.")
    return Dataset(dataset_path)
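
A minimal usage sketch for `create` (path hypothetical; the mapping keys are whatever keyword arguments `add_field` accepts, assumed here to include name, type, and length):

create(
    "C:/data/example.gdb/sites",
    field_metadata_list=[
        {"name": "site_id", "type": "LONG"},
        {"name": "label", "type": "TEXT", "length": 64},
    ],
    geometry_type="POINT",
    spatial_reference_item=4326,
)
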
Example #15
def is_valid(dataset_path: Union[Path, str]) -> bool:
    """Return True if dataset is extant & valid.

    Args:
        dataset_path: Path to dataset.
    """
    dataset_path = Path(dataset_path)
    exists = dataset_path and arcpy.Exists(dataset=dataset_path)
    if exists:
        try:
            valid = Dataset(dataset_path).is_table
        except IOError:
            valid = False
    else:
        valid = False
    return valid
Example #16
def build_network(network_path: Union[Path, str],
                  *,
                  log_level: int = logging.INFO) -> Dataset:
    """Build network dataset.

    Args:
        network_path: Path to network dataset.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for network dataset.
    """
    network_path = Path(network_path)
    LOG.log(log_level, "Start: Build network `%s`.", network_path)
    # ArcPy2.8.0: Convert Path to str.
    arcpy.nax.BuildNetwork(in_network_dataset=str(network_path))
    LOG.log(log_level, "End: Build.")
    return Dataset(network_path)
Example #17
def delete(dataset_path: Union[Path, str],
           *,
           log_level: int = logging.INFO) -> Dataset:
    """Delete dataset.

    Args:
        dataset_path: Path to dataset.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for now-deleted dataset.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Delete dataset `%s`.", dataset_path)
    _dataset = Dataset(dataset_path)
    # ArcPy2.8.0: Convert to str.
    arcpy.management.Delete(in_data=str(dataset_path))
    LOG.log(log_level, "End: Delete.")
    return _dataset
Example #18
def compress(
    dataset_path: Union[Path, str],
    *,
    bad_allocation_ok: bool = False,
    log_level: int = logging.INFO,
) -> Dataset:
    """Compress dataset.

    Only applicable to file geodatabase datasets.

    Args:
        dataset_path: Path to dataset.
        bad_allocation_ok: If True, do not raise ExecuteError on a bad allocation. "Bad
            allocation" generally occurs when dataset is too big to compress.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for compressed dataset.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Compress dataset `%s`.", dataset_path)
    try:
        # ArcPy2.8.0: Convert to str.
        arcpy.management.CompressFileGeodatabaseData(in_data=str(dataset_path))
    except arcpy.ExecuteError as error:
        # Bad allocation error just means the dataset is too big to compress.
        if str(error) == (
            "bad allocation\nFailed to execute (CompressFileGeodatabaseData).\n"
        ):
            LOG.error("Compress error: bad allocation.")
            if not bad_allocation_ok:
                raise

        else:
            LOG.error("""str(error) = "%s\"""", error)
            LOG.error("""repr(error) = "%r\"""", error)
            raise

    LOG.log(log_level, "End: Compress.")
    return Dataset(dataset_path)
Example #19
def insert_from_iters(
    dataset_path: Union[Path, str],
    field_names: Iterable[str],
    *,
    source_features: Iterable[Sequence[Any]],
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Insert features into dataset from sequences.

    Args:
        dataset_path: Path to dataset.
        field_names: Names of fields for insert. Names must be in the same order as
            their corresponding attributes in `source_features` elements.
        source_features: Features to insert.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each insert-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Insert features into `%s` from sequences.",
            dataset_path)
    field_names = list(field_names)
    if inspect.isgeneratorfunction(source_features):
        source_features = source_features()
    # ArcPy2.8.0: Convert Path to str.
    cursor = arcpy.da.InsertCursor(in_table=str(dataset_path),
                                   field_names=field_names)
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for row in source_features:
            cursor.insertRow(tuple(row))
            states["inserted"] += 1
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Insert.")
    return states
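
A minimal usage sketch for `insert_from_iters` (path and rows hypothetical); each source feature is a sequence whose order matches `field_names`:

insert_from_iters(
    "C:/data/example.gdb/sites",
    field_names=["site_id", "label"],
    source_features=[(1, "Depot"), (2, "Yard")],
)
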
Example #20
def as_dicts(
    dataset_path: Union[Path, str],
    field_names: Optional[Iterable[str]] = None,
    *,
    dataset_where_sql: Optional[str] = None,
    spatial_reference_item: SpatialReferenceSourceItem = None,
) -> Iterator[dict]:
    """Generate dictionaries of feature attribute name to their value.

    Notes:
        Use ArcPy cursor token names for object IDs and geometry objects/properties.

    Args:
        dataset_path: Path to dataset.
        field_names: Names of fields to include in generated dictionary. Names will be
            the keys in the dictionary mapping to their attributes values. If set to
            None, all fields will be included.
        dataset_where_sql: SQL where-clause for dataset subselection.
        spatial_reference_item: Item from which the spatial reference for any geometry
            properties will be derived. If set to None, the spatial reference of the
            dataset will be used.
    """
    dataset_path = Path(dataset_path)
    if field_names:
        field_names = list(field_names)
    else:
        field_names = Dataset(dataset_path).field_names_tokenized
    cursor = arcpy.da.SearchCursor(
        # ArcPy2.8.0: Convert Path to str.
        in_table=str(dataset_path),
        field_names=field_names,
        where_clause=dataset_where_sql,
        spatial_reference=SpatialReference(spatial_reference_item).object,
    )
    with cursor:
        for feature in cursor:
            yield dict(zip(cursor.fields, feature))
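
A minimal usage sketch for `as_dicts` (path and fields hypothetical); the generator yields one field-name-to-value dictionary per feature:

for feature in as_dicts(
    "C:/data/example.gdb/roads",
    field_names=["OID@", "name", "SHAPE@"],
    dataset_where_sql="speed_limit >= 55",
):
    print(feature["name"])
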
Example #21
def closest_facility_route(
    dataset_path: Union[Path, str],
    *,
    id_field_name: str,
    facility_path: Union[Path, str],
    facility_id_field_name: str,
    network_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    facility_where_sql: Optional[str] = None,
    max_cost: Optional[Union[float, int]] = None,
    travel_from_facility: bool = False,
    travel_mode: str,
) -> Iterator[Dict[str, Any]]:
    """Generate route info dictionaries for closest facility to each location feature.

    Args:
        dataset_path: Path to dataset.
        id_field_name: Name of dataset ID field.
        facility_path: Path to facility dataset.
        facility_id_field_name: Name of facility dataset ID field.
        network_path: Path to network dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        facility_where_sql: SQL where-clause for the facility dataset subselection.
        max_cost: Maximum travel cost the search will allow, in the units of the cost
            attribute.
        travel_from_facility: Perform the analysis travelling from the facility if True,
            rather than toward the facility.
        travel_mode: Name of the network travel mode to use. Travel mode must exist in
            the network dataset.

    Yields:
        Closest facility route details.
        Keys:
            * dataset_id
            * facility_id
            * cost - Cost of route, in units of travel mode impedance.
            * geometry - Route geometry, in spatial reference of dataset.

    Raises:
        RuntimeError: When analysis fails.
    """
    dataset_path = Path(dataset_path)
    facility_path = Path(facility_path)
    network_path = Path(network_path)
    analysis = arcpy.nax.ClosestFacility(network_path)
    analysis.defaultImpedanceCutoff = max_cost
    distance_units = UNIT_PLURAL[SpatialReference(dataset_path).linear_unit]
    analysis.distanceUnits = getattr(arcpy.nax.DistanceUnits, distance_units)
    analysis.ignoreInvalidLocations = True
    if travel_from_facility:
        analysis.travelDirection = arcpy.nax.TravelDirection.FromFacility
    # ArcPy2.8.0: Convert Path to str.
    analysis.travelMode = arcpy.nax.GetTravelModes(
        str(network_path))[travel_mode]
    # Load facilities.
    field = Field(
        facility_path,
        Dataset(facility_path).oid_field_name if
        facility_id_field_name.upper() == "OID@" else facility_id_field_name,
    )
    field_description = [
        "source_id",
        field.type if field.type != "OID" else "LONG",
        "#",
        field.length,
        "#",
        "#",
    ]
    analysis.addFields(arcpy.nax.ClosestFacilityInputDataType.Facilities,
                       [field_description])
    cursor = analysis.insertCursor(
        arcpy.nax.ClosestFacilityInputDataType.Facilities,
        field_names=["source_id", "SHAPE@"],
    )
    _features = features.as_tuples(
        facility_path,
        field_names=[facility_id_field_name, "SHAPE@"],
        dataset_where_sql=facility_where_sql,
    )
    with cursor:
        for feature in _features:
            cursor.insertRow(feature)
    # Load dataset locations.
    field = Field(
        dataset_path,
        Dataset(dataset_path).oid_field_name
        if id_field_name.upper() == "OID@" else id_field_name,
    )
    field_description = [
        "source_id",
        field.type if field.type != "OID" else "LONG",
        "#",
        field.length,
        "#",
        "#",
    ]
    analysis.addFields(arcpy.nax.ClosestFacilityInputDataType.Incidents,
                       [field_description])
    cursor = analysis.insertCursor(
        arcpy.nax.ClosestFacilityInputDataType.Incidents,
        field_names=["source_id", "SHAPE@"],
    )
    _features = features.as_tuples(
        dataset_path,
        field_names=[id_field_name, "SHAPE@"],
        dataset_where_sql=dataset_where_sql,
    )
    with cursor:
        for feature in _features:
            cursor.insertRow(feature)
    # Solve & generate.
    result = analysis.solve()
    if not result.solveSucceeded:
        for message in result.solverMessages(arcpy.nax.MessageSeverity.All):
            LOG.error(message)
        raise RuntimeError("Closest facility analysis failed")

    facility_oid_id = dict(
        result.searchCursor(
            output_type=arcpy.nax.ClosestFacilityOutputDataType.Facilities,
            field_names=["FacilityOID", "source_id"],
        ))
    location_oid_id = dict(
        result.searchCursor(
            output_type=arcpy.nax.ClosestFacilityOutputDataType.Incidents,
            field_names=["IncidentOID", "source_id"],
        ))
    keys = ["FacilityOID", "IncidentOID", f"Total_{distance_units}", "SHAPE@"]
    cursor = result.searchCursor(
        output_type=arcpy.nax.ClosestFacilityOutputDataType.Routes,
        field_names=keys)
    with cursor:
        for row in cursor:
            route = dict(zip(keys, row))
            yield {
                "dataset_id": location_oid_id[route["IncidentOID"]],
                "facility_id": facility_oid_id[route["FacilityOID"]],
                "cost": route[f"Total_{distance_units}"],
                "geometry": route["SHAPE@"],
            }
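
A minimal usage sketch for `closest_facility_route` (paths, field names, and travel mode hypothetical); each yielded dictionary carries the dataset and facility IDs, route cost, and route geometry:

for route in closest_facility_route(
    "C:/data/example.gdb/incidents",
    id_field_name="incident_id",
    facility_path="C:/data/example.gdb/stations",
    facility_id_field_name="station_id",
    network_path="C:/data/example.gdb/streets_nd",
    travel_mode="Driving",
):
    print(route["dataset_id"], route["facility_id"], route["cost"])
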
Example #22
def update_by_overlay_count(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    overlay_dataset_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    overlay_where_sql: Optional[str] = None,
    tolerance: Optional[float] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by count of overlay features.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        overlay_dataset_path: Path to overlay-dataset.

    Keyword Args:
        dataset_where_sql: SQL where-clause for dataset subselection.
        overlay_where_sql: SQL where-clause for overlay-dataset subselection.
        tolerance: Tolerance for coincidence, in units of the dataset. If set to None,
            will use the default tolerance for the workspace of the dataset.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.

    Raises:
        RuntimeError: If attribute cannot be updated.
    """
    dataset_path = Path(dataset_path)
    overlay_dataset_path = Path(overlay_dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by overlay feature counts from `%s`.",
        dataset_path,
        field_name,
        overlay_dataset_path,
    )
    original_tolerance = arcpy.env.XYTolerance
    view = DatasetView(dataset_path,
                       field_names=[],
                       dataset_where_sql=dataset_where_sql)
    overlay_view = DatasetView(
        overlay_dataset_path,
        field_names=[],
        dataset_where_sql=overlay_where_sql,
    )
    with view, overlay_view:
        if tolerance is not None:
            arcpy.env.XYTolerance = tolerance
        temp_output_path = unique_path("output")
        arcpy.analysis.SpatialJoin(
            target_features=view.name,
            join_features=overlay_view.name,
            # ArcPy2.8.0: Convert to str.
            out_feature_class=str(temp_output_path),
            join_operation="JOIN_ONE_TO_ONE",
            join_type="KEEP_COMMON",
            match_option="INTERSECT",
        )
    arcpy.env.XYTolerance = original_tolerance
    cursor = arcpy.da.SearchCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(temp_output_path),
        field_names=["TARGET_FID", "Join_Count"],
    )
    with cursor:
        oid_overlay_count = dict(cursor)
    # ArcPy2.8.0: Convert to str.
    arcpy.management.Delete(str(temp_output_path))
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=["OID@", field_name],
        where_clause=dataset_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for feature in cursor:
            oid = feature[0]
            old_value = feature[1]
            new_value = oid_overlay_count.get(oid, 0)
            if same_value(old_value, new_value):
                states["unchanged"] += 1
            else:
                try:
                    cursor.updateRow([oid, new_value])
                    states["altered"] += 1
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Update cursor failed: Offending value: `{new_value}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
Example #23
def add_index(
    dataset_path: Union[Path, str],
    *,
    field_names: Iterable[str],
    index_name: Optional[str] = None,
    is_ascending: bool = False,
    is_unique: bool = False,
    fail_on_lock_ok: bool = False,
    log_level: int = logging.INFO,
) -> List[Field]:
    """Add index to dataset fields.

    Args:
        dataset_path: Path to dataset.
        field_names: Sequence of participating field names.
        index_name: Name for index. Only applicable to non-spatial indexes for
            geodatabase datasets.
        is_ascending: Build index with values in ascending order if True. Only
            applicable to non-spatial indexes for enterprise geodatabase datasets.
        is_unique: Build index with unique constraint if True. Only
            applicable to non-spatial indexes for enterprise geodatabase datasets.
        fail_on_lock_ok: If True, indicate success even if dataset locks prevent
            adding index.
        log_level: Level to log the function at.

    Returns:
        Sequence of field metadata instances for participating fields.

    Raises:
        RuntimeError: If more than one field and any are geometry-type.
        arcpy.ExecuteError: If dataset lock prevents adding index.
    """
    dataset_path = Path(dataset_path)
    field_names = list(field_names)
    LOG.log(
        log_level,
        "Start: Add index to field(s) `%s` on `%s`.",
        field_names,
        dataset_path,
    )
    field_types = {
        field.type.upper()
        for field in Dataset(dataset_path).fields if field.name.lower() in
        [field_name.lower() for field_name in field_names]
    }
    if "GEOMETRY" in field_types:
        if len(field_names) > 1:
            raise RuntimeError("Cannot create a composite spatial index.")

        # ArcPy2.8.0: Convert to str.
        func = partial(arcpy.management.AddSpatialIndex,
                       in_features=str(dataset_path))
    else:
        func = partial(
            arcpy.management.AddIndex,
            # ArcPy2.8.0: Convert to str.
            in_table=str(dataset_path),
            fields=field_names,
            index_name=index_name,
            unique=is_unique,
            ascending=is_ascending,
        )
    try:
        func()
    except arcpy.ExecuteError as error:
        if str(error).startswith("ERROR 000464"):
            LOG.warning("Lock on `%s` prevents adding index.", dataset_path)
            if not fail_on_lock_ok:
                raise

        else:
            raise

    LOG.log(log_level, "End: Add.")
    return [Field(dataset_path, field_name) for field_name in field_names]
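
A minimal usage sketch for `add_index` (path and field hypothetical); a lone geometry field gets a spatial index, while non-geometry fields get an attribute index:

add_index(
    "C:/data/example.gdb/parcels",
    field_names=["parcel_id"],
    index_name="idx_parcel_id",
    is_unique=True,
)
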
Example #24
def update_by_unique_id(
    dataset_path: Union[Path, str],
    field_name: str,
    *,
    dataset_where_sql: Optional[str] = None,
    initial_number: int = 1,
    start_after_max_number: bool = False,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update attribute values by assigning a unique ID.

    Existing IDs are preserved, if unique.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        initial_number: Initial number for a proposed ID, if using a numeric data type.
            Superseded by `start_after_max_number`.
        start_after_max_number: Initial number will be one greater than the
            maximum existing ID number if True, if using a numeric data type.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Attribute counts for each update-state.

    Raises:
        RuntimeError: If attribute cannot be updated.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Update attributes in `%s.%s` by assigning unique IDs.",
        dataset_path,
        field_name,
    )
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=[field_name],
        where_clause=dataset_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    # First run will clear duplicate IDs & gather used IDs.
    used_ids = set()
    # BUG-UNFILED: Use separate edit sessions (not a fan of this intermediate state).
    with session, cursor:
        for (id_value, ) in cursor:
            if id_value in used_ids:
                cursor.updateRow([None])
            else:
                used_ids.add(id_value)
        _field = Field(dataset_path, field_name)
        id_pool = unique_ids(
            data_type=python_type(_field.type),
            string_length=_field.length,
            initial_number=(max(used_ids) + 1
                            if start_after_max_number else initial_number),
        )
    states = Counter()
    # Second run will fill in missing IDs. The first cursor was closed on exiting its
    # edit block, so a fresh cursor is needed here.
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=[field_name],
        where_clause=dataset_where_sql,
    )
    with session, cursor:
        for (id_value, ) in cursor:
            if id_value is not None:
                states["unchanged"] += 1
            else:
                id_value = next(id_pool)
                while id_value in used_ids:
                    id_value = next(id_pool)
                try:
                    cursor.updateRow([id_value])
                    states["altered"] += 1
                    used_ids.add(id_value)
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Update cursor failed: Offending value: `{id_value}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
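
A minimal usage sketch for `update_by_unique_id` (path and field hypothetical); the first pass clears duplicate IDs, the second fills the cleared and missing IDs from the generated pool:

update_by_unique_id(
    "C:/data/example.gdb/parcels",
    "parcel_id",
    start_after_max_number=True,
)
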
Example #25
def update_rows(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    id_field_names: Iterable[str],
    cmp_dataset_path: Union[Path, str],
    cmp_field_name: Optional[str] = None,
    cmp_id_field_names: Optional[Iterable[str]] = None,
    cmp_date: Optional[Union[date, _datetime]] = None,
    date_initiated_field_name: str = "date_initiated",
    date_expired_field_name: str = "date_expired",
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Add field value changes to tracking dataset from comparison dataset.

    Args:
        dataset_path: Path to tracking dataset.
        field_name: Name of field with tracked attribute.
        id_field_names: Names of the feature ID fields.
        cmp_dataset_path: Path to comparison dataset.
        cmp_field_name: Name of field with tracked attribute in comparison dataset. If
            set to None, will assume same as field_name.
        cmp_id_field_names: Names of the feature ID fields in comparison dataset. If set
            to None, will assume same as id_field_names.
        cmp_date: Date to mark comparison change. If set to None, will set to the date
            of execution.
        date_initiated_field_name: Name of tracking-row-initiated date field.
        date_expired_field_name: Name of tracking-row-expired date field.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    cmp_dataset_path = Path(cmp_dataset_path)
    LOG.log(
        log_level,
        "Start: Update tracking rows in `%s` from `%s`.",
        dataset_path,
        cmp_dataset_path,
    )
    id_field_names = list(id_field_names)
    if cmp_field_name is None:
        cmp_field_name = field_name
    cmp_id_field_names = (id_field_names if cmp_id_field_names is None else
                          list(cmp_id_field_names))
    if cmp_date is None:
        cmp_date = date.today()
    current_where_sql = f"{date_expired_field_name} IS NULL"
    id_current_value = {
        row[:-1]: row[-1]
        for row in features.as_tuples(
            dataset_path,
            field_names=id_field_names + [field_name],
            dataset_where_sql=current_where_sql,
        )
    }
    id_cmp_value = {
        row[:-1]: row[-1]
        for row in features.as_tuples(cmp_dataset_path,
                                      field_names=cmp_id_field_names +
                                      [cmp_field_name])
    }
    changed_ids = set()
    expired_ids = {_id for _id in id_current_value if _id not in id_cmp_value}
    new_rows = []
    for _id, value in id_cmp_value.items():
        if _id not in id_current_value:
            new_rows.append(_id + (value, cmp_date))
        elif not same_value(value, id_current_value[_id]):
            changed_ids.add(_id)
            new_rows.append(_id + (value, cmp_date))
    # ArcPy2.8.0: Convert Path to str.
    cursor = arcpy.da.UpdateCursor(
        in_table=str(dataset_path),
        field_names=id_field_names + [field_name, date_expired_field_name],
        where_clause=current_where_sql,
    )
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for row in cursor:
            _id = tuple(row[:len(id_field_names)])
            if _id in changed_ids or _id in expired_ids:
                cursor.updateRow(_id + (row[-2], cmp_date))
            else:
                states["unchanged"] += 1
    features.insert_from_iters(
        dataset_path,
        field_names=id_field_names + [field_name, date_initiated_field_name],
        source_features=new_rows,
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    states["changed"] = len(changed_ids)
    states["expired"] = len(expired_ids)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
Example #26
def generate_service_rings(
    dataset_path: Union[Path, str],
    *,
    id_field_name: str,
    network_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    cost_attribute: str,
    detailed_features: bool = False,
    max_distance: Union[float, int],
    overlap_facilities: bool = True,
    restriction_attributes: Optional[Iterable[str]] = None,
    ring_width: Union[float, int],
    travel_from_facility: bool = False,
    trim_value: Optional[Union[float, int]] = None,
    log_level: int = logging.INFO,
) -> Dataset:
    """Create facility service ring features using a network dataset.

    Args:
        dataset_path: Path to dataset.
        id_field_name: Name of dataset ID field.
        network_path: Path to network dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        cost_attribute: Name of network cost attribute to use.
        detailed_features: Generate high-detail features if True.
        max_distance: Distance in travel from the facility the outer ring will extend
            to, in the units of the dataset.
        overlap_facilities: Allow different facility service areas to overlap if True.
        restriction_attributes: Names of network restriction attributes to use in
            analysis.
        ring_width: Distance a service ring represents in travel, in the units of the
            dataset.
        travel_from_facility: Perform the analysis travelling from the facility if True,
            rather than toward the facility.
        trim_value: Distance from network features to trim service areas at, in units
            of the dataset.
        log_level: Level to log the function at.

    Returns:
        Dataset metadata instance for output dataset.
    """
    dataset_path = Path(dataset_path)
    network_path = Path(network_path)
    output_path = Path(output_path)
    LOG.log(log_level, "Start: Generate service rings for `%s`.", dataset_path)
    # `trim_value` assumes meters if not input as linear unit string.
    if trim_value is not None:
        trim_value = f"{trim_value} {SpatialReference(dataset_path).linear_unit}"
    # ArcPy2.8.0: Convert Path to str.
    arcpy.na.MakeServiceAreaLayer(
        in_network_dataset=str(network_path),
        out_network_analysis_layer="service_area",
        impedance_attribute=cost_attribute,
        travel_from_to="TRAVEL_FROM" if travel_from_facility else "TRAVEL_TO",
        default_break_values=(" ".join(
            str(x) for x in range(ring_width, max_distance + 1, ring_width))),
        polygon_type="DETAILED_POLYS" if detailed_features else "SIMPLE_POLYS",
        merge="NO_MERGE" if overlap_facilities else "NO_OVERLAP",
        nesting_type="RINGS",
        UTurn_policy="ALLOW_DEAD_ENDS_AND_INTERSECTIONS_ONLY",
        restriction_attribute_name=restriction_attributes,
        polygon_trim=trim_value is not None,
        poly_trim_value=trim_value,
        hierarchy="NO_HIERARCHY",
    )
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        arcpy.na.AddLocations(
            in_network_analysis_layer="service_area",
            sub_layer="Facilities",
            in_table=view.name,
            field_mappings=f"Name {id_field_name} #",
            search_tolerance=max_distance,
            match_type="MATCH_TO_CLOSEST",
            append="CLEAR",
            snap_to_position_along_network="NO_SNAP",
            exclude_restricted_elements=True,
        )
    arcpy.na.Solve(
        in_network_analysis_layer="service_area",
        ignore_invalids=True,
        terminate_on_solve_error=True,
    )
    dataset.copy("service_area/Polygons",
                 output_path=output_path,
                 log_level=logging.DEBUG)
    arcpy.management.Delete("service_area")
    id_field = Field(dataset_path, id_field_name)
    dataset.add_field(output_path,
                      log_level=logging.DEBUG,
                      **id_field.field_as_dict)
    attributes.update_by_function(
        output_path,
        field_name=id_field.name,
        function=TYPE_ID_FUNCTION_MAP[id_field.type.lower()],
        field_as_first_arg=False,
        arg_field_names=["Name"],
        log_level=logging.DEBUG,
    )
    LOG.log(log_level, "End: Generate.")
    return Dataset(output_path)
Example #27
def polygons_to_lines(
    dataset_path: Union[Path, str],
    *,
    output_path: Union[Path, str],
    dataset_where_sql: Optional[str] = None,
    id_field_name: Optional[str] = None,
    make_topological: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Convert geometry from polygons to lines.

    Note:
        If `make_topological` is set to True, shared outlines will be a single, separate
        feature. Note that one cannot pass attributes to a topological transformation
        (as the values would not apply to all adjacent features).

        If an id field name is specified, the output dataset will identify the input
        features that defined the line feature with the name & values from the provided
        field. This option will be ignored if the output is non-topological lines, as
        the field will carry over with the rest of the attributes.

    Args:
        dataset_path: Path to dataset.
        output_path: Path to output dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        id_field_name: Name of ID field to apply on topological lines.
        make_topological: Make line output topological, or merged where lines overlap.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Convert polgyons in `%s` to lines in output `%s`.",
        dataset_path,
        output_path,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.PolygonToLine(
            in_features=view.name,
            out_feature_class=str(output_path),
            neighbor_option=("IDENTIFY_NEIGHBORS"
                             if make_topological else "IGNORE_NEIGHBORS"),
        )
    if make_topological:
        _dataset = Dataset(dataset_path)
        for side in ["left", "right"]:
            oid_key = f"{side.upper()}_FID"
            if id_field_name:
                id_field = next(
                    _field for _field in _dataset.fields
                    if _field.name.lower() == id_field_name.lower())
                id_field.name = f"{side.upper()}_{id_field_name}"
                # Cannot create an OID-type field, so force to long.
                if id_field.type.upper() == "OID":
                    id_field.type = "LONG"
                dataset.add_field(output_path,
                                  log_level=logging.DEBUG,
                                  **id_field.field_as_dict)
                attributes.update_by_joined_value(
                    output_path,
                    field_name=id_field.name,
                    key_field_names=[oid_key],
                    join_dataset_path=dataset_path,
                    join_field_name=id_field_name,
                    join_key_field_names=[_dataset.oid_field_name],
                    log_level=logging.DEBUG,
                )
            dataset.delete_field(output_path,
                                 field_name=oid_key,
                                 log_level=logging.DEBUG)
    else:
        dataset.delete_field(output_path,
                             field_name="ORIG_FID",
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Convert.")
    return states
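A minimal usage sketch for `polygons_to_lines` (the paths and field name are hypothetical): convert county polygons into topological boundary lines, tagging each line with the county name on either side.

from pathlib import Path

counties = Path("C:/data/base.gdb/counties")  # hypothetical input
boundaries = Path("C:/data/base.gdb/county_boundaries")  # hypothetical output

states = polygons_to_lines(
    counties,
    output_path=boundaries,
    id_field_name="county_name",  # yields LEFT_county_name & RIGHT_county_name
    make_topological=True,
)
print(states)  # Counter with "in original dataset" & "in output" counts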
Example #28
def update_node_ids(
    dataset_path: Union[Path, str],
    *,
    from_id_field_name: str,
    to_id_field_name: str,
    dataset_where_sql: Optional[str] = None,
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Update node ID values.

    Args:
        dataset_path: Path to the dataset.
        from_id_field_name: Name of from-node ID field.
        to_id_field_name: Name of to-node ID field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(
        log_level,
        "Start: Update node IDs in `%s` (from) & `%s` (to) for `%s`.",
        from_id_field_name,
        to_id_field_name,
        dataset_path,
    )
    cursor = arcpy.da.UpdateCursor(
        # ArcPy2.8.0: Convert to str.
        in_table=str(dataset_path),
        field_names=["OID@", from_id_field_name, to_id_field_name],
        where_clause=dataset_where_sql,
    )
    oid_node = id_node_map(
        dataset_path,
        from_id_field_name=from_id_field_name,
        to_id_field_name=to_id_field_name,
        update_nodes=True,
    )
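    # `oid_node` maps each feature OID to its recalculated from- & to-node IDs,
    # so the cursor below only rewrites rows whose ID values actually changed.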
    session = Editing(Dataset(dataset_path).workspace_path, use_edit_session)
    states = Counter()
    with session, cursor:
        for old_feature in cursor:
            oid = old_feature[0]
            new_feature = (oid, oid_node[oid]["from"], oid_node[oid]["to"])
            if same_feature(old_feature, new_feature):
                states["unchanged"] += 1
            else:
                try:
                    cursor.updateRow(new_feature)
                    states["altered"] += 1
                except RuntimeError as error:
                    raise RuntimeError(
                        f"Row failed to update. Offending row: `{new_feature}`"
                    ) from error

    log_entity_states("attributes", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Update.")
    return states
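A minimal usage sketch for `update_node_ids`, assuming a hypothetical road-centerline dataset whose edges carry from- and to-node ID fields:

states = update_node_ids(
    "C:/data/network.gdb/roads",  # hypothetical dataset
    from_id_field_name="from_node_id",
    to_id_field_name="to_node_id",
    use_edit_session=True,  # wrap row updates in an ArcGIS edit session
)
print(states["altered"], states["unchanged"])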
Example #29
def union(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    union_path: Union[Path, str],
    union_field_name: str,
    dataset_where_sql: Optional[str] = None,
    union_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    replacement_value: Optional[Any] = None,
    log_level: int = logging.INFO,
) -> Counter:
    """Assign union attributes.

    Notes:
        Features with multiple union-features will be split.

    Args:
        dataset_path: Path to dataset.
        field_name: Name of the field in which to place union values.
        union_path: Path to union-dataset.
        union_field_name: Name of union-field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        union_where_sql: SQL where-clause for the union-dataset subselection.
        output_path: Path to output dataset.
        replacement_value: Value to replace a present union-field value with. If set to
            None, no replacement will occur.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    union_path = Path(union_path)
    output_path = Path(output_path)
    LOG.log(
        log_level,
        "Start: Union-set attributes in `%s.%s` by features/values in `%s.%s`.",
        dataset_path,
        field_name,
        union_path,
        union_field_name,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    # Do not include any field names - we do not want them added to output.
    union_view = DatasetView(union_path,
                             field_names=[],
                             dataset_where_sql=union_where_sql)
    with view, union_view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.Union(
            in_features=[view.name, union_view.name],
            out_feature_class=str(output_path),
            join_attributes="ALL",
        )
    fid_field_names = [
        name for name in Dataset(output_path).field_names
        if name.startswith("FID_")
    ]
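    # Union appends an `FID_<input name>` field for each input; fields follow input
    # order, so the last FID field corresponds to the union view.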
    if replacement_value is not None:
        attributes.update_by_value(
            output_path,
            field_name,
            value=replacement_value,
            dataset_where_sql=f"{fid_field_names[-1]} <> -1",
            log_level=logging.DEBUG,
        )
    else:
        attributes.update_by_joined_value(
            output_path,
            field_name,
            key_field_names=[fid_field_names[-1]],
            join_dataset_path=union_path,
            join_field_name=union_field_name,
            join_key_field_names=["OID@"],
            dataset_where_sql=f"{fid_field_names[-1]} <> -1",
            join_dataset_where_sql=union_where_sql,
            log_level=logging.DEBUG,
        )
    attributes.update_by_value(
        output_path,
        field_name,
        value=None,
        dataset_where_sql=f"{fid_field_names[-1]} = -1",
        log_level=logging.DEBUG,
    )
    for name in fid_field_names:
        dataset.delete_field(output_path,
                             field_name=name,
                             log_level=logging.DEBUG)
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Union.")
    return states
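A minimal usage sketch for `union` (paths and field names are hypothetical): stamp each parcel with the zoning code of the zone it falls in, splitting parcels that straddle zone boundaries. Parcels outside every zone end up with a None value, per the final update above.

states = union(
    "C:/data/city.gdb/parcels",  # hypothetical dataset
    field_name="zone_code",
    union_path="C:/data/city.gdb/zoning",
    union_field_name="zone_code",
    output_path="C:/data/city.gdb/parcels_zoned",
)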
Example #30
def dissolve_features(
    dataset_path: Union[Path, str],
    *,
    dataset_where_sql: Optional[str] = None,
    output_path: Union[Path, str],
    dissolve_field_names: Optional[Iterable[str]] = None,
    all_fields_in_output: bool = False,
    allow_multipart: bool = True,
    unsplit_lines: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Dissolve feature geometry that share value in given fields.

    Args:
        dataset_path: Path to dataset.
        dataset_where_sql: SQL where-clause for dataset subselection.
        output_path: Path to output dataset.
        dissolve_field_names: Names of fields to base dissolve on.
        all_fields_in_output: All fields in the dataset will persist in the output
            dataset if True. Otherwise, only the dissolve fields will persist. Note
            that non-dissolve fields will be re-added with default values, since
            their per-feature values cannot survive the dissolve.
        allow_multipart: Allow multipart features in output if True.
        unsplit_lines: Merge line features when endpoints meet without crossing features
            if True.
        log_level: Level to log the function at.

    Returns:
        Feature counts for original and output datasets.
    """
    dataset_path = Path(dataset_path)
    output_path = Path(output_path)
    if dissolve_field_names is not None:
        dissolve_field_names = list(dissolve_field_names)
    LOG.log(
        log_level,
        "Start: Dissolve features in `%s` on fields `%s`.",
        dataset_path,
        dissolve_field_names,
    )
    states = Counter()
    states["in original dataset"] = dataset.feature_count(dataset_path)
    view = DatasetView(
        dataset_path,
        field_names=dissolve_field_names,
        dataset_where_sql=dataset_where_sql,
    )
    with view:
        # ArcPy2.8.0: Convert Path to str.
        arcpy.management.Dissolve(
            in_features=view.name,
            out_feature_class=str(output_path),
            dissolve_field=dissolve_field_names,
            multi_part=allow_multipart,
            unsplit_lines=unsplit_lines,
        )
    if all_fields_in_output:
        for _field in Dataset(dataset_path).user_fields:
            # Cannot add a non-nullable field to existing features.
            _field.is_nullable = True
            dataset.add_field(
                output_path,
                exist_ok=True,
                log_level=logging.DEBUG,
                **_field.field_as_dict,
            )
    states["in output"] = dataset.feature_count(output_path)
    log_entity_states("features", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Dissolve.")
    return states
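A minimal usage sketch for `dissolve_features` (paths and field name are hypothetical): merge parcels that share a neighborhood code into one feature per neighborhood.

states = dissolve_features(
    "C:/data/city.gdb/parcels",  # hypothetical dataset
    output_path="C:/data/city.gdb/neighborhoods",
    dissolve_field_names=["neighborhood_code"],
    allow_multipart=False,  # one single-part feature per contiguous area
)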