Example #1
def id_node_map(
    dataset_path: Union[Path, str],
    *,
    from_id_field_name: str,
    to_id_field_name: str,
    id_field_names: Iterable[str] = ("OID@", ),
    dataset_where_sql: Optional[str] = None,
    update_nodes: bool = False,
) -> Dict[Union[Tuple[Any, ...], Any], Dict[str, Any]]:
    """Return mapping of feature ID to from- & to-node ID dictionary.

    Notes:
        From- & to-node IDs must be same attribute type.
        Output format:
            `{feature_id: {"from": from_node_id, "to": to_node_id}}`

    Args:
        dataset_path: Path to dataset.
        from_id_field_name: Name of from-node ID field.
        to_id_field_name: Name of to-node ID field.
        id_field_names: Names of the feature ID fields.
        dataset_where_sql: SQL where-clause for dataset subselection.
        update_nodes: Update nodes based on feature geometries if True.
    """
    dataset_path = Path(dataset_path)
    id_field_names = list(id_field_names)
    id_node = {}
    if update_nodes:
        coordinate_node = coordinates_node_map(
            dataset_path,
            from_id_field_name=from_id_field_name,
            to_id_field_name=to_id_field_name,
            id_field_names=id_field_names,
            update_nodes=update_nodes,
            dataset_where_sql=dataset_where_sql,
        )
        for node in coordinate_node.values():
            for end in ["from", "to"]:
                for feature_id in node["feature_ids"][end]:
                    feature_id = feature_id[0] if len(
                        feature_id) == 1 else feature_id
                    if feature_id not in id_node:
                        id_node[feature_id] = {}
                    id_node[feature_id][end] = node["node_id"]
    else:
        for feature in features.as_dicts(
            dataset_path,
            field_names=id_field_names + [from_id_field_name, to_id_field_name],
            dataset_where_sql=dataset_where_sql,
        ):
            feature["from_id"] = feature[from_id_field_name]
            feature["to_id"] = feature[to_id_field_name]
            feature["id"] = tuple(feature[key] for key in id_field_names)
            # Unwrap single-field IDs from their 1-tuple.
            if len(feature["id"]) == 1:
                feature["id"] = feature["id"][0]
            for end in ["from", "to"]:
                # setdefault avoids a KeyError the first time a feature ID appears.
                id_node.setdefault(feature["id"], {})[end] = feature[f"{end}_id"]
    return id_node
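
A minimal usage sketch (the geodatabase path and node field names below are hypothetical, not from the source):

# Hypothetical usage: map each road segment's OID to its from-/to-node IDs.
segment_nodes = id_node_map(
    "C:/data/network.gdb/roads",
    from_id_field_name="FROM_NODE",
    to_id_field_name="TO_NODE",
)
for segment_id, nodes in segment_nodes.items():
    print(segment_id, nodes["from"], nodes["to"])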
Example #2
def adjacent_neighbors_map(
    dataset_path: Union[Path, str],
    *,
    id_field_names: Iterable[str],
    dataset_where_sql: Optional[str] = None,
    exclude_overlap: bool = False,
    include_corner: bool = False,
) -> Dict[Union[Tuple[Any, ...], Any], Set[Union[Tuple[Any, ...], Any]]]:
    """Return mapping of feature ID to set of adjacent feature IDs.

    Notes:
        Only works for polygon geometries.
        If `id_field_names` has only one field name, feature IDs in the mapping will
            be the single value of that field, not a tuple.

    Args:
        dataset_path: Path to dataset.
        id_field_names: Names of the feature ID fields.
        dataset_where_sql: SQL where-clause for dataset subselection.
        exclude_overlap: Exclude features that overlap, but do not have adjacent edges
            or nodes if True.
        include_corner: Include features that have adjacent corner nodes, but no
            adjacent edges if True.
    """
    dataset_path = Path(dataset_path)
    id_field_names = list(id_field_names)
    view = DatasetView(
        dataset_path, field_names=id_field_names, dataset_where_sql=dataset_where_sql
    )
    with view:
        temp_neighbor_path = unique_path("neighbor")
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.PolygonNeighbors(
            in_features=view.name,
            out_table=str(temp_neighbor_path),
            in_fields=id_field_names,
            area_overlap=not exclude_overlap,
            both_sides=True,
        )
    adjacent_neighbors = {}
    for row in features.as_dicts(temp_neighbor_path):
        # Lowercase to avoid casing mismatch.
        row = {key.lower(): val for key, val in row.items()}
        if len(id_field_names) == 1:
            source_id = row[f"src_{id_field_names[0]}"]
            neighbor_id = row[f"nbr_{id_field_names[0]}"]
        else:
            source_id = tuple(row[f"src_{name}"] for name in id_field_names)
            neighbor_id = tuple(row[f"nbr_{name}"] for name in id_field_names)
        if source_id not in adjacent_neighbors:
            adjacent_neighbors[source_id] = set()
        if not include_corner and not row["length"] and not row["area"]:
            continue

        adjacent_neighbors[source_id].add(neighbor_id)
    dataset.delete(temp_neighbor_path)
    return adjacent_neighbors
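
A minimal usage sketch (the parcels dataset and ID field are hypothetical):

# Hypothetical usage: collect parcels sharing an edge; overlap-only pairs excluded.
neighbors = adjacent_neighbors_map(
    "C:/data/land.gdb/parcels",
    id_field_names=["parcel_id"],
    exclude_overlap=True,
)
# With include_corner=False, an empty set means the parcel touches others only at
# corner nodes; parcels touching nothing at all do not appear in the mapping.
corner_only = {parcel_id for parcel_id, adjacent in neighbors.items() if not adjacent}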
Example #3
def consolidate_rows(
    dataset_path: Union[Path, str],
    *,
    field_name: str,
    id_field_names: Iterable[str],
    date_initiated_field_name: str = "date_initiated",
    date_expired_field_name: str = "date_expired",
    use_edit_session: bool = False,
    log_level: int = logging.INFO,
) -> Counter:
    """Consolidate tracking dataset rows where the value does not actually change.

    Useful for quick-loaded point-in-time values, or for processing hand-altered rows.

    Args:
        dataset_path: Path to tracking dataset.
        field_name: Name of field with tracked attribute.
        id_field_names: Names of the feature ID fields.
        date_initiated_field_name: Name of tracking-row-initiated date field.
        date_expired_field_name: Name of tracking-row-expired date field.
        use_edit_session: True if edits are to be made in an edit session.
        log_level: Level to log the function at.

    Returns:
        Feature counts for each update-state.
    """
    dataset_path = Path(dataset_path)
    LOG.log(log_level, "Start: Consolidate tracking rows in `%s`.",
            dataset_path)
    id_field_names = list(id_field_names)
    field_names = id_field_names + [
        date_initiated_field_name,
        date_expired_field_name,
        field_name,
    ]
    id_rows = defaultdict(list)
    for row in features.as_dicts(dataset_path, field_names=field_names):
        _id = tuple(row[name] for name in id_field_names)
        id_rows[_id].append(row)
    for _id in list(id_rows):
        rows = sorted(id_rows[_id], key=itemgetter(date_initiated_field_name))
        for i, row in enumerate(rows):
            if i == 0 or row[date_initiated_field_name] is None:
                continue

            date_initiated = row[date_initiated_field_name]
            value = row[field_name]
            previous_row = rows[i - 1]
            previous_value = previous_row[field_name]
            previous_date_expired = previous_row[date_expired_field_name]
            if same_value(value, previous_value) and same_value(
                    date_initiated, previous_date_expired):
                # Move previous row date initiated to current row & clear from previous.
                row[date_initiated_field_name] = previous_row[
                    date_initiated_field_name]
                previous_row[date_initiated_field_name] = None
        id_rows[_id] = [
            row for row in rows if row[date_initiated_field_name] is not None
        ]
    states = features.update_from_dicts(
        dataset_path,
        field_names=field_names,
        # In tracking dataset, ID is ID + date_initiated.
        id_field_names=id_field_names + [date_initiated_field_name],
        source_features=chain(*id_rows.values()),
        use_edit_session=use_edit_session,
        log_level=logging.DEBUG,
    )
    log_entity_states("tracking rows", states, logger=LOG, log_level=log_level)
    LOG.log(log_level, "End: Consolidate.")
    return states
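
A minimal usage sketch (the tracking dataset path and field names are hypothetical):

# Hypothetical usage: collapse rows where `status` did not actually change between
# consecutive tracking periods.
states = consolidate_rows(
    "C:/data/tracking.gdb/permit_status",
    field_name="status",
    id_field_names=["permit_id"],
)
# `states` is a Counter of feature counts keyed by update-state.
print(states)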
Example #4
def nearest_features(
    dataset_path: Union[Path, str],
    *,
    id_field_name: str,
    near_path: Union[Path, str],
    near_id_field_name: str,
    dataset_where_sql: Optional[str] = None,
    near_where_sql: Optional[str] = None,
    max_distance: Optional[Union[float, int]] = None,
    near_rank: int = 1,
) -> Iterator[Dict[str, Any]]:
    """Generate info dictionaries for relationship with Nth-nearest near-feature.

    Args:
        dataset_path: Path to dataset.
        id_field_name: Name of dataset ID field.
        near_path: Path to near-dataset.
        near_id_field_name: Name of the near-dataset ID field.
        dataset_where_sql: SQL where-clause for dataset subselection.
        near_where_sql: SQL where-clause for near-dataset subselection.
        max_distance: Maximum distance to search for near-features, in units of the
            dataset.
        near_rank: Nearness rank of the feature to map info for (Nth-nearest).

    Yields:
        Nearest feature details.
        Keys:
            * dataset_id
            * near_id
            * angle: Angle from dataset feature & near-feature, in decimal degrees.
            * distance: Distance between feature & near-feature, in units of the
                dataset.
    """
    dataset_path = Path(dataset_path)
    near_path = Path(near_path)
    view = DatasetView(dataset_path, dataset_where_sql=dataset_where_sql)
    near_view = DatasetView(near_path, dataset_where_sql=near_where_sql)
    with view, near_view:
        temp_near_path = unique_path("near")
        # ArcPy2.8.0: Convert Path to str.
        arcpy.analysis.GenerateNearTable(
            in_features=view.name,
            near_features=near_view.name,
            out_table=str(temp_near_path),
            search_radius=max_distance,
            angle=True,
            closest=(near_rank == 1),
            closest_count=near_rank,
        )
        oid_id_map = dict(
            features.as_tuples(view.name, field_names=["OID@", id_field_name]))
        near_oid_id_map = dict(
            features.as_tuples(near_view.name,
                               field_names=["OID@", near_id_field_name]))
    _features = features.as_dicts(
        temp_near_path,
        field_names=["IN_FID", "NEAR_FID", "NEAR_ANGLE", "NEAR_DIST"],
        dataset_where_sql=f"NEAR_RANK = {near_rank}"
        if near_rank != 1 else None,
    )
    for feature in _features:
        yield {
            "dataset_id": oid_id_map[feature["IN_FID"]],
            "near_id": near_oid_id_map[feature["NEAR_FID"]],
            "angle": feature["NEAR_ANGLE"],
            "distance": feature["NEAR_DIST"],
        }

    # Note: the temporary near-table is only deleted once the generator is exhausted.
    dataset.delete(temp_near_path)
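
A minimal usage sketch (the address and hydrant datasets are hypothetical):

# Hypothetical usage: nearest hydrant within 500 dataset units of each address.
for info in nearest_features(
    "C:/data/city.gdb/addresses",
    id_field_name="address_id",
    near_path="C:/data/city.gdb/hydrants",
    near_id_field_name="hydrant_id",
    max_distance=500,
):
    print(info["dataset_id"], info["near_id"], round(info["distance"], 1))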
Example #5
def coordinates_node_map(
    dataset_path: Union[Path, str],
    *,
    from_id_field_name: str,
    to_id_field_name: str,
    id_field_names: Iterable[str] = ("OID@", ),
    dataset_where_sql: Optional[str] = None,
    update_nodes: bool = False,
    spatial_reference_item: SpatialReferenceSourceItem = None,
) -> Dict[Tuple[float, float], Dict[str, Any]]:
    """Return mapping of coordinates to node info mapping for dataset.

    Notes:
        From- & to-node IDs must be same attribute type.
        Output format:
            `{(x, y): {"node_id": Any, "feature_ids": {"from": set, "to": set}}}`

    Args:
        dataset_path: Path to dataset.
        from_id_field_name: Name of from-node ID field.
        to_id_field_name: Name of to-node ID field.
        id_field_names: Names of the feature ID fields.
        dataset_where_sql: SQL where-clause for dataset subselection.
        update_nodes: Update nodes based on feature geometries if True.
        spatial_reference_item: Item from which the spatial reference for any
            geometry properties will be derived. If None, the spatial reference of
            the dataset is used.

    Raises:
        ValueError: If from- & to-node ID fields are not the same type.
    """
    dataset_path = Path(dataset_path)
    id_field_names = list(id_field_names)
    node_id_data_type = None
    node_id_max_length = None
    for node_id_field_name in [from_id_field_name, to_id_field_name]:
        field = Field(dataset_path, node_id_field_name)
        if not node_id_data_type:
            node_id_data_type = python_type(field.type)
        elif python_type(field.type) != node_id_data_type:
            raise ValueError("From- and to-node ID fields must be same type")

        if node_id_data_type == str:
            # Track the shortest field length, so generated IDs fit in both fields.
            if not node_id_max_length or node_id_max_length > field.length:
                node_id_max_length = field.length
    coordinate_node = {}
    for feature in features.as_dicts(
        dataset_path,
        field_names=(
            id_field_names + [from_id_field_name, to_id_field_name, "SHAPE@"]
        ),
        dataset_where_sql=dataset_where_sql,
        spatial_reference_item=spatial_reference_item,
    ):
        feature["from_id"] = feature[from_id_field_name]
        feature["to_id"] = feature[to_id_field_name]
        feature["id"] = tuple(feature[key] for key in id_field_names)
        feature["from_coordinates"] = (
            feature["SHAPE@"].firstPoint.X,
            feature["SHAPE@"].firstPoint.Y,
        )
        feature["to_coordinates"] = (
            feature["SHAPE@"].lastPoint.X,
            feature["SHAPE@"].lastPoint.Y,
        )
        for end in ["from", "to"]:
            if feature[f"{end}_coordinates"] not in coordinate_node:
                coordinate_node[feature[f"{end}_coordinates"]] = {
                    "node_id": feature[f"{end}_id"],
                    "feature_ids": {
                        "from": set(),
                        "to": set()
                    },
                }
            node = coordinate_node[feature[f"{end}_coordinates"]]
            if node["node_id"] is None:
                node["node_id"] = feature[f"{end}_id"]
            # Assign lower node ID if newer is different than current.
            else:
                node["node_id"] = min(node, feature[f"{end}_id"])
            node["feature_ids"][end].add(feature["id"])
    if update_nodes:
        coordinate_node = _updated_coordinates_node_map(
            coordinate_node, node_id_data_type, node_id_max_length)
    return coordinate_node
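
A minimal usage sketch (path and field names hypothetical; `update_nodes=True` regenerates node IDs from endpoint coordinates where the fields are empty or inconsistent):

# Hypothetical usage: build the coordinate-to-node mapping for a road network,
# generating node IDs where the from-/to-node fields need updating.
nodes = coordinates_node_map(
    "C:/data/network.gdb/roads",
    from_id_field_name="FROM_NODE",
    to_id_field_name="TO_NODE",
    update_nodes=True,
)
for (x, y), node in nodes.items():
    print((x, y), node["node_id"], node["feature_ids"])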