Example #1
def duplicate_field(dataset_path, field_name, new_field_name, **kwargs):
    """Create new field as a duplicate of another.

    Note: This does *not* duplicate the values of the original field; only the schema.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field to duplicate.
        new_field_name (str): Name of the new field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        log_level (str): Level to log the function at. Default is "info".

    Returns:
        str: Name of the field created.
    """
    log = leveled_logger(LOG, kwargs.setdefault("log_level", "info"))
    log(
        "Start: Duplicate field %s as %s on %s.",
        field_name,
        new_field_name,
        dataset_path,
    )
    meta = {"field": field_metadata(dataset_path, field_name)}
    meta["field"]["name"] = new_field_name
    # Cannot add OID-type field, so change to long.
    if meta["field"]["type"].lower() == "oid":
        meta["field"]["type"] = "long"
    add_field_from_metadata(dataset_path, meta["field"], log_level=None)
    log("End: Duplicate.")
    return new_field_name
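
A quick usage sketch for the function above; the geodatabase path and field names are hypothetical, and the module-level helpers (leveled_logger, field_metadata, add_field_from_metadata) are assumed to be in scope:

# Hypothetical dataset path and field names.
duplicate_field(
    "C:/data/example.gdb/parcels",
    field_name="zone_code",
    new_field_name="zone_code_backup",
    log_level="debug",
)
# Returns "zone_code_backup"; only the schema is copied, never the values.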
Example #2
def closest_facility_route(
    dataset_path,
    id_field_name,
    facility_path,
    facility_id_field_name,
    network_path,
    cost_attribute,
    **kwargs
):
    """Generate route info dictionaries for dataset features's closest facility.

    Args:
        dataset_path (str): Path of the dataset.
        id_field_name (str): Name of the dataset ID field.
        facility_path (str): Path of the facilities dataset.
        facility_id_field_name (str): Name of the facility ID field.
        network_path (str): Path of the network dataset.
        cost_attribute (str): Name of the network cost attribute to use.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        facility_where_sql (str): SQL where-clause for facility subselection.
        max_cost (float): Maximum travel cost the search will attempt, in the cost
            attribute's units.
        restriction_attributes (iter): Collection of network restriction attribute
            names to use.
        travel_from_facility (bool): Flag to indicate performing the analysis
            travelling from (True) or to (False) the facility. Default is False.
        log_level (str): Level to log the function at. Default is 'info'.

    Yields:
        dict: The next feature's analysis result details.
            Dictionary keys: 'dataset_id', 'facility_id', 'cost', 'geometry'.
            The 'cost' value (float) is in the units of the cost attribute; the
            'geometry' value (arcpy.Geometry) is in the dataset's spatial reference.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('facility_where_sql')
    kwargs.setdefault('max_cost')
    kwargs.setdefault('restriction_attributes')
    kwargs.setdefault('travel_from_facility', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Generate closest facility in %s to locations in %s.",
        facility_path,
        dataset_path,
    )
    meta = {
        'id_field': {
            'dataset': arcobj.field_metadata(dataset_path, id_field_name),
            'facility': arcobj.field_metadata(facility_path, facility_id_field_name),
        }
    }
    keys = {
        'cursor': [
            'FacilityID', 'IncidentID', 'total_{}'.format(cost_attribute), 'shape@'
        ]
    }
    view = {
        'dataset': arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql']),
        'facility': arcobj.DatasetView(facility_path, kwargs['facility_where_sql']),
    }
    with arcobj.ArcExtension('Network'):
        arcpy.na.MakeClosestFacilityLayer(
            in_network_dataset=network_path,
            out_network_analysis_layer='closest',
            impedance_attribute=cost_attribute,
            travel_from_to=(
                'travel_from' if kwargs['travel_from_facility'] else 'travel_to'
            ),
            default_cutoff=kwargs['max_cost'],
            UTurn_policy='allow_dead_ends_and_intersections_only',
            restriction_attribute_name=kwargs['restriction_attributes'],
            hierarchy='no_hierarchy',
            output_path_shape='true_lines_with_measures',
        )
        # Load facilities.
        with view['facility']:
            arcpy.na.AddFieldToAnalysisLayer(
                in_network_analysis_layer='closest',
                sub_layer='Facilities',
                field_name='facility_id',
                field_type=meta['id_field']['facility']['type'],
                field_precision=meta['id_field']['facility']['precision'],
                field_scale=meta['id_field']['facility']['scale'],
                field_length=meta['id_field']['facility']['length'],
                field_is_nullable=True,
            )
            arcpy.na.AddLocations(
                in_network_analysis_layer='closest',
                sub_layer='Facilities',
                in_table=view['facility'].name,
                field_mappings='facility_id {} #'.format(facility_id_field_name),
                append=False,
                exclude_restricted_elements=True,
            )
        facility_oid_id = attributes.id_map(
            'closest/Facilities', id_field_names='oid@', field_names='facility_id'
        )
        # Load dataset locations.
        with view['dataset']:
            arcpy.na.AddFieldToAnalysisLayer(
                in_network_analysis_layer='closest',
                sub_layer='Incidents',
                field_name='dataset_id',
                field_type=meta['id_field']['dataset']['type'],
                field_precision=meta['id_field']['dataset']['precision'],
                field_scale=meta['id_field']['dataset']['scale'],
                field_length=meta['id_field']['dataset']['length'],
                field_is_nullable=True,
            )
            arcpy.na.AddLocations(
                in_network_analysis_layer='closest',
                sub_layer='Incidents',
                in_table=view['dataset'].name,
                field_mappings='dataset_id {} #'.format(id_field_name),
                append=False,
                snap_to_position_along_network=False,
                exclude_restricted_elements=True,
            )
        dataset_oid_id = attributes.id_map(
            'closest/Incidents', id_field_names='oid@', field_names='dataset_id'
        )
        arcpy.na.Solve(
            in_network_analysis_layer='closest',
            ignore_invalids=True,
            terminate_on_solve_error=True,
        )
    cursor = arcpy.da.SearchCursor('closest/Routes', field_names=keys['cursor'])
    with cursor:
        for row in cursor:
            feat = dict(zip(keys['cursor'], row))
            yield {
                'dataset_id': dataset_oid_id[feat['IncidentID']],
                'facility_id': facility_oid_id[feat['FacilityID']],
                'cost': feat['total_' + cost_attribute],
                'geometry': feat['shape@'],
            }

    dataset.delete('closest', log_level=None)
    log("End: Generate.")
Example #3
def generate_service_rings(
    dataset_path,
    output_path,
    network_path,
    cost_attribute,
    ring_width,
    max_distance,
    **kwargs
):
    """Create facility service ring features using a network dataset.

    Args:
        dataset_path (str): Path of the dataset.
        output_path (str): Path of the output service rings dataset.
        network_path (str): Path of the network dataset.
        cost_attribute (str): Name of the network cost attribute to use.
        ring_width (float): Distance a service ring represents in travel, in the
            dataset's units.
        max_distance (float): Distance in travel from the facility the outer ring will
            extend to, in the dataset's units.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        id_field_name (str): Name of facility ID field.
        restriction_attributes (iter): Collection of network restriction attribute
            names to use.
        travel_from_facility (bool): Flag to indicate performing the analysis
            travelling from (True) or to (False) the facility. Default is False.
        detailed_features (bool): Flag to generate high-detail features. Default is
            False.
        overlap_facilities (bool): Flag to overlap different facility service areas.
            Default is True.
        trim_value (float): Distance from the network features to trim service areas at.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the output service rings dataset.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('id_field_name')
    kwargs.setdefault('restriction_attributes')
    kwargs.setdefault('travel_from_facility', False)
    kwargs.setdefault('detailed_features', False)
    kwargs.setdefault('overlap_facilities', True)
    kwargs.setdefault('trim_value')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Generate service rings for %s.", dataset_path)
    # trim_value assumes meters if not input as linear_unit string.
    if kwargs['trim_value'] is not None:
        trim_value = arcobj.linear_unit_string(kwargs['trim_value'], dataset_path)
    else:
        trim_value = None
    view = {'dataset': arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql'])}
    with arcobj.ArcExtension('Network'):
        arcpy.na.MakeServiceAreaLayer(
            in_network_dataset=network_path,
            out_network_analysis_layer='service_area',
            impedance_attribute=cost_attribute,
            travel_from_to=(
                'travel_from' if kwargs['travel_from_facility'] else 'travel_to'
            ),
            default_break_values=(
                # range() cannot step by floats, so cast the ring inputs to int.
                ' '.join(
                    str(x)
                    for x in range(
                        int(ring_width), int(max_distance) + 1, int(ring_width)
                    )
                )
            ),
            polygon_type=(
                'detailed_polys' if kwargs['detailed_features'] else 'simple_polys'
            ),
            merge=('no_merge' if kwargs['overlap_facilities'] else 'no_overlap'),
            nesting_type='rings',
            UTurn_policy='allow_dead_ends_and_intersections_only',
            restriction_attribute_name=kwargs['restriction_attributes'],
            polygon_trim=(True if trim_value else False),
            poly_trim_value=trim_value,
            hierarchy='no_hierarchy',
        )
        with view['dataset']:
            arcpy.na.AddLocations(
                in_network_analysis_layer="service_area",
                sub_layer="Facilities",
                in_table=view['dataset'].name,
                field_mappings='Name {} #'.format(kwargs['id_field_name']),
                search_tolerance=max_distance,
                match_type='match_to_closest',
                append='clear',
                snap_to_position_along_network='no_snap',
                exclude_restricted_elements=True,
            )
        arcpy.na.Solve(
            in_network_analysis_layer="service_area",
            ignore_invalids=True,
            terminate_on_solve_error=True,
        )
    dataset.copy('service_area/Polygons', output_path, log_level=None)
    dataset.delete('service_area', log_level=None)
    if kwargs['id_field_name']:
        meta = {
            'id_field': arcobj.field_metadata(dataset_path, kwargs['id_field_name'])
        }
        dataset.add_field_from_metadata(output_path, meta['id_field'], log_level=None)
        attributes.update_by_function(
            output_path,
            meta['id_field']['name'],
            function=TYPE_ID_FUNCTION_MAP[meta['id_field']['type']],
            field_as_first_arg=False,
            arg_field_names=['Name'],
            log_level=None,
        )
    log("End: Generate.")
    return output_path
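
A hedged call sketch (paths and field names hypothetical; ring_width and max_distance are whole numbers, so the break-value range in the function divides evenly):

generate_service_rings(
    "C:/data/example.gdb/fire_stations",
    output_path="C:/data/example.gdb/station_rings",
    network_path="C:/data/example.gdb/streets_nd",
    cost_attribute="Length",
    ring_width=1000,
    max_distance=5000,
    id_field_name="station_id",
)
# Break values become "1000 2000 3000 4000 5000": one ring per width step.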
Example #4
    def __init__(
        self,
        init_dataset_path,
        new_dataset_path,
        id_field_names,
        cmp_field_names=None,
        **kwargs
    ):
        """Initialize instance.

        Args:
            init_dataset_path (str): Path of initial dataset.
            new_dataset_path (str): Path of new dataset.
            id_field_names (iter): Field names used to identify a feature.
            cmp_field_names (iter): Collection of fields to compare between the
                datasets for attribute differences.
            **kwargs: Arbitrary keyword arguments. See below.

        Keyword Args:
            overlay_path_fields_map (dict): Mapping of overlay path to attribute field
                names to overlay. Default is None.
            init_dataset_where_sql (str): SQL where-clause for initial dataset
                subselection. Default is None.
            new_dataset_where_sql (str): SQL where-clause for new dataset subselection.
                Default is None.
        """
        self._keys = {
            "id": list(id_field_names),
            "cmp": list(cmp_field_names) if cmp_field_names else [],
        }
        """dict: Mapping of field tag to names."""
        self._keys["load"] = self._keys["id"] + self._keys["cmp"]
        self._dataset = {
            "init": {"path": init_dataset_path},
            "new": {"path": new_dataset_path},
            "overlays": [],
        }
        """dict: Mapping of dataset tag to info about dataset."""
        for tag in self._dataset_tags:
            self._dataset[tag]["where_sql"] = kwargs.get(tag + "_dataset_where_sql")
            try:
                self._dataset[tag]["spatial_reference"] = spatial_reference(
                    self._dataset[tag]["path"]
                )
            except AttributeError:
                self._dataset[tag]["spatial_reference"] = None
            else:
                # Dataset is spatial: include geometry in the load-keys.
                self._keys["load"].append("shape@")
        for path, field_names in kwargs.get("overlay_path_fields_map", {}).items():
            self._dataset["overlays"].append({"path": path, "keys": list(field_names)})
        # Collect field metadata for diff table.
        self._diff_field_metas = [
            {"name": "diff_type", "type": "text", "length": 9},
            {"name": "description", "type": "text", "length": 64},
            {"name": "init_repr", "type": "text", "length": 255},
            {"name": "new_repr", "type": "text", "length": 255},
        ]
        """list of dicts: Diff table field metadata."""
        for id_key in self._keys["id"]:
            meta = {
                key: val
                for key, val in field_metadata(
                    self._dataset["init"]["path"], id_key
                ).items()
                if key in {"name", "type", "length", "precision", "scale"}
            }
            self._diff_field_metas.append(meta)
        self._keys["diff"] = [field["name"] for field in self._diff_field_metas]
        # Init containers.
        self._id_attr = defaultdict(dict)
        """defaultdict: Mapping of feature ID to information of attributes."""
        self.ids = {key: None for key in self._feature_diff_types}
        self.diffs = {key: None for key in self.diff_types}
        self._displacement_links = []
        """list: Representations of displacement links for the geometry diffs."""
Example #5
def coordinate_node_map(
    dataset_path,
    from_id_field_name,
    to_id_field_name,
    id_field_names=("oid@",),
    **kwargs
):
    """Return mapping of coordinates to node-info dictionary.

    Notes:
        The from- and to-ID fields must be the same attribute type.
        Default output format:
            {(x, y): {"node_id": <id>, "ids": {"from": set(), "to": set()}}}

    Args:
        dataset_path (str): Path of the dataset.
        from_id_field_name (str): Name of the from-ID field.
        to_id_field_name (str): Name of the to-ID field.
        id_field_names (iter, str): Name(s) of the ID field(s).
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        update_nodes (bool): Update nodes based on feature geometries if True. Default
            is False.

    Returns:
        dict: Mapping of coordinate tuples to node-info dictionaries.
    """
    kwargs.setdefault("dataset_where_sql")
    kwargs.setdefault("update_nodes", False)
    meta = {
        "from_id_field": field_metadata(dataset_path, from_id_field_name),
        "to_id_field": field_metadata(dataset_path, to_id_field_name),
    }
    if meta["from_id_field"]["type"] != meta["to_id_field"]["type"]:
        raise ValueError("From- and to-ID fields must be of same type.")

    keys = {"id": list(contain(id_field_names))}
    keys["feature"] = ["shape@", from_id_field_name, to_id_field_name] + keys["id"]
    coordinate_node = {}
    for feature in as_iters(
        dataset_path, keys["feature"], dataset_where_sql=kwargs["dataset_where_sql"]
    ):
        _id = tuple(feature[3:])
        if len(keys["id"]) == 1:
            _id = _id[0]
        geom = feature[0]
        node_id = {"from": feature[1], "to": feature[2]}
        coordinate = {
            "from": (geom.firstPoint.X, geom.firstPoint.Y),
            "to": (geom.lastPoint.X, geom.lastPoint.Y),
        }
        for end in ["from", "to"]:
            if coordinate[end] not in coordinate_node:
                # Create new coordinate-node.
                coordinate_node[coordinate[end]] = {
                    "node_id": node_id[end],
                    "ids": defaultdict(set),
                }
            # Assign new ID if current is missing.
            if coordinate_node[coordinate[end]]["node_id"] is None:
                coordinate_node[coordinate[end]]["node_id"] = node_id[end]
            # Assign lower ID if different than current.
            else:
                coordinate_node[coordinate[end]]["node_id"] = min(
                    coordinate_node[coordinate[end]]["node_id"], node_id[end]
                )
            # Add feature ID to end-ID set.
            coordinate_node[coordinate[end]]["ids"][end].add(_id)
    if kwargs["update_nodes"]:
        coordinate_node = _update_coordinate_node_map(
            coordinate_node, meta["from_id_field"]
        )
    return coordinate_node
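
A hedged usage sketch (dataset path and field names hypothetical), showing how the returned mapping is keyed by coordinate tuples:

nodes = coordinate_node_map(
    "C:/data/example.gdb/streets",
    from_id_field_name="from_node_id",
    to_id_field_name="to_node_id",
    update_nodes=True,
)
for (x, y), info in nodes.items():
    # info["ids"]["from"] holds IDs of features starting at this node;
    # info["ids"]["to"] holds IDs of features ending here.
    print((x, y), info["node_id"], len(info["ids"]["from"]), len(info["ids"]["to"]))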
Example #6
def update_by_unique_id(dataset_path, field_name, **kwargs):
    """Update attribute values by assigning a unique ID.

    Existing IDs are preserved, if unique.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Updates are done in an edit session if True. Default is
            False.
        log_level (str): Level to log the function at. Default is "info".

    Returns:
        collections.Counter: Counts for each feature action.
    """
    kwargs.setdefault("dataset_where_sql")
    kwargs.setdefault("use_edit_session", True)
    log = leveled_logger(LOG, kwargs.setdefault("log_level", "info"))
    log(
        "Start: Update attributes in %s on %s by assigning unique IDs.",
        field_name,
        dataset_path,
    )
    meta = {
        "dataset": dataset_metadata(dataset_path),
        "field": field_metadata(dataset_path, field_name),
    }
    session = Editor(meta["dataset"]["workspace_path"], kwargs["use_edit_session"])
    cursor = arcpy.da.UpdateCursor(
        in_table=dataset_path,
        field_names=[field_name],
        where_clause=kwargs["dataset_where_sql"],
    )
    with session:
        used_ids = set()
        # First run will clear duplicate IDs & gather used IDs.
        with cursor:
            for [id_value] in cursor:
                if id_value in used_ids:
                    cursor.updateRow([None])
                else:
                    used_ids.add(id_value)
        id_pool = unique_ids(
            data_type=python_type(meta["field"]["type"]),
            string_length=meta["field"].get("length"),
        )
        # Second run will fill in missing IDs.
        update_action_count = Counter()
        with cursor:
            for [id_value] in cursor:
                if id_value is not None:
                    update_action_count["unchanged"] += 1
                else:
                    id_value = next(id_pool)
                    while id_value in used_ids:
                        id_value = next(id_pool)
                    try:
                        cursor.updateRow([id_value])
                        update_action_count["altered"] += 1
                        used_ids.add(id_value)
                    except RuntimeError:
                        LOG.error("Offending value is %s", id_value)
                        raise

    for action, count in sorted(update_action_count.items()):
        log("%s attributes %s.", count, action)
    log("End: Update.")
    return update_action_count
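
A hedged call sketch (path and field name hypothetical). The returned Counter tallies rows left unchanged versus rows assigned a fresh ID:

counts = update_by_unique_id(
    "C:/data/example.gdb/parcels",
    field_name="parcel_id",
    use_edit_session=True,  # Opt in where the workspace requires edit sessions.
)
print(counts["unchanged"], counts["altered"])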