Example #1
    def transform(self, transformation, **kwargs):
        """Run transform operation as defined in the workspace.

        Args:
            transformation: Function or method used to perform a transformation upon
                the transform-dataset.
            **kwargs: Arbitrary keyword arguments; passed through to the transformation.

        Returns:
            arcetl.etl.ArcETL: Reference to the instance.
        """
        # Unless otherwise stated, dataset path is self.transform_path.
        kwargs.setdefault("dataset_path", self.transform_path)
        # Add output_path to kwargs if needed.
        if "output_path" in funcsigs.signature(transformation).parameters:
            kwargs.setdefault(
                "output_path",
                unique_path(getattr(transformation, "__name__", "transform")),
            )
        transformation(**kwargs)
        # If there"s a new output, replace old transform.
        if "output_path" in funcsigs.signature(transformation).parameters:
            if dataset.is_valid(self.transform_path):
                dataset.delete(self.transform_path, log_level=None)
            self.transform_path = kwargs["output_path"]
        return self
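A minimal usage sketch, assuming an ArcETL instance named etl whose transform
dataset has already been extracted, and that the dissolve function from Example
#8 is in scope (names and field values are illustrative, not from the source):

# transform() injects dataset_path=etl.transform_path, so only the
# transformation-specific keyword arguments need to be supplied.
etl.transform(dissolve, dissolve_field_names=["ZONE_CODE"], multipart=False)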
Example #2
    def __init__(self, dataset_path, dataset_where_sql=None, **kwargs):
        """Initialize instance.

        Note:
            To make a temp dataset without copying any template rows:
            `dataset_where_sql="0=1"`

        Args:
            dataset_path (str): Path of dataset to copy.
            dataset_where_sql (str): SQL where-clause for dataset subselection.
            **kwargs: Arbitrary keyword arguments. See below.

        Keyword Args:
            output_path (str): Path of the dataset to create. Default is None
                (auto-generates a path).
            field_names (iter): Field names to include in the copy. If field_names
                is not specified, all fields will be included.
            force_nonspatial (bool): True to force a nonspatial copy, False otherwise.
                Default is False.
        """
        self.path = kwargs.get("output_path", unique_path("temp"))
        self.dataset_path = dataset_path
        self.dataset_meta = dataset_metadata(dataset_path)
        self.field_names = list(
            kwargs.get("field_names", self.dataset_meta["field_names"])
        )
        self.is_spatial = all(
            [self.dataset_meta["is_spatial"], not kwargs.get("force_nonspatial", False)]
        )
        self.where_sql = dataset_where_sql
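A usage sketch, assuming this initializer belongs to the TempDatasetCopy class
used in Examples #10-#12, where instances serve as context managers (the path
is illustrative, and the copy itself presumably happens on context entry):

# Schema-only temp copy: the "0=1" where-clause excludes all template rows.
temp = TempDatasetCopy("C:/data/city.gdb/parcels", dataset_where_sql="0=1")
with temp:
    print(temp.path)  # auto-generated unique path holding the copy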
Example #3
def erase(dataset_path, erase_dataset_path, **kwargs):
    """Erase feature geometry where it overlaps erase-dataset geometry.

    Args:
        dataset_path (str): Path of the dataset.
        erase_dataset_path (str): Path of the dataset defining the erase-area.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        erase_where_sql (str): SQL where-clause for erase-dataset subselection.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('erase_where_sql')
    kwargs.setdefault('tolerance')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Erase features in %s where overlapping %s.",
        dataset_path,
        erase_dataset_path,
    )
    meta = {'dataset': arcobj.dataset_metadata(dataset_path)}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql']),
        'erase': arcobj.DatasetView(erase_dataset_path,
                                    kwargs['erase_where_sql']),
    }
    temp_output_path = unique_path('output')
    with view['dataset'], view['erase']:
        arcpy.analysis.Erase(
            in_features=view['dataset'].name,
            erase_features=view['erase'].name,
            out_feature_class=temp_output_path,
            cluster_tolerance=kwargs['tolerance'],
        )
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with session:
        delete(dataset_path,
               dataset_where_sql=kwargs['dataset_where_sql'],
               log_level=None)
        insert_from_path(dataset_path,
                         insert_dataset_path=temp_output_path,
                         log_level=None)
    dataset.delete(temp_output_path, log_level=None)
    log("End: Erase.")
    return dataset_path
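A hedged call sketch (dataset paths and the where-clause are illustrative):

# Clip building footprints out of any approved demolition area.
erase(
    "C:/data/city.gdb/buildings",
    erase_dataset_path="C:/data/city.gdb/demolition_zones",
    erase_where_sql="STATUS = 'APPROVED'",
    use_edit_session=True,
)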
Example #4
    def extract(self):
        """Extract review features.

        Returns:
            arcetl.diff.Differ: Reference to instance.
        """
        # Clear old attributes.
        self._id_attr.clear()
        for tag in self._dataset_tags:
            feats = attributes.as_dicts(
                dataset_path=self._dataset[tag]["path"],
                field_names=self._keys["load"],
                dataset_where_sql=self._dataset[tag]["where_sql"],
                spatial_reference_item=self._dataset["init"]["spatial_reference"],
            )
            for feat in feats:
                id_val = tuple(freeze_values(*(feat[key] for key in self._keys["id"])))
                self._id_attr[tag][id_val] = feat
        # Add overlay attributes.
        for tag in self._dataset_tags:
            view = DatasetView(
                dataset_path=self._dataset[tag]["path"],
                dataset_where_sql=self._dataset[tag]["where_sql"],
                field_names=self._keys["id"],
            )
            with view:
                for overlay in self._dataset["overlays"]:
                    field_maps = arcpy.FieldMappings()
                    for path, keys in [
                        (view.name, self._keys["id"]),
                        (overlay["path"], overlay["keys"]),
                    ]:
                        for key in keys:
                            field_map = arcpy.FieldMap()
                            field_map.addInputField(path, key)
                            field_maps.addFieldMap(field_map)
                    output_path = unique_path()
                    arcpy.analysis.SpatialJoin(
                        target_features=view.name,
                        join_features=overlay["path"],
                        out_feature_class=output_path,
                        field_mapping=field_maps,
                    )
                    for feat in attributes.as_dicts(
                        output_path, field_names=(self._keys["id"] + overlay["keys"])
                    ):
                        id_val = tuple(
                            freeze_values(*(feat[key] for key in self._keys["id"]))
                        )
                        if id_val in self._id_attr[tag]:
                            for key in overlay["keys"]:
                                # Use (path, field name) for attribute key.
                                self._id_attr[tag][id_val][
                                    (overlay["path"], key)
                                ] = feat[key]
                    arcpy.management.Delete(output_path)
        return self
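A heavily hedged usage sketch; the Differ constructor is not shown in this
excerpt, so its arguments are elided rather than guessed:

differ = Differ(...)  # hypothetical: construction arguments not shown here
# extract() returns the instance, so it can head a method chain.
differ.extract()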
Example #5
    def extract(self, dataset_path, extract_where_sql=None):
        """Extract features to transform workspace.

        Args:
            dataset_path (str): Path of the dataset to extract.
            extract_where_sql (str): SQL where-clause for extract subselection.

        Returns:
            arcetl.etl.ArcETL: Reference to the instance.
        """
        LOG.info("Start: Extract %s.", dataset_path)
        self.transform_path = unique_path("extract")
        feature_action_count = dataset.copy(
            dataset_path=dataset_path,
            output_path=self.transform_path,
            dataset_where_sql=extract_where_sql,
            log_level=None,
        )
        for action, count in sorted(feature_action_count.items()):
            LOG.info("%s features %s.", count, action)
        LOG.info("End: Extract.")
        return self
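A chained usage sketch, assuming an ArcETL instance named etl (the path and
where-clause are illustrative; extract returns the instance, so it chains
directly into transform from Example #1):

etl.extract(
    "C:/data/source.gdb/roads", extract_where_sql="TYPE = 'ARTERIAL'"
).transform(dissolve, dissolve_field_names=["ROUTE_ID"])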
Example #6
    def init_schema(self, template_path=None, **kwargs):
        """Initialize dataset schema. Use only when extracting dataset.

        Keyword arguments that describe the schema will only be referenced
        if template_path is undefined.

        Args:
            template_path (str): Path of the dataset to use as schema
                template.
            **kwargs: Arbitrary keyword arguments. See below.

        Keyword Args:
            field_metadata_list (iter): Field metadata mappings. Will be ignored if
                template_path used.
            geometry_type (str): Geometry type. Valid types are: point, multipoint,
                polygon, polyline. If unstated or another value, dataset will be
                nonspatial. Will be ignored if template_path used.
            spatial_reference_item: Item from which the spatial reference of the output
                geometry will be derived. Default is 4326 (EPSG code for unprojected
                WGS84). Will be ignored if template_path used.

        Returns:
            arcetl.etl.ArcETL: Reference to the instance.
        """
        LOG.info("Start: Initialize schema.")
        self.transform_path = unique_path("init")
        if template_path:
            dataset.copy(
                dataset_path=template_path,
                output_path=self.transform_path,
                schema_only=True,
                log_level=None,
            )
        else:
            dataset.create(dataset_path=self.transform_path, log_level=None, **kwargs)
        LOG.info("End: Initialize.")
        return self
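Two hedged ways to start from an empty schema instead of extracting features
(the template path is illustrative; keyword arguments as documented above):

# From a template dataset's schema:
etl.init_schema(template_path="C:/data/source.gdb/roads")
# Or by describing the schema directly:
etl.init_schema(geometry_type="polyline", spatial_reference_item=4326)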
Example #7
def eliminate_interior_rings(dataset_path,
                             max_area=None,
                             max_percent_total_area=None,
                             **kwargs):
    """Eliminate interior rings of polygon features.

    Note:
        If no value is provided for either max_area or max_percent_total_area, (nearly)
        all interior rings will be removed. Technically, max_percent_total_area will be
        set to 99.9999.

    Args:
        dataset_path (str): Path of the dataset.
        max_area (float, str): Maximum area; parts smaller than this are eliminated.
            A numeric area will be in the dataset's units. A string area will be
            formatted as '{number} {unit}'.
        max_percent_total_area (float): Maximum percent of total area; parts smaller
            than this are eliminated.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Eliminate interior rings in %s.", dataset_path)
    # Only set the max_percent_total_area default if neither it nor max_area is defined.
    if all([max_area is None, max_percent_total_area is None]):
        max_percent_total_area = 99.9999
    if all([max_area is not None, max_percent_total_area is not None]):
        condition = 'area_or_percent'
    elif max_area is not None:
        condition = 'area'
    else:
        condition = 'percent'
    meta = {'dataset': arcobj.dataset_metadata(dataset_path)}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    temp_output_path = unique_path('output')
    with view['dataset']:
        arcpy.management.EliminatePolygonPart(
            in_features=view['dataset'].name,
            out_feature_class=temp_output_path,
            condition=condition,
            part_area=max_area,
            part_area_percent=max_percent_total_area,
            part_option='contained_only',
        )
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with session:
        delete(dataset_path,
               dataset_where_sql=kwargs['dataset_where_sql'],
               log_level=None)
        insert_from_path(dataset_path,
                         insert_dataset_path=temp_output_path,
                         log_level=None)
    dataset.delete(temp_output_path, log_level=None)
    log("End: Eliminate.")
    return dataset_path
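A call sketch (the path and threshold are illustrative):

# Remove lake-polygon holes smaller than 100, in the dataset's areal units.
eliminate_interior_rings("C:/data/hydro.gdb/lakes", max_area=100)
# With neither maximum provided, nearly all rings go (99.9999 percent cap).
eliminate_interior_rings("C:/data/hydro.gdb/lakes")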
Example #8
def dissolve(dataset_path,
             dissolve_field_names=None,
             multipart=True,
             **kwargs):
    """Dissolve geometry of features that share values in given fields.

    Args:
        dataset_path (str): Path of the dataset.
        dissolve_field_names (iter): Iterable of field names to dissolve on.
        multipart (bool): Flag to allow multipart features in output.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        unsplit_lines (bool): Flag to merge line features when endpoints meet without
            crossing features. Default is False.
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('unsplit_lines', False)
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Dissolve features in %s on fields: %s.",
        dataset_path,
        dissolve_field_names,
    )
    meta = {
        'dataset': arcobj.dataset_metadata(dataset_path),
        'orig_tolerance': arcpy.env.XYTolerance,
    }
    keys = {'dissolve': tuple(contain(dissolve_field_names))}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    temp_output_path = unique_path('output')
    with view['dataset']:
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = kwargs['tolerance']
        arcpy.management.Dissolve(
            in_features=view['dataset'].name,
            out_feature_class=temp_output_path,
            dissolve_field=keys['dissolve'],
            multi_part=multipart,
            unsplit_lines=kwargs['unsplit_lines'],
        )
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = meta['orig_tolerance']
    session = arcobj.Editor(meta['dataset']['workspace_path'],
                            kwargs['use_edit_session'])
    with session:
        delete(dataset_path,
               dataset_where_sql=kwargs['dataset_where_sql'],
               log_level=None)
        insert_from_path(dataset_path,
                         insert_dataset_path=temp_output_path,
                         log_level=None)
    dataset.delete(temp_output_path, log_level=None)
    log("End: Dissolve.")
    return dataset_path
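A call sketch (the path and field name are illustrative):

# Merge touching parcels that share a zone code into single-part features.
dissolve(
    "C:/data/city.gdb/parcels",
    dissolve_field_names=["ZONE_CODE"],
    multipart=False,
)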
Example #9
def id_near_info_map(dataset_path,
                     dataset_id_field_name,
                     near_dataset_path,
                     near_id_field_name,
                     max_near_distance=None,
                     **kwargs):
    """Return mapping dictionary of feature IDs/near-feature info.

    Args:
        dataset_path (str): Path of the dataset.
        dataset_id_field_name (str): Name of ID field.
        near_dataset_path (str): Path of the near-dataset.
        near_id_field_name (str): Name of the near ID field.
        max_near_distance (float): Maximum distance to search for near-features, in
            units of the dataset's spatial reference.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        near_where_sql (str): SQL where-clause for near-dataset subselection.
        near_rank (int): Nearness rank of the feature to map info for. Default is 1.

    Returns:
        dict: Mapping of the dataset ID to a near-feature info dictionary.
            Info dictionary keys: 'id', 'near_id', 'rank', 'distance',
            'angle', 'near_x', 'near_y'.
            'distance' value (float) will match linear unit of the dataset's
            spatial reference.
            'angle' value (float) is in decimal degrees.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('near_where_sql')
    kwargs.setdefault('near_rank', 1)
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql']),
        'near': arcobj.DatasetView(near_dataset_path,
                                   kwargs['near_where_sql']),
    }
    with view['dataset'], view['near']:
        temp_near_path = unique_path('near')
        arcpy.analysis.GenerateNearTable(
            in_features=view['dataset'].name,
            near_features=view['near'].name,
            out_table=temp_near_path,
            search_radius=max_near_distance,
            location=True,
            angle=True,
            closest=False,
            closest_count=kwargs['near_rank'],
        )
        oid_id_map = attributes.id_map(view['dataset'].name, 'oid@',
                                       dataset_id_field_name)
        near_oid_id_map = attributes.id_map(view['near'].name, 'oid@',
                                            near_id_field_name)
    field_names = [
        'in_fid', 'near_fid', 'near_dist', 'near_angle', 'near_x', 'near_y',
        'near_rank'
    ]
    near_info_map = {}
    for near_info in attributes.as_dicts(temp_near_path, field_names):
        if near_info['near_rank'] == kwargs['near_rank']:
            _id = oid_id_map[near_info['in_fid']]
            near_info_map[_id] = {
                'id': _id,
                'near_id': near_oid_id_map[near_info['near_fid']],
                'rank': near_info['near_rank'],
                'distance': near_info['near_dist'],
                'angle': near_info['near_angle'],
                'near_x': near_info['near_x'],
                'near_y': near_info['near_y'],
            }
    dataset.delete(temp_near_path, log_level=None)
    return near_info_map
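A usage sketch (paths, field names, and the search distance are illustrative):

near_info = id_near_info_map(
    "C:/data/water.gdb/hydrants",
    dataset_id_field_name="HYDRANT_ID",
    near_dataset_path="C:/data/water.gdb/mains",
    near_id_field_name="MAIN_ID",
    max_near_distance=50.0,
)
for _id, info in near_info.items():
    print(_id, info["near_id"], info["distance"], info["angle"])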
Example #10
def union(dataset_path, field_name, union_dataset_path, union_field_name,
          **kwargs):
    """Assign union attribute to features, splitting where necessary.

    Note:
        This function has a 'chunking' loop routine in order to avoid an unhelpful
        output error that occurs when the inputs are rather large. For some reason the
        union will 'succeed' with an empty output warning, but not create an output
        dataset. Running the union against smaller sets of data generally avoids
        this conundrum.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the dataset's field to assign to.
        union_dataset_path (str): Path of the union dataset.
        union_field_name (str): Name of union dataset's field with values to assign.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        chunk_size (int): Number of features to process per loop. Default is 4096.
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        union_where_sql (str): SQL where-clause for the union dataset subselection.
        replacement_value: Value to replace overlay field values with.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('chunk_size', 4096)
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('union_where_sql')
    kwargs.setdefault('tolerance')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Union-set attributes in %s on %s by overlay values in %s on %s.",
        field_name,
        dataset_path,
        union_field_name,
        union_dataset_path,
    )
    if kwargs.get('replacement_value') is not None:
        update_function = (lambda x: kwargs['replacement_value'] if x else None)
    else:
        # Union puts an empty string where no union feature was present.
        # Fix to null (the replacement-value function does this inherently).
        update_function = (lambda x: None if x == '' else x)
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    # Create a temporary copy of the union dataset.
    temp_union = arcobj.TempDatasetCopy(union_dataset_path,
                                        kwargs['union_where_sql'],
                                        field_names=[union_field_name])
    with view['dataset'], temp_union:
        # Avoid field name collisions with neutral field name.
        temp_union.field_name = dataset.rename_field(
            temp_union.path,
            union_field_name,
            new_field_name=unique_name(union_field_name),
            log_level=None,
        )
        for view['chunk'] in view['dataset'].as_chunks(kwargs['chunk_size']):
            temp_output_path = unique_path('output')
            arcpy.analysis.Union(
                in_features=[view['chunk'].name, temp_union.path],
                out_feature_class=temp_output_path,
                join_attributes='all',
                cluster_tolerance=kwargs['tolerance'],
                gaps=False,
            )
            # Clean up bad or null geometry created in processing.
            arcpy.management.RepairGeometry(temp_output_path)
            # Push union (or replacement) value from temp to update field.
            attributes.update_by_function(
                temp_output_path,
                field_name,
                update_function,
                field_as_first_arg=False,
                arg_field_names=[temp_union.field_name],
                log_level=None,
            )
            # Replace original chunk features with new features.
            features.delete(view['chunk'].name, log_level=None)
            features.insert_from_path(dataset_path,
                                      temp_output_path,
                                      log_level=None)
            dataset.delete(temp_output_path, log_level=None)
    log("End: Union.")
    return dataset_path
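A call sketch (paths and field names are illustrative):

# Stamp flood-zone classes onto parcels, splitting them at zone boundaries.
union(
    "C:/data/city.gdb/parcels",
    field_name="flood_zone",
    union_dataset_path="C:/data/fema.gdb/flood_zones",
    union_field_name="ZONE_CLASS",
    chunk_size=2048,
)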
Example #11
def overlay(dataset_path, field_name, overlay_dataset_path, overlay_field_name,
            **kwargs):
    """Assign overlay attribute to features, splitting where necessary.

    Note:
        Only one overlay flag at a time can be used. If multiple are set to True, the
        first one referenced in the code will be used. If no overlay flags are set, the
        operation will perform a basic intersection check, and the result will be at
        the whim of the geoprocessing environment's merge rule for the update field.

        This function has a 'chunking' loop routine in order to avoid an unhelpful
        output error that occurs when the inputs are rather large. For some reason the
        spatial join will 'succeed' with an empty output warning, but not create an
        output dataset. Running the join against smaller sets of data generally avoids this
        conundrum.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the dataset's field to assign to.
        overlay_dataset_path (str): Path of the overlay dataset.
        overlay_field_name (str): Name of overlay dataset's field with values to
            assign.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        chunk_size (int): Number of features to process per loop. Default is 4096.
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        overlay_central_coincident (bool): Flag to overlay the centrally-coincident
            value. Default is False.
        overlay_most_coincident (bool): Flag to overlay the most coincident value.
            Default is False.
        overlay_where_sql (str): SQL where-clause for the overlay dataset subselection.
        replacement_value: Value to replace overlay field values with.
        tolerance (float): Tolerance for coincidence, in dataset's units.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the dataset updated.

    """
    kwargs.setdefault('chunk_size', 4096)
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('overlay_central_coincident', False)
    kwargs.setdefault('overlay_most_coincident', False)
    kwargs.setdefault('overlay_where_sql')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log(
        "Start: Overlay-set attributes in %s on %s by overlay values in %s on %s.",
        field_name,
        dataset_path,
        overlay_field_name,
        overlay_dataset_path,
    )
    # Check flags & set details for spatial join call.
    join_kwargs = {
        'join_operation': 'join_one_to_many',
        'join_type': 'keep_all'
    }
    if kwargs['overlay_central_coincident']:
        join_kwargs['match_option'] = 'have_their_center_in'
    elif kwargs['overlay_most_coincident']:
        raise NotImplementedError("overlay_most_coincident not yet implemented.")
    else:
        join_kwargs['match_option'] = 'intersect'
    if kwargs.get('replacement_value') is not None:
        update_function = (lambda x: kwargs['replacement_value'] if x else None)
    else:
        update_function = (lambda x: x)
    meta = {'orig_tolerance': arcpy.env.XYTolerance}
    view = {
        'dataset': arcobj.DatasetView(dataset_path,
                                      kwargs['dataset_where_sql'])
    }
    # Create temporary copy of overlay dataset.
    temp_overlay = arcobj.TempDatasetCopy(
        overlay_dataset_path,
        kwargs['overlay_where_sql'],
        field_names=[overlay_field_name],
    )
    with view['dataset'], temp_overlay:
        # Avoid field name collisions with neutral field name.
        temp_overlay.field_name = dataset.rename_field(
            temp_overlay.path,
            overlay_field_name,
            new_field_name=unique_name(overlay_field_name),
            log_level=None,
        )
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = kwargs['tolerance']
        for view['chunk'] in view['dataset'].as_chunks(kwargs['chunk_size']):
            temp_output_path = unique_path('output')
            arcpy.analysis.SpatialJoin(target_features=view['chunk'].name,
                                       join_features=temp_overlay.path,
                                       out_feature_class=temp_output_path,
                                       **join_kwargs)
            # Clean up bad or null geometry created in processing.
            arcpy.management.RepairGeometry(temp_output_path)
            # Push overlay (or replacement) value from temp to update field.
            attributes.update_by_function(
                temp_output_path,
                field_name,
                update_function,
                field_as_first_arg=False,
                arg_field_names=[temp_overlay.field_name],
                log_level=None,
            )
            # Replace original chunk features with new features.
            features.delete(view['chunk'].name, log_level=None)
            features.insert_from_path(dataset_path,
                                      temp_output_path,
                                      log_level=None)
            dataset.delete(temp_output_path, log_level=None)
        if 'tolerance' in kwargs:
            arcpy.env.XYTolerance = meta['orig_tolerance']
    log("End: Overlay.")
    return dataset_path
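A call sketch (paths and field names are illustrative; only one overlay flag
is set, per the note above):

# Assign each address the name of the city polygon containing its center.
overlay(
    "C:/data/region.gdb/addresses",
    field_name="city_name",
    overlay_dataset_path="C:/data/region.gdb/cities",
    overlay_field_name="NAME",
    overlay_central_coincident=True,
)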
Example #12
def update_by_overlay(
    dataset_path, field_name, overlay_dataset_path, overlay_field_name, **kwargs
):
    """Update attribute values by finding overlay feature value.

    Note:
        Since only one value will be selected in the overlay, operations with multiple
        overlaying features will respect the geoprocessing environment merge rule. This
        rule generally defaults to the value of the "first" feature.

        Only one overlay flag at a time can be used (e.g. "overlay_most_coincident",
        "overlay_central_coincident"). If multiple are set to True, the first one
        referenced in the code will be used. If no overlay flags are set, the operation
        will perform a basic intersection check, and the result will be at the whim of
        the geoprocessing environment merge rule for the update field.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        overlay_dataset_path (str): Path of the overlay-dataset.
        overlay_field_name (str): Name of the overlay-field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        overlay_central_coincident (bool): Overlay will use the centrally-coincident
            value if True. Default is False.
        overlay_most_coincident (bool): Overlay will use the most coincident value if
            True. Default is False.
        overlay_where_sql (str): SQL where-clause for overlay dataset subselection.
        replacement_value: Value to replace a present overlay-field value with.
        tolerance (float): Tolerance for coincidence, in units of the dataset.
        use_edit_session (bool): Updates are done in an edit session if True. Default is
            False.
        log_level (str): Level to log the function at. Default is "info".

    Returns:
        collections.Counter: Counts for each feature action.
    """
    kwargs.setdefault("dataset_where_sql")
    kwargs.setdefault("overlay_central_coincident", False)
    kwargs.setdefault("overlay_most_coincident", False)
    kwargs.setdefault("overlay_where_sql")
    kwargs.setdefault("use_edit_session", False)
    log = leveled_logger(LOG, kwargs.setdefault("log_level", "info"))
    log(
        "Start: Update attributes in %s on %s by overlay values in %s on %s.",
        field_name,
        dataset_path,
        overlay_field_name,
        overlay_dataset_path,
    )
    meta = {
        "dataset": dataset_metadata(dataset_path),
        "original_tolerance": arcpy.env.XYTolerance,
    }
    join_kwargs = {"join_operation": "join_one_to_many", "join_type": "keep_all"}
    if kwargs["overlay_central_coincident"]:
        join_kwargs["match_option"] = "have_their_center_in"
    ##TODO: Implement overlay_most_coincident.
    elif kwargs["overlay_most_coincident"]:
        raise NotImplementedError("overlay_most_coincident not yet implemented.")
    else:
        # Basic intersect match, per the docstring note on unset overlay flags.
        join_kwargs["match_option"] = "intersect"
    dataset_view = DatasetView(dataset_path, kwargs["dataset_where_sql"])
    overlay_copy = TempDatasetCopy(
        overlay_dataset_path,
        kwargs["overlay_where_sql"],
        field_names=[overlay_field_name],
    )
    with dataset_view, overlay_copy:
        # Avoid field name collisions with neutral name.
        overlay_copy.field_name = dataset.rename_field(
            overlay_copy.path,
            overlay_field_name,
            new_field_name=unique_name(overlay_field_name),
            log_level=None,
        )
        if "tolerance" in kwargs:
            arcpy.env.XYTolerance = kwargs["tolerance"]
        # Create temp output of the overlay.
        temp_output_path = unique_path("output")
        arcpy.analysis.SpatialJoin(
            target_features=dataset_view.name,
            join_features=overlay_copy.path,
            out_feature_class=temp_output_path,
            **join_kwargs
        )
        if "tolerance" in kwargs:
            arcpy.env.XYTolerance = meta["original_tolerance"]
    # Push overlay (or replacement) value from output to update field.
    if "replacement_value" in kwargs and kwargs["replacement_value"] is not None:
        function = lambda x: kwargs["replacement_value"] if x else None
    else:
        function = lambda x: x
    update_by_function(
        temp_output_path,
        field_name,
        function,
        field_as_first_arg=False,
        arg_field_names=[overlay_copy.field_name],
        log_level=None,
    )
    # Update values in original dataset.
    update_action_count = update_by_joined_value(
        dataset_path,
        field_name,
        join_dataset_path=temp_output_path,
        join_field_name=field_name,
        on_field_pairs=[(meta["dataset"]["oid_field_name"], "target_fid")],
        dataset_where_sql=kwargs["dataset_where_sql"],
        use_edit_session=kwargs["use_edit_session"],
        log_level=None,
    )
    dataset.delete(temp_output_path, log_level=None)
    for action, count in sorted(update_action_count.items()):
        log("%s attributes %s.", count, action)
    log("End: Update.")
    return update_action_count
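A usage sketch (paths and field names are illustrative). Unlike overlay in
Example #11, this updates values in place rather than splitting features, and
it returns per-action counts:

update_action_count = update_by_overlay(
    "C:/data/city.gdb/parcels",
    field_name="zone_code",
    overlay_dataset_path="C:/data/city.gdb/zones",
    overlay_field_name="ZONE",
    overlay_central_coincident=True,
)
print(update_action_count)  # collections.Counter of feature actions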