def update_attributes_by_function(dataset_path, field_name, function,
                                  **kwargs):
    """Update attribute values by passing them to a function.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        function (types.FunctionType): Function to get values from.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        arg_field_names (iter): Iterable of the field names whose values will
            be the function's positional arguments (not including the primary
            field).
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        field_as_first_arg (bool): Flag to indicate the field value will be
            the first argument for the function. Defaults to True.
        kwarg_field_names (iter): Iterable of the field names whose names &
            values will be the function's keyword arguments.
        log_level (str): Level to log the function at. Defaults to 'info'.
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.

    Returns:
        str: Name of the field updated.

    """
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by function %s.", field_name,
        dataset_path, function)
    field_names = {
        'args': tuple(kwargs.get('arg_field_names', ())),
        'kwargs': tuple(kwargs.get('kwarg_field_names', ()))
    }
    field_names['row'] = ((field_name, ) + field_names['args'] +
                          field_names['kwargs'])
    args_idx = len(field_names['args']) + 1
    session = Editor(
        dataset_metadata(dataset_path)['workspace_path'],
        kwargs.get('use_edit_session', False))
    cursor = arcpy.da.UpdateCursor(dataset_path, field_names['row'],
                                   kwargs.get('dataset_where_sql'))
    with session, cursor:
        for row in cursor:
            func_args = (row[0:args_idx]
                         if kwargs.get('field_as_first_arg', True)
                         else row[1:args_idx])
            func_kwargs = dict(zip(field_names['kwargs'], row[args_idx:]))
            new_value = function(*func_args, **func_kwargs)
            if row[0] != new_value:
                try:
                    cursor.updateRow([new_value] + row[1:])
                except RuntimeError:
                    LOG.error("Offending value is %s", new_value)
                    raise
    log("End: Update.")
    return field_name
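

# A minimal usage sketch (dataset path & field names here are hypothetical):
# rebuild a label field from two sibling fields. With the defaults, the
# current field value arrives as the function's first positional argument,
# followed by the `arg_field_names` values in order.
def _example_update_by_function():
    def make_label(current_label, name, number):
        return current_label or '{} ({})'.format(name, number)

    update_attributes_by_function(
        'C:/data/county.gdb/roads', 'label', make_label,
        arg_field_names=['name', 'number'],
    )
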
def update_attributes_by_mapping(dataset_path, field_name, mapping,
                                 key_field_names, **kwargs):
    """Update attribute values by finding them in a mapping.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        mapping (mapping): Mapping to get values from.
        key_field_names (iter): Names of the fields whose values will be the
            mapping's keys.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        default_value: Value to return from mapping if key value on feature not
            present. Defaults to None.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        str: Name of the field updated.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('default_value')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by mapping with key(s) in %s.",
        field_name, dataset_path, key_field_names)
    keys = tuple(contain(key_field_names))
    session = Editor(
        dataset_metadata(dataset_path)['workspace_path'],
        kwargs['use_edit_session'])
    cursor = arcpy.da.UpdateCursor(dataset_path, (field_name, ) + keys,
                                   kwargs['dataset_where_sql'])
    with session, cursor:
        for row in cursor:
            old_value = row[0]
            key = row[1] if len(keys) == 1 else tuple(row[1:])
            new_value = mapping.get(key, kwargs['default_value'])
            if old_value != new_value:
                try:
                    cursor.updateRow([new_value] + row[1:])
                except RuntimeError:
                    LOG.error("Offending value is %s", new_value)
                    raise
    log("End: Update.")
    return field_name
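

# A minimal usage sketch (path, field names & codes are hypothetical):
# translate a status code into its description. With a single key field the
# lookup key is the bare value; with several it becomes a tuple of values.
def _example_update_by_mapping():
    update_attributes_by_mapping(
        'C:/data/county.gdb/parcels', 'status_description',
        mapping={'A': 'Active', 'R': 'Retired'},
        key_field_names=['status_code'],
        default_value='Unknown',
    )
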
def update_attributes_by_node_ids(dataset_path, from_id_field_name,
                                  to_id_field_name, **kwargs):
    """Update attribute values by passing them to a function.

    Args:
        dataset_path (str): Path of the dataset.
        from_id_field_name (str): Name of the from-ID field.
        to_id_field_name (str): Name of the to-ID field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        tuple: Names (str) of the fields updated.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by node IDs.",
        (from_id_field_name, to_id_field_name), dataset_path)
    oid_nodes = id_node_map(dataset_path,
                            from_id_field_name,
                            to_id_field_name,
                            field_names_as_keys=True,
                            update_nodes=True)
    session = Editor(
        dataset_metadata(dataset_path)['workspace_path'],
        kwargs['use_edit_session'])
    cursor = arcpy.da.UpdateCursor(
        dataset_path,
        field_names=('oid@', from_id_field_name, to_id_field_name),
        where_clause=kwargs['dataset_where_sql'],
    )
    with session, cursor:
        for row in cursor:
            oid = row[0]
            new_row = (oid, oid_nodes[oid][from_id_field_name],
                       oid_nodes[oid][to_id_field_name])
            if tuple(row) != new_row:
                try:
                    cursor.updateRow(new_row)
                except RuntimeError:
                    LOG.error("Offending value is one of %s, %s", new_row[1],
                              new_row[2])
                    raise
    log("End: Update.")
    return (from_id_field_name, to_id_field_name)
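

# A minimal usage sketch (dataset path & field names are hypothetical):
# refresh the from- & to-node ID fields so they agree with the node map
# derived from the current line geometry.
def _example_update_by_node_ids():
    update_attributes_by_node_ids(
        'C:/data/county.gdb/water_mains', 'from_node_id', 'to_node_id',
        use_edit_session=True,
    )
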
def insert_features_from_iters(dataset_path, insert_features, field_names,
                               **kwargs):
    """Insert features into dataset from iterables.

    Args:
        dataset_path (str): Path of the dataset.
        insert_features (iter of iter): Collection of iterables representing
            features.
        field_names (iter): Collection of field names to insert. These must
            match the order of their attributes in the insert_features items.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of features for each action.

    """
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from iterables.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    keys = {'row': tuple(contain(field_names))}
    if inspect.isgeneratorfunction(insert_features):
        insert_features = insert_features()
    session = Editor(meta['dataset']['workspace_path'],
                     kwargs['use_edit_session'])
    cursor = arcpy.da.InsertCursor(dataset_path, field_names=keys['row'])
    feature_count = Counter()
    with session, cursor:
        for row in insert_features:
            cursor.insertRow(tuple(row))
            feature_count['inserted'] += 1
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
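

# A minimal usage sketch (dataset path & field names are hypothetical): rows
# are plain tuples ordered to match `field_names`; any iterable of iterables
# (including a generator or generator function) is accepted.
def _example_insert_from_iters():
    rows = [(1001, 'Main St'), (1002, 'Oak Ave')]
    insert_features_from_iters(
        'C:/data/county.gdb/roads', rows, field_names=['road_id', 'name'])
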
def insert_features_from_dicts(dataset_path, insert_features, field_names,
                               **kwargs):
    """Insert features into dataset from dictionaries.

    Args:
        dataset_path (str): Path of the dataset.
        insert_features (iter of dict): Collection of dictionaries
            representing features.
        field_names (iter): Collection of field names/keys to insert.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of features for each action.

    """
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from dictionaries.", dataset_path)
    keys = {'row': tuple(contain(field_names))}
    if inspect.isgeneratorfunction(insert_features):
        insert_features = insert_features()
    iters = ((feature[key] for key in keys['row'])
             for feature in insert_features)
    feature_count = insert_features_from_iters(
        dataset_path,
        iters,
        field_names,
        use_edit_session=kwargs['use_edit_session'],
        log_level=None,
    )
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
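

# A minimal usage sketch (dataset path & field names are hypothetical): each
# dictionary must contain every listed key; extra keys are simply ignored.
def _example_insert_from_dicts():
    features = [{'road_id': 1001, 'name': 'Main St', 'source': 'survey'},
                {'road_id': 1002, 'name': 'Oak Ave', 'source': 'survey'}]
    insert_features_from_dicts(
        'C:/data/county.gdb/roads', features, field_names=['road_id', 'name'])
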
def insert_features_from_path(dataset_path,
                              insert_dataset_path,
                              field_names=None,
                              **kwargs):
    """Insert features into dataset from another dataset.

    Args:
        dataset_path (str): Path of the dataset.
        insert_dataset_path (str): Path of dataset to insert features from.
        field_names (iter): Collection of field names to insert. Listed fields
            must be present in both datasets. If field_names is None, all
            shared fields will be inserted.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        insert_where_sql (str): SQL where-clause for insert-dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of features for each action.

    """
    kwargs.setdefault('insert_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from %s.", dataset_path,
        insert_dataset_path)
    meta = {
        'dataset': dataset_metadata(dataset_path),
        'insert': dataset_metadata(insert_dataset_path)
    }
    if field_names is None:
        keys = set.intersection(*(
            set(name.lower() for name in _meta['field_names_tokenized'])
            for _meta in meta.values()))
    else:
        keys = set(name.lower() for name in contain(field_names))
    # OIDs & area/length "fields" have no business being part of an update.
    # Geometry itself is handled separately in append function.
    for _meta in meta.values():
        for key in chain(*_meta['field_token'].items()):
            keys.discard(key)
            # Keys were case-folded above; discard the lowercase form too.
            keys.discard(key.lower())
    append_kwargs = {
        'inputs': unique_name('view'),
        'target': dataset_path,
        'schema_type': 'no_test',
        'field_mapping': arcpy.FieldMappings()
    }
    # Create field maps.
    # ArcGIS Pro's no-test append is case-sensitive (verified 1.0-1.1.1).
    # Avoid this problem by using field mapping.
    # BUG-000090970 - ArcGIS Pro 'No test' field mapping in Append tool does
    # not auto-map to the same field name if naming convention differs.
    for key in keys:
        field_map = arcpy.FieldMap()
        field_map.addInputField(insert_dataset_path, key)
        append_kwargs['field_mapping'].addFieldMap(field_map)
    view = DatasetView(
        insert_dataset_path,
        kwargs['insert_where_sql'],
        view_name=append_kwargs['inputs'],
        # Must be nonspatial to append to nonspatial table.
        force_nonspatial=(not meta['dataset']['is_spatial']))
    session = Editor(meta['dataset']['workspace_path'],
                     kwargs['use_edit_session'])
    with view, session:
        arcpy.management.Append(**append_kwargs)
        feature_count = Counter({'inserted': view.count})
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
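

# A minimal usage sketch (dataset paths & where-clause are hypothetical):
# pull a filtered subset of features from a staging dataset; omitting
# `field_names` maps every non-token field the two datasets share.
def _example_insert_from_path():
    insert_features_from_path(
        'C:/data/county.gdb/roads', 'C:/staging/county.gdb/roads_new',
        insert_where_sql="status = 'approved'",
    )
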
def delete_features(dataset_path, **kwargs):
    """Delete features in the dataset.

    Args:
        dataset_path (str): Path of the dataset.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Counts of features for each action.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    if kwargs['dataset_where_sql']:
        log("Start: Delete features from %s where `%s`.", dataset_path,
            kwargs['dataset_where_sql'])
    else:
        log("Start: Delete features from %s.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    truncate_error_codes = (
        # "Only supports Geodatabase tables and feature classes."
        'ERROR 000187',
        # "Operation not supported on a versioned table."
        'ERROR 001259',
        # "Operation not supported on table {table name}."
        'ERROR 001260',
        # Operation not supported on a feature class in a controller dataset.
        'ERROR 001395',
    )
    # Can use (faster) truncate when no sub-selection or edit session.
    run_truncate = (kwargs['dataset_where_sql'] is None
                    and kwargs['use_edit_session'] is False)
    feature_count = Counter()
    if run_truncate:
        feature_count['deleted'] = dataset_feature_count(dataset_path)
        feature_count['unchanged'] = 0
        try:
            arcpy.management.TruncateTable(in_table=dataset_path)
        except arcpy.ExecuteError:
            # Avoid arcpy.GetReturnCode(); error code position inconsistent.
            # Search messages for 'ERROR ######' instead.
            if any(code in arcpy.GetMessages()
                   for code in truncate_error_codes):
                LOG.debug("Truncate unsupported; will try deleting rows.")
                run_truncate = False
            else:
                raise
    if not run_truncate:
        view = {
            'dataset': DatasetView(dataset_path, kwargs['dataset_where_sql'])
        }
        session = Editor(meta['dataset']['workspace_path'],
                         kwargs['use_edit_session'])
        with view['dataset'], session:
            feature_count['deleted'] = view['dataset'].count
            arcpy.management.DeleteRows(in_rows=view['dataset'].name)
        feature_count['unchanged'] = dataset_feature_count(dataset_path)
    for key in ('deleted', 'unchanged'):
        log("%s features %s.", feature_count[key], key)
    log("End: Delete.")
    return feature_count
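

# A minimal usage sketch (dataset path & where-clause are hypothetical): a
# where-clause (or an edit session) forces row-by-row deletion; without
# either, the faster truncate path is attempted first.
def _example_delete_features():
    delete_features('C:/data/county.gdb/roads',
                    dataset_where_sql="status = 'retired'")
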
def update_attributes_by_unique_id(dataset_path, field_name, **kwargs):
    """Update attribute values by assigning a unique ID.

    Existing IDs are preserved, if unique.

    Args:
        dataset_path (str): Path of the dataset.
        field_name (str): Name of the field.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        use_edit_session (bool): Flag to perform updates in an edit session. Default is
            False.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        dict: Mapping of new IDs to existing old IDs.

    """
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('use_edit_session', True)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Update attributes in %s on %s by assigning unique IDs.",
        field_name, dataset_path)
    meta = {'field': field_metadata(dataset_path, field_name)}

    def _corrected_id(current_id,
                      unique_id_pool,
                      used_ids,
                      ignore_nonetype=False):
        """Return corrected ID to ensure uniqueness."""
        if ((ignore_nonetype and current_id is None)
                or current_id not in used_ids):
            corrected_id = current_id
        else:
            corrected_id = next(unique_id_pool)
            while corrected_id in used_ids:
                corrected_id = next(unique_id_pool)
        return corrected_id

    unique_id_pool = unique_ids(data_type=python_type(meta['field']['type']),
                                string_length=meta['field'].get('length', 16))
    oid_id = id_attributes_map(dataset_path,
                               id_field_names='oid@',
                               field_names=field_name)
    used_ids = set()
    new_old_id = {}
    # Ensure current IDs are unique.
    for oid, current_id in oid_id.items():
        _id = _corrected_id(current_id,
                            unique_id_pool,
                            used_ids,
                            ignore_nonetype=True)
        if _id != current_id:
            new_old_id[_id] = oid_id[oid]
            oid_id[oid] = _id
        used_ids.add(_id)
    # Take care of unassigned IDs now that we know all the used IDs.
    for oid, current_id in oid_id.items():
        if current_id is None:
            _id = _corrected_id(current_id,
                                unique_id_pool,
                                used_ids,
                                ignore_nonetype=False)
            oid_id[oid] = _id
            used_ids.add(_id)
    update_attributes_by_mapping(
        dataset_path,
        field_name,
        mapping=oid_id,
        key_field_names='oid@',
        dataset_where_sql=kwargs['dataset_where_sql'],
        use_edit_session=kwargs['use_edit_session'],
        log_level=None)
    log("End: Update.")
    return new_old_id
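

# A minimal usage sketch (dataset path & field name are hypothetical): assign
# IDs to features missing one and reassign only the duplicates; the returned
# mapping records which old IDs were replaced.
def _example_update_by_unique_id():
    new_to_old = update_attributes_by_unique_id(
        'C:/data/county.gdb/parcels', 'parcel_id')
    for new_id, old_id in new_to_old.items():
        LOG.info("Reassigned duplicate ID %s as %s.", old_id, new_id)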