Example #1
0
    class _Maker(object):
        """
        A converter maker that wraps a senior maker together with a set
        of creation-time keyword arguments; calling the instance merges
        in call-time keyword arguments and delegates to the senior maker.
        """

        def __init__(self, senior_maker, maker_creation_kwargs):
            # type: (ConverterExtendedMaker, KwargsDict) -> None
            self._senior_maker = senior_maker
            self._maker_creation_kwargs = maker_creation_kwargs

        __repr__ = attr_repr('_senior_maker', '_maker_creation_kwargs')

        # `ConverterMaker`-specific stuff:
        def __call__(self, **maker_call_kwargs):
            # type: (...) -> Converter
            # Call-time kwargs take precedence over creation-time ones.
            merged_kwargs = dict(self._maker_creation_kwargs,
                                 **maker_call_kwargs)
            return self._senior_maker(**merged_kwargs)

        # `ConverterExtendedMaker`-specific stuff:
        @property
        def accept_kwargs_prescribed_for(self):
            # type: () -> ConverterExtendedMaker
            return self._senior_maker

        def maker(self, **junior_maker_creation_kwargs):
            # type: (...) -> ConverterExtendedMaker
            # A junior maker treats *this* instance as its senior maker.
            return self.__class__(
                senior_maker=self,
                maker_creation_kwargs=junior_maker_creation_kwargs)

        # `BaseConverter.maker()`-result-specific stuff:
        @property
        def provided_kwargs(self):
            # type: () -> KwargsDict
            # A senior maker created by `maker()` exposes its own
            # `provided_kwargs`; other senior makers may not -- hence
            # the `getattr()` with the `{}` fallback.
            inherited = getattr(self._senior_maker, 'provided_kwargs', {})
            merged = dict(inherited)
            merged.update(self._maker_creation_kwargs)
            return merged
Example #2
0
        class Spam(Foo):
            """Sample subclass whose repr exposes a mix of attributes."""

            __repr__ = attr_repr('bar', 'foo', 'spam', 'ham')

            # class-level attributes (`ham` is shadowed per instance
            # in `__init__()`; `blabla` is not part of the repr)
            spam = 42
            ham = 43
            blabla = 'blablaaaa'

            def __init__(self):
                # instance attributes (`huhuhu` is not part of the repr)
                self.bar = 'bar'
                self.ham = 44
                self.huhuhu = 'huhuhuuuu'
Example #3
0
class RenderFrom(object):
    """
    A simple wrapper around the name of a template, exposing it via
    the read-only `template_name` property.
    """

    def __init__(self, template_name):
        self._template_name = template_name

    __repr__ = attr_repr('template_name')

    @property
    def template_name(self):
        # type: () -> String
        return self._template_name
class PlainCell(object):
    """
    A cell that holds exactly one value (exposed via `output`).

    Adding a `StemCell` to it yields this cell unchanged; adding any
    other cell raises `DataConversionError` (data duplication).
    """

    def __init__(self, value):
        # type: (Value) -> None
        self._output = value

    __repr__ = attr_repr('output')

    @property
    def output(self):
        # type: () -> Value
        return self._output

    def __add__(self, other):
        # type: (Cell) -> Cell
        # Guard clause: only a `StemCell` may be absorbed silently.
        if not isinstance(other, StemCell):
            raise DataConversionError('data duplication detected')
        return self

    __radd__ = __add__
class MultiCell(object):
    """
    A cell that can hold multiple values and that can be merged with
    other `MultiCell` instances using the `+` operator.

    Merging is cheap: no value collections are copied -- the merged
    cell only keeps references to its component cells, and the actual
    concatenation is deferred until `output` is first accessed (see
    `_merge()` and `_iter_all_values()`).
    """

    def __init__(
            self,
            *values,  # type: Value
            component_cells=(),  # type: Iterable[MultiCell]
            output_as=list,  # type: Callable[[Iterator[Value]], Any]
    ):
        # Values of `component_cells` (gathered recursively) precede
        # this cell's own `values` in the resultant `output`.
        self._own_values = values  # type: Tuple[Value, ...]
        self._component_cells = tuple(
            component_cells)  # type: Tuple[MultiCell, ...]
        self._output_as = output_as  # type: Callable[[Iterator[Value]], Any]

    __repr__ = attr_repr('_own_values', '_component_cells', '_output_as')

    @classmethod
    def with_output_as(cls, output_as):
        # type: (Callable[[Iterator[Value]], Any]) -> Callable[[Value], Cell]
        # Convenience factory: a cell maker with `output_as` pre-bound.
        return functools.partial(cls, output_as=output_as)

    @reify
    def output(self):
        # type: () -> Any
        # Computed lazily, on first access (see the `reify` decorator).
        return self._output_as(self._iter_all_values())

    def __add__(self, other):
        # type: (Cell) -> Cell
        if isinstance(other, MultiCell):
            return self._merge(self, other)
        return NotImplemented

    def __radd__(self, other):
        # type: (Cell) -> Cell
        # Reflected variant: `other` comes first in the merged cell.
        if isinstance(other, MultiCell):
            return self._merge(other, self)
        return NotImplemented

    #
    # Private helpers

    def _iter_all_values(self):
        # type: () -> Iterator[Value]
        # Iterative depth-first traversal of the component-cell tree:
        # for each cell, the values of its component cells are yielded
        # (recursively, in order) before the cell's own values.

        # Note: a recursive implementation would be more concise and elegant
        # but it would be prone to "maximum recursion depth exceeded" errors.

        _partial = functools.partial

        class StackItem(object):
            def __init__(self, cell):
                # type: (MultiCell) -> None
                self.cell = cell  # type: MultiCell  # (<- only for assertions)
                # A stateful callable: each call pops the next component
                # cell (or returns None when all have been visited); the
                # underlying iterator persists across re-visits of this
                # stack item.
                self.get_unvisited_component_cell = _partial(
                    next, iter(cell._component_cells),
                    None)  # type: Callable[[], Optional[MultiCell]]
                self.own_values = cell._own_values  # type: Tuple[Value, ...]

        stack = []
        si = StackItem(self)
        while True:
            component_cell = si.get_unvisited_component_cell()
            if component_cell is not None:
                # Descend into the component cell (come back later).
                stack.append(si)
                si = StackItem(component_cell)
            else:
                # All components visited -> yield own values, then
                # resume the parent (or finish at the root).
                for value in si.own_values:
                    yield value
                if not stack:
                    break
                si = stack.pop()
        assert si.cell is self
        assert not stack

    @classmethod
    def _merge(cls, *component_cells):
        # type: (*MultiCell) -> MultiCell
        # Note: here, actually, we do not concatenate the value
        # collections kept by the cells being merged but only store,
        # within the newly created cell, references to these cells;
        # the actual concatenation is deferred until the first
        # retrieval of the `output` attribute is performed (see:
        # the `output` and `_iter_all_values()` definitions).
        # We shaped it this way because we want to keep both of the
        # following premises:
        # * (1) `Cell`-compliant objects (in particular `MultiCell`
        #       instances) behave as immutable objects (so that, in
        #       particular, when merging two `MultiCell` instances
        #       using the `+` operator, the `_own_values` of any of
        #       them is *not* modified but a new `MultiCell` is
        #       created);
        # * (2) merging many consecutive `MultiCell` instances is
        #       still efficient; in particular, the complexity does
        #       *not* grow to `O(n**2)`.
        try:
            first_component_cell = component_cells[0]  # type: MultiCell
        except IndexError:
            raise ValueError('at least one cell must be given')
        # The merged cell inherits `output_as` from the first cell.
        return cls(component_cells=component_cells,
                   output_as=first_component_cell._output_as)
Example #6
0
 class Foo(object):
     """Toy class: only the `a` attribute is included in the repr."""
     foo = 'foooo'
     a = 'aaa'
     __repr__ = attr_repr('a')
Example #7
0
class RecordDict(collections.MutableMapping):
    """
    Record dict class for non-blacklist events.

    A mutable mapping whose keys are restricted to the declared
    `required_keys` and `optional_keys`; every value stored with
    `__setitem__()` is first passed through the corresponding
    `adjust_<key>` adjuster (see the *Adjusters* section below).
    """

    _ADJUSTER_PREFIX = 'adjust_'
    _APPENDER_PREFIX = 'append_'

    data_spec = N6DataSpec()

    required_keys = frozenset(data_spec.result_field_specs('required'))
    optional_keys = frozenset(data_spec.result_field_specs('optional')) | {
        # note: the 'type' item is somewhat related to
        # <parser class>.event_type but *not* to <collector class>.type (!)
        'type',  ## <- FIXME???: shouldn't it be required? (not optional?)
        'enriched',  # (its values are added by enricher)

        # internal keys
        # (items whose keys start with '_' are neither recorded
        #  into database nor used for id computation)
        '_do_not_resolve_fqdn_to_ip',  # flag for enricher
        '_parsed_old',

        # internal keys of aggregated items
        '_group',
        '_first_time',

        # internal keys of blacklist items
        ## FIXME?: shouldn't they be required
        ## (not optional) for BLRecordDict???
        '_bl-series-no',
        '_bl-series-total',
        '_bl-series-id',
        '_bl-time',
        '_bl-current-time',
    }

    # for the following keys, if the given value is invalid,
    # AdjusterError is not propagated; instead the value is just
    # not stored (and a warning is logged)
    without_adjuster_error = frozenset({
        'fqdn',
        'name',
        'url',
        'url_pattern',
    })

    #
    # Instantiation-related methods

    @classmethod
    def from_json(cls, json_string, **kwargs):
        # Alternate constructor: build a record dict from a JSON string.
        return cls(json.loads(json_string), **kwargs)

    def __init__(self,
                 iterable_or_mapping=(),
                 log_nonstandard_names=False,
                 context_manager_error_callback=None):
        """
        Initialize the record dict, optionally populating it from
        `iterable_or_mapping` (each stored item is adjusted on the way).
        """
        self._dict = {}
        self._settable_keys = (self.required_keys | self.optional_keys)

        # to catch some kinds of bugs early...
        duplicated = self.required_keys & self.optional_keys
        if duplicated:
            raise ValueError('{} has keys declared both '
                             'as required and optional: {}'.format(
                                 self.__class__.__name__,
                                 ', '.join(sorted(duplicated))))

        missing_adjusters = [
            key for key in self._settable_keys
            if not hasattr(self, self._adjuster_name(key))
        ]
        if missing_adjusters:
            raise TypeError('{!r} has no adjusters for keys: {}'.format(
                self, ', '.join(sorted(missing_adjusters))))

        self.log_nonstandard_names = log_nonstandard_names

        # context-manager (__enter__/__exit__) -related stuff
        self.context_manager_error_callback = context_manager_error_callback
        self.used_as_context_manager = False

        self.update(iterable_or_mapping)

    @classmethod
    def _adjuster_name(cls, key):
        # E.g. '_bl-series-no' -> 'adjust__blseriesno' (dashes dropped).
        return cls._ADJUSTER_PREFIX + key.replace('-', '')

    #
    # Output-related methods

    def get_ready_dict(self):
        """
        Return a deep copy of the stored data as a plain dict,
        raising ValueError if any of the `required_keys` is missing.
        """
        current_keys = set(self._dict)
        assert self._settable_keys >= current_keys
        missing_keys = self.required_keys - current_keys
        if missing_keys:
            raise ValueError('missing keys: ' +
                             ', '.join(sorted(missing_keys)))
        ready_dict = copy.deepcopy(self._dict)
        ######## provide the legacy item
        ######## (needed by old version of RecordDict, in not-yet-updated components)
        used_custom_keys = self.data_spec.custom_field_keys.intersection(
            ready_dict)
        if used_custom_keys:
            ready_dict['__preserved_custom_keys__'] = sorted(used_custom_keys)
        ######## ^^^ (to be removed later)
        return ready_dict

    def get_ready_json(self):
        """Return the ready dict (see above) serialized with `dumps()`."""
        # changed from json.dumps on bson.dumps
        ### XXX: why? bson.json_utils.dumps() pre-converts some values, but is it necessary???
        return dumps(self.get_ready_dict())

    def iter_db_items(self):
        """
        Yield one database item (dict) per element of the `address`
        list, or a single item when `address` is empty or absent.
        """
        # to be cloned later (see below)
        item_prototype = {
            key: value
            for key, value in self.get_ready_dict().iteritems()
            if not key.startswith('_')
        }  # no internal keys

        # pop actual custom items and place them in the "custom" field
        all_custom_keys = self.data_spec.custom_field_keys
        custom_items = {
            key: item_prototype.pop(key)
            for key in all_custom_keys if key in item_prototype
        }
        if custom_items:
            item_prototype['custom'] = custom_items

        # depending on "address" provide one or more database items (dicts)
        address_list = item_prototype.pop('address',
                                          None)  # NOTE: deleting `address`
        if address_list:
            # the `address` list was present and not empty
            # -> db item for each list item (each db item containing
            # `ip`[/`cc`/`asn`] of the list item + the whole `address`)
            item_prototype['address'] = address_list  # restore
            all_addr_keys_are_legal = {'ip', 'cc', 'asn'}.issuperset
            for addr in address_list:
                assert 'ip' in addr and all_addr_keys_are_legal(addr)
                # cloning the prototype dict...
                db_item = item_prototype.copy()
                # ...and updating the copy with particular address data
                db_item.update(addr)
                yield db_item
        else:
            # the `address` list was *empty* or *not* present
            # -> only one db item *without* `address`, `ip` etc.
            yield item_prototype

    __repr__ = attr_repr('_dict')

    #
    # MutableMapping interface implementation

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __getitem__(self, key):
        return self._dict[key]

    def __delitem__(self, key):
        del self._dict[key]

    def __setitem__(self, key, value):
        ######## silently ignore the legacy item
        if key == '__preserved_custom_keys__': return
        ######## ^^^ (to be removed later)
        try:
            self._dict[key] = self._get_adjusted_value(key, value)
        except AdjusterError as exc:
            # for some keys an invalid value is just skipped (see the
            # comment at the `without_adjuster_error` declaration)
            if key in self.without_adjuster_error:
                LOGGER.warning('Invalid value not stored (%s)', exc)
            else:
                raise

    def _get_adjusted_value(self, key, value):
        """
        Pass `value` through the `adjust_<key>` adjuster; raise
        RuntimeError for unknown keys and AdjusterError on failure.
        """
        if key not in self._settable_keys:
            raise RuntimeError('for {!r}, key {!r} is illegal'.format(
                self, key))

        adjuster_method_name = self._adjuster_name(key)
        try:
            adjuster = getattr(self, adjuster_method_name)
        except AttributeError:
            raise RuntimeError('{!r} has no adjuster for key {!r}'.format(
                self, key))
        if adjuster is None:
            # adjuster explicitly set to None -> passing value unchanged
            return value

        try:
            return adjuster(value)
        except Exception as exc:
            # an exception marked with `propagate_it_anyway` bypasses
            # the AdjusterError wrapping (see e.g. adjust_name())
            if getattr(exc, 'propagate_it_anyway', False):
                raise
            adjuster_error_msg = ('{!r}.{}({value!r}) raised '
                                  '{exc.__class__.__name__}: {exc}'.format(
                                      self,
                                      adjuster_method_name,
                                      value=value,
                                      exc=exc))
            raise AdjusterError(adjuster_error_msg)

    # reimplementation only for speed
    def __contains__(self, key):
        return key in self._dict

    # reimplementation with slightly different interface
    # and some additional guarantees
    def update(self, iterable_or_mapping=()):
        iterator = (iterable_or_mapping.iteritems() if isinstance(
            iterable_or_mapping, collections.Mapping) else
                    iter(iterable_or_mapping))
        setitem = self.__setitem__
        # updating in a deterministic order: sorted by key (in particular,
        # 'category' is set *before* 'name' -- see adjust_name())
        sorted_items = sorted(iterator)
        for key, value in sorted_items:
            setitem(key, value)

    # record dicts are always deep-copied (to avoid hard-to-find bugs)
    def copy(self):
        return copy.deepcopy(self)

    __copy__ = copy

    #
    # Context manager interface

    def __enter__(self):
        self.used_as_context_manager = True
        return self

    def __exit__(self, exc_type, exc, tb):
        try:
            error_callback = self.context_manager_error_callback
        except AttributeError:
            # the attribute is deleted (in the `finally` clause below)
            # after the first use -- hence this error on a second use
            raise TypeError('a record dict instance cannot be used '
                            'as a guarding context manager more than once')
        try:
            if exc_type is not None and error_callback is not None:
                if exc is None:
                    exc = exc_type()
                # the callback's result decides whether the exception
                # is suppressed (truthy) or propagated (falsy)
                return error_callback(exc)
        finally:
            del self.context_manager_error_callback

    #
    # Adjusters

    adjust_id = make_adjuster_using_data_spec('id')
    adjust_rid = make_adjuster_using_data_spec('rid')
    adjust_source = make_adjuster_using_data_spec('source')
    adjust_origin = make_adjuster_using_data_spec('origin')
    adjust_restriction = make_adjuster_using_data_spec('restriction')
    adjust_confidence = make_adjuster_using_data_spec('confidence')
    adjust_category = make_adjuster_using_data_spec('category')
    adjust_md5 = make_adjuster_using_data_spec('md5')
    adjust_sha1 = make_adjuster_using_data_spec('sha1')
    adjust_proto = make_adjuster_using_data_spec('proto')
    adjust_sport = make_adjuster_using_data_spec('sport')
    adjust_dport = make_adjuster_using_data_spec('dport')
    adjust_count = make_adjuster_using_data_spec('count')

    adjust_time = chained(
        make_adjuster_using_data_spec('time'),  # will return datetime
        make_adjuster_applying_callable(str))  # will transform it to str

    adjust_modified = chained(
        make_adjuster_using_data_spec('modified'),  # will return datetime
        make_adjuster_applying_callable(str))  # will transform it to str

    adjust_address = chained(
        make_multiadjuster(make_dict_adjuster(ip=ipv4_preadjuster)),
        applied_for_nonfalse(make_adjuster_using_data_spec('address')))

    adjust_dip = chained(ipv4_preadjuster,
                         make_adjuster_using_data_spec('dip'))

    adjust_url = chained(
        url_preadjuster, make_adjuster_using_data_spec('url',
                                                       on_too_long=trim))

    adjust_fqdn = make_adjuster_using_data_spec('fqdn',
                                                on_too_long=trim_domain)

    adjust_client = chained(
        make_multiadjuster(),
        applied_for_nonfalse(make_adjuster_using_data_spec('client')))

    adjust_until = chained(
        make_adjuster_using_data_spec('until'),  # will return datetime
        make_adjuster_applying_callable(str))  # will transform it to str

    adjust_expires = chained(
        make_adjuster_using_data_spec('expires'),  # will return datetime
        make_adjuster_applying_callable(str))  # will transform it to str

    adjust_target = make_adjuster_using_data_spec('target', on_too_long=trim)

    adjust_type = make_adjuster_using_data_spec('_type')

    # generic internal field adjusters
    adjust__do_not_resolve_fqdn_to_ip = ensure_isinstance(bool)
    adjust__parsed_old = rd_adjuster

    # hi-freq-only internal field adjusters
    adjust__group = unicode_adjuster
    adjust__first_time = chained(
        make_adjuster_using_data_spec('_first_time'),  # will return datetime
        make_adjuster_applying_callable(str))  # will transform it to str

    # bl-only non-internal field adjusters
    adjust_status = make_adjuster_using_data_spec('status')
    adjust_replaces = make_adjuster_using_data_spec('replaces')

    # bl-only internal field adjusters
    adjust__blseriesno = make_adjuster_using_data_spec('_blseriesno')
    adjust__blseriestotal = make_adjuster_using_data_spec('_blseriestotal')
    adjust__blseriesid = make_adjuster_using_data_spec('_blseriesid')
    adjust__bltime = chained(
        make_adjuster_using_data_spec('_bltime'),  # will return datetime
        make_adjuster_applying_callable(str))  # will transform it to str
    adjust__blcurrenttime = chained(
        make_adjuster_using_data_spec('_blcurrenttime'),
        make_adjuster_applying_callable(str))

    # special custom field adjuster
    # (see the comment in the code of n6.utils.enrich.Enricher.enrich())
    adjust_enriched = make_adjuster_using_data_spec('enriched')

    # custom field adjusters
    adjust_adip = make_adjuster_using_data_spec('adip')

    adjust_additional_data = make_adjuster_using_data_spec('additional_data',
                                                           on_too_long=trim)

    adjust_alternative_fqdns = chained(
        make_multiadjuster(),
        make_adjuster_using_data_spec('alternative_fqdns',
                                      on_too_long=trim_domain_seq))

    adjust_description = make_adjuster_using_data_spec('description',
                                                       on_too_long=trim)

    adjust_ip_network = make_adjuster_using_data_spec('ip_network')

    adjust_min_amplification = make_adjuster_using_data_spec(
        'min_amplification', on_too_long=trim)

    adjust_request = make_adjuster_using_data_spec('request', on_too_long=trim)

    adjust_user_agent = make_adjuster_using_data_spec('user_agent',
                                                      on_too_long=trim)

    adjust_sender = make_adjuster_using_data_spec('sender', on_too_long=trim)

    adjust_botid = make_adjuster_using_data_spec('botid', on_too_long=trim)

    adjust_method = make_adjuster_using_data_spec('method', on_too_long=trim)

    adjust_channel = make_adjuster_using_data_spec('channel', on_too_long=trim)

    adjust_first_seen = make_adjuster_using_data_spec('first_seen',
                                                      on_too_long=trim)

    adjust_referer = make_adjuster_using_data_spec('referer', on_too_long=trim)

    adjust_proxy_type = make_adjuster_using_data_spec('proxy_type',
                                                      on_too_long=trim)

    adjust_dns_version = make_adjuster_using_data_spec('dns_version',
                                                       on_too_long=trim)

    adjust_internal_ip = make_adjuster_using_data_spec('internal_ip',
                                                       on_too_long=trim)

    adjust_ipmi_version = make_adjuster_using_data_spec('ipmi_version',
                                                        on_too_long=trim)

    adjust_mac_address = make_adjuster_using_data_spec('mac_address',
                                                       on_too_long=trim)

    adjust_sysdesc = make_adjuster_using_data_spec('sysdesc', on_too_long=trim)

    adjust_version = make_adjuster_using_data_spec('version', on_too_long=trim)

    adjust_dataset = make_adjuster_using_data_spec('dataset', on_too_long=trim)

    adjust_header = make_adjuster_using_data_spec('header', on_too_long=trim)

    adjust_detected_since = make_adjuster_using_data_spec('detected_since',
                                                          on_too_long=trim)

    adjust_handshake = make_adjuster_using_data_spec('handshake',
                                                     on_too_long=trim)

    adjust_cert_length = make_adjuster_using_data_spec('cert_length',
                                                       on_too_long=trim)

    adjust_subject_common_name = make_adjuster_using_data_spec(
        'subject_common_name', on_too_long=trim)

    adjust_visible_databases = make_adjuster_using_data_spec(
        'visible_databases', on_too_long=trim)

    adjust_url_pattern = make_adjuster_using_data_spec('url_pattern')

    adjust_urls_matched = make_adjuster_using_data_spec('urls_matched')

    adjust_username = make_adjuster_using_data_spec('username')

    adjust_email = make_adjuster_using_data_spec('email')

    adjust_facebook_id = make_adjuster_using_data_spec('facebook_id')

    adjust_iban = make_adjuster_using_data_spec('iban')

    adjust_injects = make_adjuster_using_data_spec('injects')

    adjust_phone = make_adjuster_using_data_spec('phone')

    adjust_registrar = make_adjuster_using_data_spec('registrar',
                                                     on_too_long=trim)

    adjust_x509fp_sha1 = make_adjuster_using_data_spec('x509fp_sha1')

    adjust_x509issuer = make_adjuster_using_data_spec('x509issuer')

    adjust_x509subject = make_adjuster_using_data_spec('x509subject')

    adjust_action = make_adjuster_using_data_spec('action')

    # The attribute and related methods are left for the backward
    # compatibility with older data from the MISP sources.
    adjust_misp_eventdid = make_adjuster_using_data_spec('misp_eventdid')

    adjust_misp_attr_uuid = make_adjuster_using_data_spec('misp_attr_uuid')

    adjust_misp_event_uuid = make_adjuster_using_data_spec('misp_event_uuid')

    adjust_product = make_adjuster_using_data_spec('product')

    # custom field used for cooperation with IntelMQ
    adjust_intelmq = make_adjuster_using_data_spec('intelmq')

    adjust_tags = chained(
        make_multiadjuster(),
        make_adjuster_using_data_spec('tags', on_too_long=trim_seq))

    # the `name` adjuster is a bit more complex...
    @preceded_by(unicode_adjuster)
    def adjust_name(self, value):
        """
        Adjust the `name` value, normalizing it for categories listed
        in CATEGORY_TO_NORMALIZED_NAME; requires `category` to be set
        beforehand (see the sort-by-key guarantee in `update()`).
        """
        category = self.get('category')
        if category is None:
            exc = RuntimeError('cannot set "name" when "category" is not set')
            exc.propagate_it_anyway = True  # let the programmer know it!
            raise exc
        if not value:
            raise ValueError('empty value')
        if category in CATEGORY_TO_NORMALIZED_NAME:
            value = self._get_normalized_name(value, category)
            value = self._adjust_name_according_to_data_spec(value)
            self._check_and_handle_nonstandard_name(value, category)
        else:
            value = self._adjust_name_according_to_data_spec(value)
        return value

    _adjust_name_according_to_data_spec = make_adjuster_using_data_spec(
        'name', on_too_long=trim)

    def _get_normalized_name(self, value, category):
        # Lowercase the value, then replace it with the normalized
        # name of the first matching regex (if any) from the
        # NAME_NORMALIZATION entry selected by the first character.
        value = value.lower()
        first_char = value[0]
        normalization = NAME_NORMALIZATION.get(first_char,
                                               NAME_NORMALIZATION['ELSE'])
        for regex, normalized_value in normalization:
            if regex.search(value):
                value = normalized_value
                break
        return value

    def _check_and_handle_nonstandard_name(self, value, category):
        # Log (once) any name that is not among the standard names
        # declared for the category (only if logging is enabled).
        if self.log_nonstandard_names:
            category_std_names = self._get_category_std_names(category)
            if value not in category_std_names:
                self._log_nonstandard_name(value, category)

    def _get_category_std_names(self, category_key):
        # A string value in CATEGORY_TO_NORMALIZED_NAME is an alias
        # pointing at another category key -- follow such aliases
        # until an actual collection of standard names is reached.
        while True:
            category_std_names = CATEGORY_TO_NORMALIZED_NAME[category_key]
            if not isinstance(category_std_names, basestring):
                return category_std_names
            category_key = category_std_names

    # private class attribute: a cache of already logged non-standard names
    # -- used in _log_nonstandard_name() to avoid cluttering the logs
    _already_logged_nonstandard_names = LimitedDict(maxlen=10000)

    def _log_nonstandard_name(
            self,
            value,
            category,
            _already_logged=_already_logged_nonstandard_names):
        # note: `_already_logged` is deliberately a shared (class-level)
        # default -- it acts as a cross-instance cache
        if (category, value) not in _already_logged:
            category_sublogger = NONSTANDARD_NAMES_LOGGER.getChild(category)
            category_sublogger.warning(ascii_str(value))
            _already_logged[(category, value)] = None

    #
    # Appenders for multiple-adjusted attributes

    # Providing methods: append_<key> -- for example:
    # * append_address(<singular value>)
    def __getattr__(self, name):
        # Dynamically provide an `append_<key>` method for any key
        # whose adjuster was made with make_multiadjuster().
        if name.startswith(self._APPENDER_PREFIX):
            key = name[len(self._APPENDER_PREFIX):]
            adjuster_method_name = self._adjuster_name(key)
            adjuster = getattr(self, adjuster_method_name, None)
            if self._is_multiadjuster(adjuster):

                def appender(singular_value):
                    # append to a copy, then store via __setitem__()
                    # so the whole sequence is re-adjusted
                    value_seq = list(self.get(key, []))
                    value_seq.append(singular_value)
                    self[key] = value_seq

                return appender
        raise AttributeError('{.__class__.__name__!r} object has '
                             'no attribute {!r}'.format(self, name))

    @staticmethod
    def _is_multiadjuster(adjuster):
        # Detect adjusters produced (possibly among others) by the
        # make_multiadjuster() factory.
        factory_names = getattr(adjuster, '_factory_names', frozenset())
        return ('make_multiadjuster' in factory_names)
Example #8
0
class DataConversionError(ValueError):
    """
    An exception that is supposed to be raised by converters when
    input data are wrong/invalid. It is expected (though not strictly
    required) that a user-friendly error message will be passed
    as the sole argument passed to the constructor.

    An additional feature: the `sublocation()` class method that
    returns a (single-use) context manager, which should be used by
    converters when entering conversion of some nested stuff whose
    *relative location* (within its parent structure) is a key/name
    (`str`) or an index (`int`). The method should be called with that
    *relative location* (`str` or `int`) as the sole argument; or with
    an iterable collection of such relative locations (`str`/`int`
    objects) if there is an ambiguity which one is the offending
    one (warning: such a collection must *not* be a mapping or a
    `bytes`/`bytearray`);

    thanks to that the `str()` representation of any `DataConversionError`
    raised within one or more `with` blocks of such context managers
    will be automatically prepended with a *location path* pointing to
    the problematic data item in the whole converted structure.

    For example:

    >>> class MyIntegerNumbersListConverter(object):
    ...
    ...     def __call__(self, input_list):
    ...         for index, value in enumerate(input_list):
    ...             with DataConversionError.sublocation(index):
    ...                 yield self._to_integer(value)
    ...
    ...     def _to_integer(self, value):
    ...         try:
    ...             return int(value)
    ...         except ValueError:
    ...             raise DataConversionError(
    ...                '{!a} is not a value that can be converted to '
    ...                'an integer number'.format(value))
    ...
    >>> from n6lib.structured_data_conversion.converters import BaseConverter
    >>> class MyDictOfListsFlatteningConverter(BaseConverter):
    ...
    ...     def __init__(self, sublist_converter_maker, **kwargs):
    ...         # type: (ConverterMaker) -> None
    ...         super(MyDictOfListsFlatteningConverter, self).__init__(**kwargs)
    ...         self._sublist_converter = self.make_subconverter(sublist_converter_maker)
    ...
    ...     def __call__(self, some_input_dict):
    ...         # type: (dict) -> Iterator[int]
    ...         for name, sublist in sorted(some_input_dict.items()):
    ...             with DataConversionError.sublocation(name):
    ...                 self.verify_isinstance(sublist, list)
    ...                 for value in self._sublist_converter(sublist):
    ...                     yield value
    ...
    >>> my_converter = MyDictOfListsFlatteningConverter(
    ...     sublist_converter_maker=MyIntegerNumbersListConverter)
    >>> d1 = {'bar': ['0', '1', '2', '3', '4'], 'foo': ['15', '101']}
    >>> d2 = {'bar': ['0', '1', '2', 'spam', '4'], 'foo': ['15', '101']}
    >>> d3 = {'bar': ['0', '1', '2', '3', '4'], 'foo': ['spam', '101']}
    >>> d4 = {'bar': ['0', '1', '2', '3', '4'], 'foo': {'spam': '101'}}

    >>> list(my_converter(d1))
    [0, 1, 2, 3, 4, 15, 101]

    >>> list(my_converter(d2))   # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    n6lib.structured_data_conversion.exceptions.DataConversionError: [bar.3] 'spam' is not a...

    >>> list(my_converter(d3))   # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    n6lib.structured_data_conversion.exceptions.DataConversionError: [foo.0] 'spam' is not a...

    >>> list(my_converter(d4))   # doctest: +ELLIPSIS
    Traceback (most recent call last):
      ...
    n6lib.structured_data_conversion.exceptions.DataConversionError: [foo] unexpected type of...

    An example with an iterable collection of name/index alternatives:

    >>> with DataConversionError.sublocation(['spam', 0, 'foo']):
    ...     with DataConversionError.sublocation([]):  # <- empty collection will be skipped
    ...         with DataConversionError.sublocation(['ham']):  # <- single element like scalar
    ...             raise DataConversionError('Aha!')
    ...
    Traceback (most recent call last):
      ...
    n6lib.structured_data_conversion.exceptions.DataConversionError: [{spam,0,foo}.ham] Aha!
    """
    def __init__(self, *args):
        super(DataConversionError, self).__init__(*args)
        # Relative locations prepended by `sublocation()` context
        # managers as the exception propagates (leftmost = outermost).
        self._location_path = collections.deque()

    @classmethod
    @contextlib.contextmanager
    def sublocation(cls, name_or_index_or_alternatives_iter, /):
        # type: (Union[NameOrIndex, Iterable[NameOrIndex]]) -> Generator[None, None, None]
        # (See the class docstring for a detailed description.)
        path_item = cls._get_ready_path_item(
            name_or_index_or_alternatives_iter)
        try:
            yield
        except DataConversionError as exc:
            # `None` means an empty alternatives collection was given
            # -> nothing to prepend; the exception is always re-raised.
            if path_item is not None:
                exc._location_path.appendleft(path_item)
            raise

    @classmethod
    def _get_ready_path_item(cls, name_or_index_or_alternatives_iter):
        # type: (...) -> Optional[Union[NameOrIndex, List[NameOrIndex]]]
        # Returns: a single name/index, a list of two-or-more
        # alternatives, or None (for an empty alternatives collection).
        if isinstance(name_or_index_or_alternatives_iter, (str, int)):
            path_item = name_or_index_or_alternatives_iter
            cls._verify_is_name_or_index(path_item)
        else:
            if isinstance(name_or_index_or_alternatives_iter,
                          (Mapping, bytes, bytearray)):
                # (A `Mapping` or `bytes`/`bytearray`? Let's raise an error!)
                cls._verify_is_name_or_index(
                    name_or_index_or_alternatives_iter)
            path_item = list(name_or_index_or_alternatives_iter)
            for name_or_index in path_item:
                cls._verify_is_name_or_index(name_or_index)
            if len(path_item) == 1:
                # a single-element collection behaves like a scalar
                path_item = path_item[0]
            elif not path_item:
                path_item = None
        return path_item

    @staticmethod
    def _verify_is_name_or_index(name_or_index):
        # type: (NameOrIndex) -> None
        if not isinstance(name_or_index, (str, int)):
            raise TypeError('{!a} is neither a name (`str`) nor an '
                            'index (`int`)'.format(name_or_index))

    __repr__ = attr_repr('args', '_location_path')

    def __str__(self):
        # The original message prefixed with the location path (if any).
        return self._get_location_prefix() + super(DataConversionError,
                                                   self).__str__()

    def _get_location_prefix(self):
        # E.g. '[foo.0] ' -- or '' when no location path was recorded.
        if self._location_path:
            path_as_ascii_str = '.'.join(
                map(self._format_path_item, self._location_path))
            return '[{}] '.format(path_as_ascii_str)
        return ''

    def _format_path_item(self, path_item):
        # type: (Union[NameOrIndex, List[NameOrIndex]]) -> str
        if isinstance(path_item, (str, int)):
            return ascii_str(path_item)
        # This is a list of multiple name/index alternatives, so
        # let's present them in the `{foo,bar,spam}`-like form.
        assert (isinstance(path_item, list)
                and all(isinstance(alt, (str, int))
                        for alt in path_item) and len(path_item) > 1
                ), 'bug in implementation of DataConversionError?!'
        return '{' + ','.join(map(ascii_str, path_item)) + '}'