Example #1
def filter_hook(session, msg_mid, msg, keywords, **kwargs):
    """Classify this message."""
    if not kwargs.get('incoming', False):
        return keywords

    config = session.config
    for at_config in config.prefs.autotag:
        try:
            at_tag = config.get_tag(at_config.match_tag)
            atagger = config.load_auto_tagger(at_config)
            if not atagger.trained:
                continue
            want, info = atagger.should_tag(at_config, msg, keywords)
            if want is True:
                if 'autotag' in config.sys.debug:
                    session.ui.debug(('Autotagging %s with %s (w=%s, i=%s)') %
                                     (msg_mid, at_tag.name, want, info))
                keywords.add('%s:in' % at_tag._key)
            elif at_config.unsure_tag and want is None:
                unsure_tag = config.get_tag(at_config.unsure_tag)
                if 'autotag' in config.sys.debug:
                    session.ui.debug(('Autotagging %s with %s (w=%s, i=%s)') %
                                     (msg_mid, unsure_tag.name, want, info))
                keywords.add('%s:in' % unsure_tag._key)
        except (KeyError, AttributeError, ValueError):
            pass

    return keywords
Example #2
def filter_hook(session, msg_mid, msg, keywords):
    """Classify this message."""
    config = session.config
    for at_config in config.prefs.autotag:
        try:
            at_tag = config.get_tag(at_config.match_tag)
            atagger = config.load_auto_tagger(at_config)
            if not atagger.trained:
                continue
            want, info = atagger.should_tag(at_config, msg, keywords)
            if want is True:
                if 'autotag' in config.sys.debug:
                    session.ui.debug(('Autotagging %s with %s (w=%s, i=%s)'
                                      ) % (msg_mid, at_tag.name, want, info))
                keywords.add('%s:tag' % at_tag._key)
            elif at_config.unsure_tag and want is None:
                unsure_tag = config.get_tag(at_config.unsure_tag)
                if 'autotag' in config.sys.debug:
                    session.ui.debug(('Autotagging %s with %s (w=%s, i=%s)'
                                      ) % (msg_mid, unsure_tag.name,
                                           want, info))
                keywords.add('%s:tag' % unsure_tag._key)
        except (KeyError, AttributeError, ValueError):
            pass

    return keywords
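
Both hooks above turn the classifier's tri-state verdict into keyword edits: True adds the match tag's keyword, None (unsure) adds the unsure tag's keyword when one is configured, and False adds nothing; the two versions differ only in the keyword suffix (':in' in Example #1, ':tag' in Example #2) and the incoming-only guard. A minimal stand-alone sketch of just that decision, with no Mailpile objects (apply_autotag and the tag keys are invented for illustration):

def apply_autotag(want, match_tag_key, unsure_tag_key, keywords):
    # Mirror the hooks above: True -> match tag, None -> unsure tag (if any).
    if want is True:
        keywords.add('%s:in' % match_tag_key)
    elif want is None and unsure_tag_key:
        keywords.add('%s:in' % unsure_tag_key)
    return keywords

print(apply_autotag(True, 'spam', 'maybe-spam', set()))   # {'spam:in'}
print(apply_autotag(None, 'spam', 'maybe-spam', set()))   # {'maybe-spam:in'}
print(apply_autotag(False, 'spam', 'maybe-spam', set()))  # set()
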
Example #3
    def command(self):
        session, config = self.session, self.session.config
        clean_session = mailpile.ui.Session(config)
        clean_session.ui = session.ui
        result = []

        tag_names = []
        if self.args:
            tag_names = list(self.args)
        elif self.data.get('tag', []):
            tag_names = self.data.get('tag', [])

        for tag_name in tag_names:

            tag = config.get_tag(tag_name)

            if tag:
                tag_id = tag._key

                # FIXME: Refuse to delete tag if in use by filters

                rv = (Search(clean_session, arg=['tag:%s' % tag_id]).run() and
                      Tag(clean_session, arg=['-%s' % tag_id, 'all']).run())
                if rv:
                    del config.tags[tag_id]
                    result.append({'name': tag.name, 'tid': tag_id})
                else:
                    raise Exception('That failed: %s' % rv)
            else:
                self._error('No such tag %s' % tag_name)
        if result:
            self._reorder_all_tags()
            self.finish(save=True)
        return self._success(
            _('Deleted %d tags') % len(result), {'removed': result})
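
The rv assignment above (and the same pattern in the later tag-deletion examples) relies on Python's short-circuiting and: the Tag removal only runs if the Search returned something truthy, and rv ends up being either that falsy Search result or the Tag result, which decides between deleting the tag and raising. A tiny stand-alone illustration with invented stand-ins for the two commands (run_search and run_untag are hypothetical):

def run_search(found_anything):
    print('search ran')
    return found_anything

def run_untag():
    print('untag ran')
    return True

rv = run_search(False) and run_untag()
print(rv)  # only 'search ran' is printed; rv is False, so nothing is deleted
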
Example #4
    def command(self):
        session, config, idx = self.session, self.session.config, self._idx()
        emails = [Email(idx, mid) for mid in self._choose_messages(self.args)]
        scores = self._classify(emails)
        tag = {}
        for mid in scores:
            for at_config in config.prefs.autotag:
                at_tag = config.get_tag(at_config.match_tag)
                if not at_tag:
                    continue
                want = scores[mid].get(at_tag._key, (False, ))[0]

                if want is True:
                    if at_config.match_tag not in tag:
                        tag[at_config.match_tag] = [mid]
                    else:
                        tag[at_config.match_tag].append(mid)

                elif at_config.unsure_tag and want is None:
                    if at_config.unsure_tag not in tag:
                        tag[at_config.unsure_tag] = [mid]
                    else:
                        tag[at_config.unsure_tag].append(mid)

        for tid in tag:
            idx.add_tag(session, tid, msg_idxs=[int(i, 36) for i in tag[tid]])

        return tag
Example #5
    def command(self):
        session, config, idx = self.session, self.session.config, self._idx()
        emails = [Email(idx, mid) for mid in self._choose_messages(self.args)]
        scores = self._classify(emails)
        tag = {}
        for mid in scores:
            for at_config in config.prefs.autotag:
                at_tag = config.get_tag(at_config.match_tag)
                if not at_tag:
                    continue
                want = scores[mid].get(at_tag._key, (False, ))[0]

                if want is True:
                    if at_config.match_tag not in tag:
                        tag[at_config.match_tag] = [mid]
                    else:
                        tag[at_config.match_tag].append(mid)

                elif at_config.unsure_tag and want is None:
                    if at_config.unsure_tag not in tag:
                        tag[at_config.unsure_tag] = [mid]
                    else:
                        tag[at_config.unsure_tag].append(mid)

        for tid in tag:
            idx.add_tag(session, tid, msg_idxs=[int(i, 36) for i in tag[tid]])

        return self._success(_('Auto-tagged %d messages') % len(emails), tag)
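
The message IDs ('mids') collected above are base-36 strings; the final loop decodes them back to integer message indexes with int(mid, 36) before calling idx.add_tag(). A short self-contained sketch of the grouping and decoding with made-up mids and tag IDs (the setdefault call is just a compact equivalent of the explicit 'not in tag' branches used in the command):

# Group message IDs per target tag, then decode base-36 mids to indexes.
tag = {}
for mid, target in [('z', 'tag-spam'), ('10', 'tag-spam'), ('1a', 'tag-unsure')]:
    tag.setdefault(target, []).append(mid)

for tid, mids in tag.items():
    msg_idxs = [int(m, 36) for m in mids]
    print(tid, mids, '->', msg_idxs)  # 'z' -> 35, '10' -> 36, '1a' -> 46
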
Example #6
    def command(self):
        session, config = self.session, self.session.config
        clean_session = mailpile.ui.Session(config)
        clean_session.ui = session.ui
        result = []
        for tag_name in self.args:
            tag = config.get_tag(tag_name)
            if tag:
                tag_id = tag._key
                # FIXME: Refuse to delete tag if in use by filters
                rv = (Search(clean_session, arg=['tag:%s' % tag_id]).run() and
                      Tag(clean_session, arg=['-%s' % tag_id, 'all']).run())
                if rv:
                    del config.tags[tag_id]
                    result.append({'name': tag.name, 'tid': tag_id})
                else:
                    raise Exception('That failed: %s' % rv)
            else:
                self._error('No such tag %s' % tag_name)
        if result:
            self.finish(save=True)
        return {'removed': result}
Example #7
    def command(self):
        session, config = self.session, self.session.config
        clean_session = mailpile.ui.Session(config)
        clean_session.ui = session.ui
        result = []
        for tag_name in self.args:
            tag = config.get_tag(tag_name)
            if tag:
                tag_id = tag._key
                # FIXME: Refuse to delete tag if in use by filters
                rv = (Search(clean_session, arg=['tag:%s' % tag_id]).run() and
                      Tag(clean_session, arg=['-%s' % tag_id, 'all']).run())
                if rv:
                    del config.tags[tag_id]
                    result.append({'name': tag.name, 'tid': tag_id})
                else:
                    raise Exception('That failed: %s' % rv)
            else:
                self._error('No such tag %s' % tag_name)
        if result:
            self.finish(save=True, stats=False)
        return {'removed': result}
Example #8
    def _classify(self, emails):
        session, config, idx = self.session, self.session.config, self._idx()
        results = {}
        unknown = []
        for e in emails:
            kws = self._get_keywords(e)
            result = results[e.msg_mid()] = {}
            for at_config in config.prefs.autotag:
                if not at_config.match_tag:
                    continue
                at_tag = config.get_tag(at_config.match_tag)
                if not at_tag and at_config.match_tag not in unknown:
                    session.ui.error(
                        _('Unknown tag: %s') % at_config.match_tag)
                    unknown.append(at_config.match_tag)
                    continue

                atagger = config.load_auto_tagger(at_config)
                if atagger.trained:
                    result[at_tag._key] = result.get(at_tag._key, [])
                    result[at_tag._key].append(
                        atagger.should_tag(at_config, e.get_msg(), kws))
        return results
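
In this version of _classify the return value is a nested mapping: message mid -> tag key -> a list of the (want, info) results from should_tag() (the same pair the hooks in Examples #1 and #2 unpack). A small sketch of walking such a structure, with invented keys and scores:

results = {'1a': {'tagkey0': [(True, 0.97)],
                  'tagkey1': [(None, 0.55)]}}

for mid, per_tag in results.items():
    for tag_key, verdicts in per_tag.items():
        for want, info in verdicts:
            print(mid, tag_key, want, info)
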
Example #9
    def command(self):
        session, config, idx = self.session, self.session.config, self._idx()
        tags = self.args or [asb.match_tag for asb in config.prefs.autotag]
        tids = [config.get_tag(t)._key for t in tags if t]

        session.ui.mark(_('Retraining SpamBayes autotaggers'))
        if not hasattr(config, 'autotag'):
            config.autotag = {}

        # Find all the interesting messages! We don't look in the trash,
        # but we do look at interesting spam.
        #
        # Note: By specifically stating that we DON'T want trash, we
        #       disable the search engine's default result suppression
        #       and guarantee these results don't corrupt the somewhat
        #       lame/broken result cache.
        #
        no_trash = ['-in:%s' % t._key for t in config.get_tags(type='trash')]
        interest = {}
        for ttype in ('replied', 'read', 'tagged'):
            interest[ttype] = set()
            for tag in config.get_tags(type=ttype):
                interest[ttype] |= idx.search(session,
                                              ['in:%s' % tag.slug] + no_trash
                                              ).as_set()
            session.ui.notify(_('Have %d interesting %s messages'
                                ) % (len(interest[ttype]), ttype))

        retrained = []
        count_all = 0
        for at_config in config.prefs.autotag:
            at_tag = config.get_tag(at_config.match_tag)
            if at_tag and at_tag._key in tids:
                session.ui.mark('Retraining: %s' % at_tag.name)

                yn = [(set(), set(), 'in:%s' % at_tag.slug, True),
                      (set(), set(), '-in:%s' % at_tag.slug, False)]

                # Get the current message sets: tagged and untagged messages
                # excluding trash.
                for tset, mset, srch, which in yn:
                    mset |= idx.search(session, [srch] + no_trash).as_set()

                # If we have any exclude_tags, they are particularly
                # interesting, so we'll look at them first.
                interesting = []
                for etagid in at_config.exclude_tags:
                    etag = config.get_tag(etagid)
                    if etag._key not in interest:
                        srch = ['in:%s' % etag._key] + no_trash
                        interest[etag._key] = idx.search(session, srch
                                                         ).as_set()
                    interesting.append(etag._key)
                interesting.extend(['replied', 'read', 'tagged', None])

                # Go through the interest types in order of preference and
                # while we still lack training data, add to the training set.
                for ttype in interesting:
                    for tset, mset, srch, which in yn:
                        # FIXME: Is this a good idea? No single data source
                        # is allowed to be more than 50% of the corpus, to
                        # try and encourage diversity.
                        want = min(at_config.corpus_size / 4,
                                   max(0,
                                       at_config.corpus_size / 2 - len(tset)))
                        if want:
                            if ttype:
                                adding = sorted(list(mset & interest[ttype]))
                            else:
                                adding = sorted(list(mset))
                            adding = set(list(reversed(adding))[:want])
                            tset |= adding
                            mset -= adding

                # Load classifier, reset
                atagger = config.load_auto_tagger(at_config)
                atagger.reset(at_config)
                for tset, mset, srch, which in yn:
                    count = 0
                    for msg_idx in tset:
                        e = Email(idx, msg_idx)
                        count += 1
                        count_all += 1
                        session.ui.mark(('Reading %s (%d/%d, %s=%s)'
                                         ) % (e.msg_mid(), count, len(tset),
                                              at_tag.name, which))
                        atagger.learn(at_config,
                                      e.get_msg(),
                                      self._get_keywords(e),
                                      which)

                # We got this far without crashing, so save the result.
                config.save_auto_tagger(at_config)
                retrained.append(at_tag.name)

        session.ui.mark(_('Retrained SpamBayes auto-tagging for %s'
                          ) % ', '.join(retrained))
        return {'retrained': retrained, 'read_messages': count_all}
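
The yn list above pairs a positive and a negative training bucket as (training_set, candidate_set, search_term, label); each pass over an interest type moves at most want of the highest-numbered candidates into the training set, mirroring the reversed()/slice step in the loop. A stripped-down sketch with made-up integer message indexes and a fixed budget:

yn = [(set(), {1, 2, 3, 4}, 'in:spam', True),
      (set(), {5, 6, 7, 8}, '-in:spam', False)]

want = 2  # pretend per-pass budget
for tset, mset, srch, which in yn:
    adding = set(sorted(mset, reverse=True)[:want])  # largest indexes first
    tset |= adding
    mset -= adding
    print(srch, which, 'train on', sorted(tset), 'left over', sorted(mset))
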
Example #10
    def _retrain(self, tags=None):
        "Retrain autotaggers"
        session, config, idx = self.session, self.session.config, self._idx()
        tags = tags or [asb.match_tag for asb in config.prefs.autotag]
        tids = [config.get_tag(t)._key for t in tags if t]

        session.ui.mark(_('Retraining SpamBayes autotaggers'))
        if not config.real_hasattr('autotag'):
            config.real_setattr('autotag', {})

        # Find all the interesting messages! We don't look in the trash,
        # but we do look at interesting spam.
        #
        # Note: By specifically stating that we DON'T want trash, we
        #       disable the search engine's default result suppression
        #       and guarantee these results don't corrupt the somewhat
        #       lame/broken result cache.
        #
        no_trash = ['-in:%s' % t._key for t in config.get_tags(type='trash')]
        interest = {}
        for ttype in ('replied', 'fwded', 'read', 'tagged'):
            interest[ttype] = set()
            for tag in config.get_tags(type=ttype):
                interest[ttype] |= idx.search(session, ['in:%s' % tag.slug] +
                                              no_trash).as_set()
            session.ui.notify(
                _('Have %d interesting %s messages') %
                (len(interest[ttype]), ttype))

        retrained, unreadable = [], []
        count_all = 0
        for at_config in config.prefs.autotag:
            at_tag = config.get_tag(at_config.match_tag)
            if at_tag and at_tag._key in tids:
                session.ui.mark('Retraining: %s' % at_tag.name)

                yn = [(set(), set(), 'in:%s' % at_tag.slug, True),
                      (set(), set(), '-in:%s' % at_tag.slug, False)]

                # Get the current message sets: tagged and untagged messages
                # excluding trash.
                for tset, mset, srch, which in yn:
                    mset |= idx.search(session, [srch] + no_trash).as_set()

                # If we have any exclude_tags, they are particularly
                # interesting, so we'll look at them first.
                interesting = []
                for etagid in at_config.exclude_tags:
                    etag = config.get_tag(etagid)
                    if etag._key not in interest:
                        srch = ['in:%s' % etag._key] + no_trash
                        interest[etag._key] = idx.search(session,
                                                         srch).as_set()
                    interesting.append(etag._key)
                interesting.extend(
                    ['replied', 'fwded', 'read', 'tagged', None])

                # Go through the interest types in order of preference and
                # while we still lack training data, add to the training set.
                for ttype in interesting:
                    for tset, mset, srch, which in yn:
                        # False positives are really annoying, and generally
                        # speaking any autotagged subset should be a small
                        # part of the Universe. So we divide the corpus
                        # budget 33% True, 67% False.
                        full_size = int(at_config.corpus_size *
                                        (0.33 if which else 0.67))
                        want = min(full_size // 4, max(0,
                                                       full_size - len(tset)))
                        if want:
                            if ttype:
                                adding = sorted(list(mset & interest[ttype]))
                            else:
                                adding = sorted(list(mset))
                            adding = set(list(reversed(adding))[:want])
                            tset |= adding
                            mset -= adding

                # Load classifier, reset
                atagger = config.load_auto_tagger(at_config)
                atagger.reset(at_config)
                for tset, mset, srch, which in yn:
                    count = 0
                    # We go through the list of messages in order, to avoid
                    # thrashing caches too badly.
                    for msg_idx in sorted(list(tset)):
                        try:
                            e = Email(idx, msg_idx)
                            count += 1
                            count_all += 1
                            session.ui.mark(
                                _('Reading %s (%d/%d, %s=%s)') %
                                (e.msg_mid(), count, len(tset), at_tag.name,
                                 which))
                            atagger.learn(at_config, e.get_msg(),
                                          self._get_keywords(e), which)
                        except (IndexError, TypeError, ValueError, OSError,
                                IOError):
                            if session.config.sys.debug:
                                import traceback
                                traceback.print_exc()
                            unreadable.append(msg_idx)
                            session.ui.warning(
                                _('Failed to process message at =%s') %
                                (b36(msg_idx)))

                # We got this far without crashing, so save the result.
                config.save_auto_tagger(at_config)
                retrained.append(at_tag.name)

        message = _('Retrained SpamBayes auto-tagging for %s') % ', '.join(
            retrained)
        session.ui.mark(message)
        return self._success(message,
                             result={
                                 'retrained': retrained,
                                 'unreadable': unreadable,
                                 'read_messages': count_all
                             })
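
The corpus-budget arithmetic in this _retrain variant can be isolated: the True side gets roughly a third of corpus_size, the False side roughly two thirds, and each pass through an interest type may add at most a quarter of that side's budget, capped by how many slots remain. A small sketch of just that calculation (budget is a hypothetical helper and the numbers are illustrative):

def budget(corpus_size, which, already_have):
    # Same formula as above: full_size is split 33% True / 67% False, then
    # want = min(full_size // 4, max(0, full_size - already_have)).
    full_size = int(corpus_size * (0.33 if which else 0.67))
    return min(full_size // 4, max(0, full_size - already_have))

print(budget(1200, True, 0))     # a quarter of the ~396-message True budget
print(budget(1200, True, 380))   # only the ~16 slots still free
print(budget(1200, False, 900))  # 0 once the ~804-message False budget is full
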
Example #11
    def command(self):
        session, config, idx = self.session, self.session.config, self._idx()
        tags = self.args or [asb.match_tag for asb in config.prefs.autotag]
        tids = [config.get_tag(t)._key for t in tags if t]

        session.ui.mark(_('Retraining SpamBayes autotaggers'))
        if not hasattr(config, 'autotag'):
            config.autotag = {}

        # Find all the interesting messages! We don't look in the trash,
        # but we do look at interesting spam.
        #
        # Note: By specifically stating that we DON'T want trash, we
        #       disable the search engine's default result suppression
        #       and guarantee these results don't corrupt the somewhat
        #       lame/broken result cache.
        #
        no_trash = ['-in:%s' % t._key for t in config.get_tags(type='trash')]
        interest = {}
        for ttype in ('replied', 'fwded', 'read', 'tagged'):
            interest[ttype] = set()
            for tag in config.get_tags(type=ttype):
                interest[ttype] |= idx.search(session, ['in:%s' % tag.slug] +
                                              no_trash).as_set()
            session.ui.notify(
                _('Have %d interesting %s messages') %
                (len(interest[ttype]), ttype))

        retrained = []
        count_all = 0
        for at_config in config.prefs.autotag:
            at_tag = config.get_tag(at_config.match_tag)
            if at_tag and at_tag._key in tids:
                session.ui.mark('Retraining: %s' % at_tag.name)

                yn = [(set(), set(), 'in:%s' % at_tag.slug, True),
                      (set(), set(), '-in:%s' % at_tag.slug, False)]

                # Get the current message sets: tagged and untagged messages
                # excluding trash.
                for tset, mset, srch, which in yn:
                    mset |= idx.search(session, [srch] + no_trash).as_set()

                # If we have any exclude_tags, they are particularly
                # interesting, so we'll look at them first.
                interesting = []
                for etagid in at_config.exclude_tags:
                    etag = config.get_tag(etagid)
                    if etag._key not in interest:
                        srch = ['in:%s' % etag._key] + no_trash
                        interest[etag._key] = idx.search(session,
                                                         srch).as_set()
                    interesting.append(etag._key)
                interesting.extend(
                    ['replied', 'fwded', 'read', 'tagged', None])

                # Go through the interest types in order of preference and
                # while we still lack training data, add to the training set.
                for ttype in interesting:
                    for tset, mset, srch, which in yn:
                        # FIXME: Is this a good idea? No single data source
                        # is allowed to be more than 50% of the corpus, to
                        # try and encourage diversity.
                        want = min(
                            at_config.corpus_size / 4,
                            max(0, at_config.corpus_size / 2 - len(tset)))
                        if want:
                            if ttype:
                                adding = sorted(list(mset & interest[ttype]))
                            else:
                                adding = sorted(list(mset))
                            adding = set(list(reversed(adding))[:want])
                            tset |= adding
                            mset -= adding

                # Load classifier, reset
                atagger = config.load_auto_tagger(at_config)
                atagger.reset(at_config)
                for tset, mset, srch, which in yn:
                    count = 0
                    for msg_idx in tset:
                        e = Email(idx, msg_idx)
                        count += 1
                        count_all += 1
                        session.ui.mark(('Reading %s (%d/%d, %s=%s)') %
                                        (e.msg_mid(), count, len(tset),
                                         at_tag.name, which))
                        atagger.learn(at_config, e.get_msg(),
                                      self._get_keywords(e), which)

                # We got this far without crashing, so save the result.
                config.save_auto_tagger(at_config)
                retrained.append(at_tag.name)

        session.ui.mark(
            _('Retrained SpamBayes auto-tagging for %s') %
            ', '.join(retrained))
        return {'retrained': retrained, 'read_messages': count_all}