Example #1
    def import_pst(self, p, store):
        folders = p.folder_generator()
        root_path = rev_cp1252(next(folders).path)  # skip root
        for folder in folders:
            with log_exc(self.log, self.stats):
                path = rev_cp1252(folder.path[len(root_path) + 1:])
                if self.options.folders and \
                   path.lower() not in [f.lower() for f in self.options.folders]:
                    continue
                self.log.info("importing folder '%s'", path)
                if self.options.import_root:
                    path = self.options.import_root + '/' + path
                folder2 = store.folder(path, create=True)
                if self.options.clean_folders:
                    folder2.empty()
                if folder.ContainerClass:
                    folder2.container_class = folder.ContainerClass
                for message in p.message_generator(folder):
                    with log_exc(self.log, self.stats):
                        self.log.debug("importing message '%s'",
                                       rev_cp1252(message.Subject or ''))
                        message2 = folder2.create_item(save=False)
                        self.import_attachments(message, message2.mapiobj)
                        self.import_recipients(message, message2.mapiobj)
                        self.import_props(message, message2.mapiobj)
                        self.stats['messages'] += 1
Example #2
def test_logexc():
    log = logging.getLogger('test')

    stats = {}
    with kopano.log_exc(log, stats):
        kaboom  # deliberately undefined: raises NameError, which log_exc logs and counts
    assert stats['errors'] == 1
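
All of these snippets lean on kopano's log_exc context manager. As a rough mental model (a minimal sketch, assuming only the behavior the test above exercises, not kopano's actual implementation), it logs any exception raised in its body, counts it in an optional stats dict, and suppresses it so the surrounding loop keeps running:

from contextlib import contextmanager

@contextmanager
def log_exc(log, stats=None):
    # hypothetical sketch of kopano.log_exc: log and swallow any exception,
    # counting it under stats['errors'] when a stats dict is passed
    try:
        yield
    except Exception:
        log.exception('unhandled exception')
        if stats is not None:
            stats['errors'] = stats.get('errors', 0) + 1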
Example #3
    def main(self):
        init_globals()

        setproctitle.setproctitle('kopano-msr service')

        self.iqueue = multiprocessing.Queue() # folders in the working queue
        self.oqueue = multiprocessing.Queue() # processed folders, used to update STORE_FOLDER_QUEUED
        self.subscribe = multiprocessing.Queue() # subscription update queue

        self.state_path = self.config['state_path']

        # initialize and start workers
        workers = [SyncWorker(self, 'msr%d'%i, nr=i, iqueue=self.iqueue, oqueue=self.oqueue)
                       for i in range(self.config['worker_processes'])]
        for worker in workers:
            worker.start()

        control_worker = ControlWorker(self, 'control', subscribe=self.subscribe)
        control_worker.start()

        # resume relocations
        for username in os.listdir(self.state_path):
            if not username.endswith('.lock'):
                with log_exc(self.log):
                    state_path = os.path.join(self.state_path, username)
                    info = pickle.loads(db_get(state_path, 'info', decode=False))

                    self.subscribe.put((username, info['target'], info['server'], True, info['store']))

        # continue using notifications
        self.notify_sync()
Example #4
    def update(self, f):
        with log_exc(self.log): # TODO use decorator?
            entryid2 = db_get(self.state_path, 'folder_map_'+f.sourcekey)
            psk = f.parent.sourcekey or self.subtree_sk # TODO default pyko to subtree_sk?

            self.log.info('updated: %s', f)

            parent2_eid = db_get(self.state_path, 'folder_map_'+psk)
            parent2 = self.store2.folder(entryid=parent2_eid)

            self.log.info('parent: %s', parent2)

            if entryid2:
                self.log.info('exists')

                folder2 = self.store2.folder(entryid=entryid2)
                folder2.name = f.name
                folder2.container_class = f.container_class

                if folder2.parent.entryid != parent2_eid:
                    self.log.info('move folder')

                    folder2.parent.move(folder2, parent2)

            else:
                self.log.info('create')

                folder2 = parent2.folder(f.name, create=True)

                db_put(self.state_path, 'folder_map_'+f.sourcekey, folder2.entryid)

            _queue_or_store(self.state_path, self.user, self.store_entryid, f.entryid, self.iqueue)
Example #5
    def update(self, item, flags):
        """ store updated item in 'items' database, and subject and date in 'index' database """

        with log_exc(self.log, self.stats):
            self.log.debug('folder %s: new/updated document with entryid %s, sourcekey %s', self.folder.sourcekey, item.entryid, item.sourcekey)

            data = zlib.compress(item.dumps(attachments=not self.options.skip_attachments, archiver=False, skip_broken=True))
            self.item_updates.append((item.sourcekey, data))

            orig_prop = item.get_prop(PR_EC_BACKUP_SOURCE_KEY)
            if orig_prop:
                orig_prop = _hex(orig_prop.value)
            idx = pickle_dumps({
                b'subject': item.subject,
                b'orig_sourcekey': orig_prop,
                b'last_modified': item.last_modified,
                b'backup_updated': self.service.timestamp,
            })
            self.index_updates.append((item.sourcekey, idx))

            self.stats['changes'] += 1

            self.cache_size += len(data) + len(idx)
            if self.cache_size > CACHE_SIZE:
                self.commit()
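
Once cache_size passes CACHE_SIZE, the batched tuples are flushed by commit(). A plausible sketch of that flush, assuming the same per-folder 'items' and 'index' databases the delete() example below opens (the real kopano-backup commit may differ):

    def commit(self):
        # hypothetical sketch: write the batched item/index updates, then reset the cache
        with closing(dbopen(self.folder_path + '/items')) as db_items, \
             closing(dbopen(self.folder_path + '/index')) as db_index:
            for sourcekey, data in self.item_updates:
                db_items[sourcekey.encode('ascii')] = data
            for sourcekey, idx in self.index_updates:
                db_index[sourcekey.encode('ascii')] = idx
        self.item_updates = []
        self.index_updates = []
        self.cache_size = 0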
Example #6
    def delete(self, item, flags):  # XXX batch as well, 'updating' cache?
        """ handle a deleted item in the 'items' and 'index' databases """

        with log_exc(self.log, self.stats):
            with closing(dbopen(self.folder_path + '/items')) as db_items:
                with closing(dbopen(self.folder_path + '/index')) as db_index:

                    self.log.debug(
                        'folder %s: deleted document with sourcekey %s',
                        self.folder.sourcekey, item.sourcekey)

                    # NOTE ICS may generate delete events for items that did not exist
                    # before, for example for a new message which has already been
                    # deleted in the meantime.
                    key = item.sourcekey.encode('ascii')
                    if key in db_items:
                        idx = pickle_loads(db_index[key])
                        idx[b'backup_deleted'] = self.service.timestamp
                        db_index[key] = pickle_dumps(idx)
                    else:
                        db_index[key] = pickle_dumps(
                            {b'backup_deleted': self.service.timestamp})
                    self.stats['deletes'] += 1
Example #7
    def incremental_sync(self):
        """ process changes in real-time (not yet parallelized); if no pending changes handle reindex requests """

        while True:
            with log_exc(self.log):
                try:
                    storeid = self.reindex_queue.get(block=False)
                    store = self.server.store(storeid)
                    self.log.info('handling reindex request for "%s"', store.name)
                    self.plugin.reindex(self.server.guid, store.guid)
                    self.initial_sync([store], reindex=True)
                except Empty:
                    pass
                importer = ServerImporter(self.server.guid, self.config, self.iqueue, self.log)
                t0 = time.time()
                new_state = self.server.sync(importer, self.state, log=self.log)
                if new_state != self.state:
                    changes = sum([self.oqueue.get() for i in range(len(importer.queued))]) # blocking
                    for f in importer.queued:
                        self.iqueue.put(f+(False,)) # make sure folders are at least synced to new_state
                    changes += sum([self.oqueue.get() for i in range(len(importer.queued))]) # blocking
                    self.log.info('queue processed in %.2f seconds (%d changes, ~%.2f/sec)', time.time()-t0, changes, changes/(time.time()-t0))
                    self.state = new_state
                    db_put(self.state_db, 'SERVER', self.state)
                    self.log.info('saved server sync state = %s', self.state)
                if t0 > self.syncrun.value+1:
                    self.syncrun.value = 0
            time.sleep(5)
Example #8
def dump_props(props, stats, log):
    """ dump given MAPI properties """

    data = {}
    with log_exc(log, stats):
        data = dict((prop.proptag, prop.mapiobj.Value) for prop in props)
    return pickle_dumps(data)
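
For orientation, loading such a dump back might look roughly like the hypothetical load_props below. SPropValue/SetProps/SaveChanges are standard python-mapi calls, but this helper itself is an assumption, not part of the source:

from MAPI import KEEP_OPEN_READWRITE
from MAPI.Struct import SPropValue

def load_props(data, obj, stats, log):
    """ hypothetical inverse of dump_props: restore pickled MAPI properties """

    with log_exc(log, stats):
        props = [SPropValue(proptag, value)
                 for (proptag, value) in pickle_loads(data).items()]
        obj.mapiobj.SetProps(props)
        obj.mapiobj.SaveChanges(KEEP_OPEN_READWRITE)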
Example #9
    def update(self, item, flags):
        with log_exc(self.log):
            if item.message_class != 'IPM.Note':  # TODO None?
                return

            searchkey = item.searchkey
            header = item.header(self.headertag)

            if (item.folder == item.store.junk and \
                (not header or header.upper() != 'YES')):

                fn = os.path.join(self.hamdir, searchkey + '.eml')
                if os.path.isfile(fn):
                    os.unlink(fn)

                self.log.info("Learning message as SPAM, entryid: %s",
                              item.entryid)
                self.learn(item, searchkey, True)

            elif (item.folder == item.store.inbox and \
                  self.learnham and \
                  (self.was_spam(searchkey) or (header and header.upper() == 'YES'))):

                fn = os.path.join(self.spamdir, searchkey + '.eml')
                if os.path.isfile(fn):
                    os.unlink(fn)

                self.log.info("Learning message as HAM, entryid: %s",
                              item.entryid)
                self.learn(item, searchkey, False)
Example #10
def dump_acl(obj, user, server, stats, log):
    """ dump acl for given store or folder """

    rows = []
    with log_exc(log, stats):
        acl_table = obj.mapiobj.OpenProperty(PR_ACL_TABLE,
                                             IID_IExchangeModifyTable, 0, 0)
        table = acl_table.GetTable(0)
        for row in table.QueryRows(-1, 0):
            entryid = row[1].Value
            try:
                row[1].Value = (b'user',
                                server.sa.GetUser(entryid,
                                                  MAPI_UNICODE).Username)
            except MAPIErrorNotFound:
                try:
                    row[1].Value = (b'group',
                                    server.sa.GetGroup(entryid,
                                                       MAPI_UNICODE).Groupname)
                except MAPIErrorNotFound:
                    log.warning(
                        "skipping access control entry for unknown user/group %s",
                        _hex(entryid))
                    continue
            rows.append(row)
    return pickle_dumps(rows)
Example #11
    def import_message(self, message, folder2):
        type_map = [
            ('IPM.Note', 'mail'),
            ('IPM.Schedule', 'mail'),
            ('IPM.Contact', 'contact'),
            ('IPM.DistList', 'distlist'),
            ('IPM.Appointment', 'appointment'),
        ]
        for (class_, type_) in type_map:
            if message.MessageClass and message.MessageClass.startswith(
                    class_):
                break
        else:
            type_ = 'item'

        with log_exc(self.log, self.stats):
            self.log.debug(
                "importing %s '%s' (NID=%d)" %
                (type_, rev_cp1252(message.Subject or ''), message.nid.nid))
            message2 = folder2.create_item(save=False)
            self.entryid_map[message.EntryId] = message2.entryid
            self.import_attachments(message, message2.mapiobj)
            self.import_recipients(message, message2.mapiobj)
            self.import_props(message, message2.mapiobj)
            self.stats['messages'] += 1
Example #12
    def import_message(self, message, folder2):
        type_map = [
            ('IPM.Note', 'mail'),
            ('IPM.Schedule', 'mail'),
            ('IPM.Contact', 'contact'),
            ('IPM.DistList', 'distlist'),
            ('IPM.Appointment', 'appointment'),
        ]
        for (class_, type_) in type_map:
            if message.MessageClass and message.MessageClass.startswith(
                    class_):
                break
        else:
            type_ = 'item'

        with log_exc(self.log, self.stats):
            while True:
                try:
                    self.log.debug("importing %s '%s' (NID=%d)", type_,
                                   rev_cp1252(message.Subject or ''),
                                   message.nid.nid)
                    message2 = folder2.create_item(save=False)
                    self.entryid_map[message.EntryId] = message2.entryid
                    self.import_attachments(message, message2.mapiobj)
                    self.import_recipients(message, message2.mapiobj)
                    self.import_props(message, message2.mapiobj)
                    self.stats['messages'] += 1
                    break
                except MAPIErrorNetworkError as e:
                    self.log.warning(
                        "{}: Connection to server lost, retrying in 5 sec".
                        format(e))
                    time.sleep(5)
Example #13
def dump_rules(folder, user, server, stats, log):
    """ dump rules for given folder """

    ruledata = None
    with log_exc(log, stats):
        try:
            ruledata = folder.prop(PR_RULES_DATA).value
        except (MAPIErrorNotFound, kopano.NotFoundError):
            pass
        else:
            etxml = ElementTree.fromstring(ruledata)
            for actions in etxml.findall('./item/item/actions'):
                for movecopy in actions.findall('.//moveCopy'):
                    try:
                        s = movecopy.findall('store')[0]
                        store = server.mapisession.OpenMsgStore(0, _unbase64(s.text), None, 0)
                        guid = _hex(HrGetOneProp(store, PR_STORE_RECORD_KEY).Value)
                        store = server.store(guid) # XXX guid doesn't work for multiserver?
                        if store.public:
                            s.text = 'public'
                        else:
                            s.text = store.user.name if store != user.store else ''
                        f = movecopy.findall('folder')[0]
                        path = store.folder(entryid=_hex(_unbase64(f.text))).path
                        f.text = path
                    except (MAPIErrorNotFound, kopano.NotFoundError, binascii.Error):
                        log.warning("cannot serialize rule for unknown store/folder")
            ruledata = ElementTree.tostring(etxml)
    return pickle_dumps(ruledata)
Example #14
    def main(self):
        config, server, plugin = self.service.config, self.service.server, self.service.plugin
        state_db = os.path.join(config['index_path'], server.guid+'_state')
        while True:
            changes = 0
            with log_exc(self.log):
                (_, storeguid, folderid, reindex) = self.iqueue.get()
                store = server.store(storeguid)
                folder = kopano.Folder(store, folderid)
                path = folder.path
                if path and \
                   (folder != store.outbox) and \
                   (folder != store.junk or config['index_junk']) and \
                   (folder != store.drafts or config['index_drafts']):
                    suggestions = config['suggestions'] and folder != store.junk
                    self.log.info('syncing folder: "%s" "%s"', store.name, path)
                    importer = FolderImporter(server.guid, config, plugin, suggestions, self.log)
                    state = db_get(state_db, folder.entryid) if not reindex else None
                    if state:
                        self.log.info('found previous folder sync state: %s', state)
                    t0 = time.time()
                    new_state = folder.sync(importer, state, log=self.log)
                    if new_state != state:
                        plugin.commit(suggestions)
                        db_put(state_db, folder.entryid, new_state)
                        self.log.info('saved folder sync state: %s', new_state)
                        changes = importer.changes + importer.deletes
                        self.log.info('syncing folder "%s" took %.2f seconds (%d changes, %d attachments)', path, time.time()-t0, changes, importer.attachments)
            self.oqueue.put(changes)
Example #15
    def main(self):
        """ start initial syncing if no state found. then start query process and switch to incremental syncing """

        dspam_path = self.config['dspam_path']
        os.umask(0o077)
        if not os.path.exists(dspam_path):
            os.makedirs(dspam_path)
        state_db = os.path.join(dspam_path, self.server.guid+'_state')
        state = db_get(state_db, 'SERVER')
        if state:
            self.log.debug('found previous server sync state: %s' % state)
        else:
            state = self.server.state
            self.log.debug('no previous state found, starting from state: %s' % state)
            db_put(state_db, 'SERVER', state)

        #incremental syncer
        self.log.info('startup complete, monitoring mail movements')
        importer = ItemImporter(self.server, self.config, self.log)
        while True:
            with log_exc(self.log):
                new_state = self.server.sync(importer, state, log=self.log)
                if new_state != state:
                    state = new_state
                    db_put(state_db, 'SERVER', state)
                    self.log.debug('saved server sync state = %s' % state)
            time.sleep(self.config['process_delay'])
Example #16
def dump_delegates(user, server, stats, log):
    """ dump delegate users for given user """

    usernames = []
    with log_exc(log, stats):
        usernames = [d.user.name for d in user.delegations()]

    return pickle_dumps(usernames)
Example #17
    def main(self):
        server = self.server
        state = server.state
        catcher = Checker(self)
        with log_exc(self.log):
            while True:
                state = server.sync(catcher, state)
                time.sleep(1)
Example #18
    def delete(self, f, flags):
        with log_exc(self.log):
            self.log.info('deleted folder: %s', f.sourcekey)

            entryid2 = db_get(self.state_path, 'folder_map_'+f.sourcekey)
            if entryid2: # TODO why this check
                folder2 = self.store2.folder(entryid=entryid2)
                self.store2.delete(folder2)
Example #19
    def main(self):
        server = self.server
        state = server.state  # start from current state
        importer = Importer(self)
        with log_exc(self.log):
            while True:
                state = server.sync(importer, state)
                time.sleep(1)
Example #20
    def unsubscribe_user(self, server, username):
        self.log.info('unsubscribing: %s', username)

        storea = server.user(username).store
        storeb = server.store(entryid=STORE_STORE[storea.entryid])

        # unsubscribe user from notification
        sink = USER_SINK[username]
        storea.unsubscribe(sink)

        # unregister user everywhere
        del USER_SINK[username]
        del STORE_STORE[storea.entryid]
        del USER_INFO[username]

        # set special folders
        for attr in (
            'calendar',
            'contacts',
            'wastebasket',
            'drafts',
            'inbox',
            'journal',
            'junk',
            'notes',
            'outbox',
            'sentmail',
            'tasks',
        ):
            with log_exc(self.log):
                setattr(storeb, attr, storeb.folder(getattr(storea, attr).path))

        # transfer metadata
        with log_exc(self.log): # TODO testing: store deleted in tearDown
            storeb.settings_loads(storea.settings_dumps())

            for foldera in storea.folders():
                if foldera.path:
                    folderb = storeb.get_folder(foldera.path)
                    if folderb:
                        folderb.settings_loads(foldera.settings_dumps())

        # remove state dir
        if self.state_path:
            os.system('rm -rf %s/%s' % (self.state_path, username))
            os.system('rm -rf %s/%s.lock' % (self.state_path, username))
Example #21
    def run(self):
        while not self.stop:
            with log_exc(self.service.log):
                # iterate over a copy, since entries are popped from self.data below
                for info in list(self.data.values()):
                    if int(time.time()) - info['last_update'] > self.limit*60:
                        self.service.log.info('spreed: auto unavailable')
                        self.data.pop(info['user_id'])
                        self.service.data_set(info['user_id'], 'spreed', 'unavailable', '')
            time.sleep(1)
Example #22
    def status_event(self, msg):
        """ parse incoming status, and update presence service """

        with log_exc(self.service.log):
            if self.service.config['xmpp_user_id_strip_domain']:
                username = unicode(msg['from']).split('/')[0].split('@')[0] # strip entire domain
            else:
                username = unicode(msg['from']).split('/')[0].replace('@chat.', '@') # XXX chat?
            self.service.data_set(username, 'xmpp', STATUS_MAP[msg['type']], msg['status'])
Example #23
    def update(self, notification):
        with log_exc(self.log):
            self.log.info('notif: %s %s', notification.object_type, notification.event_type)

            if notification.object_type == 'item':
                folder = notification.object.folder
                _queue_or_store(self.state_path, self.user, self.store.entryid, folder.entryid, self.iqueue)

            elif notification.object_type == 'folder':
                _queue_or_store(self.state_path, self.user, self.store.entryid, None, self.iqueue)
Example #24
    def update(self, item, flags):
        with log_exc(self.log):
            self.log.info('new/updated item: %s', item.sourcekey)
            entryid2 = db_get(self.state_path, 'item_map_'+item.sourcekey)
            if entryid2:
                item2 = self.folder2.item(entryid2)
                self.folder2.delete(item2) # TODO remove from db
            item2 = item.copy(self.folder2)
            db_put(self.state_path, 'item_map_'+item.sourcekey, item2.entryid)
            update_user_info(self.state_path, self.user, 'items', 'add', 1)
Example #25
    def import_pst(self, p, store):
        folders = list(p.folder_generator())
        root_path = rev_cp1252(folders[0].path)
        self.distlist_entryids = []
        self.entryid_map = {}

        for folder in folders:
            with log_exc(self.log, self.stats):
                import_nids = []
                if self.options.nids:
                    for nid in self.options.nids:
                        nid = int(nid)
                        parentNid = p.nbd.nbt_entries[nid].nidParent
                        if folder.nid.nid == parentNid.nid:
                            import_nids.append(nid)
                    if not import_nids:
                        continue

                path = rev_cp1252(folder.path[len(root_path)+1:]) or '(root)'
                if path == '(root)' and folder.ContentCount == 0:
                    continue

                if self.options.folders and \
                   path.lower() not in [f.lower() for f in self.options.folders]:
                    continue

                self.log.info("importing folder '%s'", path)
                if self.options.import_root:
                    path = self.options.import_root + '/' + path
                while True:
                    try:
                        folder2 = store.folder(path, create=True)
                        if self.options.clean_folders:
                            folder2.empty()
                        if folder.ContainerClass:
                            # Imported IMAP folders have IPF.Imap in Outlook
                            if folder.ContainerClass == 'IPF.Imap':
                                self.log.info("Changing container class IPF.Imap to IPF.Note for '%s'", path)
                                folder2.container_class = 'IPF.Note'
                            else:
                                folder2.container_class = folder.ContainerClass
                        break
                    except MAPIErrorNetworkError as e:
                        self.log.warning("%s: Connection to server lost, retrying in 5 sec", e)
                        time.sleep(5)

                if import_nids:
                    for nid in import_nids:
                        message = pst.Message(pst.NID(nid), p.ltp, messaging=p.messaging)
                        self.import_message(message, folder2)
                else:
                    for message in p.message_generator(folder):
                        self.import_message(message, folder2)
        self.rewrite_entryids(store)
Example #26
    def main(self):
        config, server, options = self.service.config, self.service.server, self.service.options
        setproctitle.setproctitle('kopano-msr worker %d' % self.nr)

        while True:
            store_entryid, folder_entryid = self.iqueue.get()

            with log_exc(self.log):

                store = self.server.store(entryid=store_entryid)
                user = store.user
                store2 = self.server.store(entryid=STORE_STORE[store_entryid])
                state_path = os.path.join(config['state_path'], user.name)

                self.log.info('syncing for user %s', user.name)

                # sync folder
                if folder_entryid:
                    try:
                        folder = store.folder(entryid=folder_entryid)

                        entryid2 = db_get(state_path, 'folder_map_'+folder.sourcekey)
                        folder2 = store2.folder(entryid=entryid2)

                    except kopano.NotFoundError: # TODO further investigate
                        self.log.info('parent folder does not exist (anymore)')
                    else:
                        self.log.info('syncing folder %s (%s)', folder.sourcekey, folder.name)

                        # check previous state
                        state = db_get(state_path, 'folder_state_'+folder.sourcekey)
                        if state:
                            self.log.info('previous folder sync state: %s', state)

                        # sync and store new state
                        importer = FolderImporter(state_path, folder2, user, self.log)
                        newstate = folder.sync(importer, state)
                        db_put(state_path, 'folder_state_'+folder.sourcekey, newstate)

                # sync hierarchy
                else:
                    self.log.info('syncing hierarchy')

                    # check previous state
                    state = db_get(state_path, 'store_state_'+store_entryid)
                    if state:
                        self.log.info('found previous store sync state: %s', state)

                    # sync and store new state
                    importer = HierarchyImporter(state_path, store2, store_entryid, self.iqueue, store.subtree.sourcekey, user, self.log)
                    newstate = store.subtree.sync_hierarchy(importer, state)
                    db_put(state_path, 'store_state_'+store_entryid, newstate)

            self.oqueue.put((store_entryid, folder_entryid))
Example #27
    def delete(self, item, flags):
        """ for a deleted item, determine store and ask indexing plugin to delete """

        with log_exc(self.log):
            self.deletes += 1
            ids = db_get(self.mapping_db, item.sourcekey)
            if ids: # when a 'new' item is deleted right away (spooler?), the 'update' function may not have been called
                storeid, folderid = ids.split()
                doc = {'serverid': self.serverid, 'storeid': storeid, 'sourcekey': item.sourcekey}
                self.log.debug('store %s: deleted document with sourcekey %s', doc['storeid'], item.sourcekey)
                self.plugin.delete(doc)
Example #28
    def main(self):
        server = self.server
        state = server.state
        catcher = Checker(self)
        with log_exc(self.log):
            while True:
                try:
                    state = server.sync(catcher, state)
                except Exception as e:
                    self.log.info('Error: [%s]', e)
                time.sleep(1)
Example #29
def dump_delegates(user, server, stats, log):
    """ dump delegate users for given user """

    usernames = []
    with log_exc(log, stats):
        try:
            usernames = [d.user.name for d in user.delegations()]
        except (MAPIErrorNotFound, kopano.NotFoundError):
            log.warning("could not load delegations for %s", user.name)

    return pickle_dumps(usernames)
Example #30
    def update(self, item, flags):

        with log_exc(self.log):

            # only process mails that are not in the wastebasket
            if item.message_class != 'IPM.Note' or item.folder == item.store.wastebasket:
                pass
            # was the document processed by the spamfilter at all?
            elif item.header(self.config['header_result']) is None:
                log_str = "folder '%s', subject '%s': " % (item.folder.name, item.subject)
                self.log.debug(log_str + "ignored, no spam-headers found")
            else:
                detected_as_spam = (item.header(self.config['header_result']) == self.config['header_result_spam'])
                spam_user = item.header(self.config['header_user'])
                spam_id = item.header(self.config['header_id'])
                retrained = (db_get(self.retrained_db, spam_user + "-" + spam_id) == "1")
                in_spamfolder = (item.folder == item.store.junk)

                log_str = "spam_user: %s, folder: '%s', subject: '%s', spam_id: %s, detected_as_spam: %s, retrained: %s, in_spamfolder: %s, CONCLUSION: " % (spam_user, item.folder.name, item.subject, spam_id, detected_as_spam, retrained, in_spamfolder)

                if detected_as_spam:
                    if in_spamfolder:
                        if retrained:
                            self.log.info(log_str + "moved back to spam again: undo training as innocent")
                            self.train(spam_user, spam_id, "spam", "undo")
                        else:
                            self.log.debug(log_str + "spam already in spam folder, no action needed")
                    # in non-spam folder
                    else:
                        if not retrained:
                            self.log.info(log_str + "moved from spam: retraining as innocent")
                            self.train(spam_user, spam_id, "innocent", "")
                        else:
                            self.log.debug(log_str + "moved from spam, already retrained")

                # not detected as spam
                else:
                    if in_spamfolder:
                        if not retrained:
                            self.log.info(log_str + "moved to spam: retraining as spam")
                            self.train(spam_user, spam_id, "spam", "")
                        else:
                            self.log.debug(log_str + "moved to spam: already retrained")

                    # in non-spam folder
                    else:
                        if retrained:
                            self.log.info(log_str + "moved from spam again: undo training as spam")
                            self.train(spam_user, spam_id, "innocent", "undo")
                        else:
                            self.log.debug(log_str + "normal mail already in normal folder: no action needed")
Example #31
    def main(self):
        config, server, options = self.service.config, self.service.server, self.service.options
        setproctitle.setproctitle('kopano-msr control')

        def response(conn, msg):
            self.log.info('Response: %s', msg)
            conn.sendall((msg + '\r\n').encode())

        s = kopano.server_socket(config['server_bind_name'],
                                 ssl_key=config['ssl_private_key_file'],
                                 ssl_cert=config['ssl_certificate_file'],
                                 log=self.log)

        while True:
            with log_exc(self.log):
                try:
                    conn = None
                    conn, _ = s.accept()
                    fields_terms = []
                    for data in conn.makefile():
                        self.log.info('CMD: %s', data.strip())
                        data = data.split()

                        if data[0] == 'ADD':
                            user, target_user = data[1:]
                            self.subscribe.put((user, target_user, True))
                            response(conn, 'OK:')
                            break

                        elif data[0] == 'REMOVE':
                            user = data[1]
                            self.subscribe.put((user, None, False))
                            response(conn, 'OK:')
                            break

                        elif data[0] == 'LIST':
                            response(
                                conn, 'OK: ' + ' '.join(
                                    user + ':' + target
                                    for user, target in USER_USER.items()))
                            break

                        else:
                            response(conn, 'ERROR')
                            break

                except Exception:
                    response(conn, 'ERROR')
                    raise
                finally:
                    if conn:
                        conn.close()
Example #32
def load_delegates(user, server, data, stats, log):
    """ load delegate users for given user """

    with log_exc(log, stats):
        users = []
        for name in pickle_loads(data):
            try:
                users.append(server.user(name))
            except kopano.NotFoundError:
                log.warning("skipping delegation for unknown user '%s'", name)

        user.delete(user.delegations()) # XXX not in combination with --import-root, -f?
        for user2 in users:
            user.delegation(user2, create=True)
Example #33
    def delete(self, item, flags):

        with log_exc(self.log):
            self.log.debug('deleted document with sourcekey %s', item.sourcekey)