Example #1
def user_console(user_email_address):
    with session_scope() as db_session:
        account = db_session.query(Account).filter_by(
            email_address=user_email_address).one()

        crispin_client = new_crispin(account.id, account.provider,
                                     conn_pool_size=1)
        with crispin_client.pool.get() as c:
            crispin_client.select_folder(crispin_client.folder_names(c)['all'],
                                         uidvalidity_cb(db_session, account),
                                         c)

            # Everything that touches `c` stays inside the pooled-connection block.
            server_uids = crispin_client.all_uids(c)

            banner = """
            You can access the crispin instance with the 'crispin_client' variable.
            You can access the IMAPClient connection with the 'c' variable.
            AllMail message UIDs are in 'server_uids'.
            You can refresh the session with 'refresh_crispin()'.

            IMAPClient docs are at:

                http://imapclient.readthedocs.org/en/latest/#imapclient-class-reference
            """

            IPython.embed(banner1=banner)
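
The pattern above recurs throughout these examples: build a crispin client with new_crispin, check a raw IMAPClient connection out of its pool for the duration of a with block, and pass that connection explicitly into every crispin call. A minimal sketch of that shape, reusing only names that appear in the example (the account id and provider string are placeholder values):

# Sketch of the pooled-connection pattern; the account id (1) and provider
# string ('Gmail') are placeholders, not values from the example above.
crispin_client = new_crispin(1, 'Gmail', conn_pool_size=1)
with crispin_client.pool.get() as c:
    # `c` is only valid inside this block; on exit it goes back to the pool.
    print(crispin_client.folder_names(c))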
Example #2
    def __init__(self, account_id, folder_name, email_address, provider, shared_state, state_handlers):
        self.folder_name = folder_name
        self.shared_state = shared_state
        self.state_handlers = state_handlers
        self.state = None

        self.log = get_logger(account_id, "sync")
        self.crispin_client = new_crispin(account_id, provider)

        Greenlet.__init__(self)
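
Since this constructor calls Greenlet.__init__, the monitor is evidently a gevent Greenlet subclass: callers start it with .start() (as Example #6 does) and its work happens in a _run method that this excerpt does not show. A self-contained sketch of that pattern, with a hypothetical _run body:

# Minimal gevent Greenlet-subclass pattern; the _run body here is purely
# illustrative and stands in for the real folder-sync logic.
from gevent import Greenlet, sleep

class TinyMonitor(Greenlet):
    def __init__(self, folder_name):
        self.folder_name = folder_name
        Greenlet.__init__(self)

    def _run(self):
        sleep(0)  # yield to the gevent hub
        print('syncing {0}'.format(self.folder_name))

monitor = TinyMonitor('inbox')
monitor.start()
monitor.join()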
Example #3
def _syncback_action(fn, imapaccount_id, folder_name):
    """ `folder_name` is an Inbox folder name, not a Gmail folder name. """
    with session_scope() as db_session:
        account = db_session.query(ImapAccount).join(Namespace).filter_by(
                id=imapaccount_id).one()
        crispin_client = new_crispin(account.id, account.provider,
                conn_pool_size=1, readonly=False)
        with crispin_client.pool.get() as c:
            crispin_client.select_folder(
                    _translate_folder_name(folder_name, crispin_client, c),
                    uidvalidity_cb, c)
            fn(account, db_session, crispin_client, c)
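
The fn argument is a callable that receives the account, the database session, the crispin client, and the raw connection once the target folder is selected read-write. A hypothetical, read-only action passed through _syncback_action could look like this (the account id and folder name are placeholders):

# Hypothetical read-only action; real syncback actions (archive, move,
# flag changes, ...) live elsewhere in the codebase.
def _count_uids(account, db_session, crispin_client, c):
    print(len(crispin_client.all_uids(c)))

_syncback_action(_count_uids, imapaccount_id=1, folder_name='inbox')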
Example #4
    def deco(crispin_client, db_session, log, folder_name, shared_state):
        while True:
            try:
                return fn(crispin_client, db_session, log, folder_name, shared_state)
            except IMAPClient.Error as e:
                log.error(e)
                log.debug("Creating new crispin for the job...")
                from inbox.server.crispin import new_crispin

                crispin_client = new_crispin(
                    crispin_client.account_id,
                    crispin_client.PROVIDER,
                    conn_pool_size=crispin_client.conn_pool_size,
                    readonly=crispin_client.readonly,
                )
                continue
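
Only the inner deco function appears in this excerpt; the enclosing decorator, which closes over fn, is not shown. One plausible shape for that wrapper (the name retry_on_imap_error and the use of functools.wraps are assumptions):

# Hypothetical outer wrapper for the retry logic above; the retry loop is
# elided here and replaced by a plain call.
from functools import wraps

def retry_on_imap_error(fn):
    @wraps(fn)
    def deco(crispin_client, db_session, log, folder_name, shared_state):
        # ... retry loop as in the excerpt above ...
        return fn(crispin_client, db_session, log, folder_name, shared_state)
    return deco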
Example #5
def check_new_uids(account_id, provider, folder_name, log, uid_download_stack, poll_frequency, syncmanager_lock):
    """ Check for new UIDs and add them to the download stack.

    We do this by comparing local UID lists to remote UID lists, maintaining
    the invariant that (stack uids)+(local uids) == (remote uids).

    We also remove local messages that have disappeared from the remote, since
    it's totally probable that users will be archiving mail as the initial
    sync goes on.

    We grab a new IMAP connection from the pool for this to isolate its
    actions from whatever the main greenlet may be doing.

    Runs until killed. (Intended to be run in a greenlet.)
    """
    log.info("Spinning up new UID-check poller for {}".format(folder_name))
    # can't mix and match crispin clients when playing with different folders
    crispin_client = new_crispin(account_id, provider, conn_pool_size=1)
    with crispin_client.pool.get() as c:
        with session_scope() as db_session:
            crispin_client.select_folder(folder_name, uidvalidity_cb(db_session, crispin_client.account_id), c)
        while True:
            remote_uids = set(crispin_client.all_uids(c))
            # We lock this section to make sure no messages are being
            # downloaded while we make sure the queue is in a good state.
            with syncmanager_lock:
                with session_scope() as db_session:
                    local_uids = set(account.all_uids(account_id, db_session, folder_name))
                    stack_uids = set(uid_download_stack.queue)
                    local_with_pending_uids = local_uids | stack_uids
                    deleted_uids = remove_deleted_uids(
                        account_id, db_session, log, folder_name, local_uids, remote_uids, syncmanager_lock, c
                    )
                    # XXX This double-grabs syncmanager_lock, does that cause
                    # a deadlock?
                    log.info("Removed {} deleted UIDs from {}".format(len(deleted_uids), folder_name))

                # filter out messages that have disappeared on the remote side
                new_uid_download_stack = {u for u in uid_download_stack.queue if u in remote_uids}

                # add in any new uids from the remote
                for uid in remote_uids:
                    if uid not in local_with_pending_uids:
                        new_uid_download_stack.add(uid)

                uid_download_stack.queue = sorted(new_uid_download_stack, key=int)
            sleep(poll_frequency)
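
The docstring's invariant, (stack uids) + (local uids) == (remote uids), is exactly what the locked section restores on each pass. The same reconciliation, isolated with plain Python sets and made-up UIDs:

# Standalone illustration of the UID reconciliation above; all UIDs are
# made up for the example.
remote_uids = {1, 2, 3, 5, 8}
local_uids = {1, 2, 4}      # 4 has disappeared from the remote
stack_uids = {3}

# Drop stack entries that vanished remotely, then queue anything that is
# neither downloaded locally nor already on the stack.
new_stack = {u for u in stack_uids if u in remote_uids}
new_stack |= remote_uids - (local_uids | stack_uids)
assert sorted(new_stack, key=int) == [3, 5, 8]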
Example #6
    def sync(self):
        """ Start per-folder syncs. Only have one per-folder sync in the
            'initial' state at a time.
        """
        with session_scope() as db_session:
            saved_states = dict(
                (saved_state.folder_name, saved_state.state)
                for saved_state in db_session.query(FolderSync).filter_by(account_id=self.account_id)
            )
            crispin_client = new_crispin(self.account_id, self.provider)
            with crispin_client.pool.get() as c:
                sync_folders = crispin_client.sync_folders(c)
                imapaccount = db_session.query(ImapAccount).get(self.account_id)
                folder_names = crispin_client.folder_names(c)
                save_folder_names(self.log, imapaccount, folder_names, db_session)
        for folder in sync_folders:
            if saved_states.get(folder) != "finish":
                self.log.info("Initializing folder sync for {0}".format(folder))
                thread = ImapFolderSyncMonitor(
                    self.account_id,
                    folder,
                    self.email_address,
                    self.provider,
                    self.shared_state,
                    self.folder_state_handlers,
                )
                thread.start()
                self.folder_monitors.add(thread)
                while not self._thread_polling(thread) and not self._thread_finished(thread):
                    sleep(self.heartbeat)
                # Allow individual folder sync monitors to shut themselves down
                # after completing the initial sync.
                if self._thread_finished(thread):
                    self.log.info("Folder sync for {} is done.".format(folder))
                    # NOTE: Greenlet is automatically removed from the group
                    # after finishing.

        self.folder_monitors.join()
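
The loop above blocks on _thread_polling and _thread_finished, which this excerpt does not define. Given that each monitor exposes a state attribute (see Example #2) and the folder sync states include 'initial', 'poll', and 'finish', a plausible sketch of those helpers is:

# Plausible helpers (not shown in the excerpt); they assume the monitor's
# `state` attribute takes values such as 'initial', 'poll', or 'finish'.
def _thread_polling(self, thread):
    return thread.state is not None and thread.state.startswith('poll')

def _thread_finished(self, thread):
    return thread.state is not None and thread.state.startswith('finish')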
Example #7
File: imap.py  Project: cenk/inbox
    def sync(self):
        """ Start per-folder syncs. Only have one per-folder sync in the
            'initial' state at a time.
        """
        with session_scope() as db_session:
            saved_states = dict((saved_state.folder_name, saved_state.state) \
                    for saved_state in db_session.query(FolderSync).filter_by(
                    imapaccount_id=self.account_id))
            crispin_client = new_crispin(self.account_id, self.provider)
            with crispin_client.pool.get() as c:
                sync_folders = crispin_client.sync_folders(c)
                imapaccount = db_session.query(ImapAccount).get(self.account_id)
                folder_names = crispin_client.folder_names(c)
                save_folder_names(self.log, imapaccount, folder_names,
                        db_session)
        for folder in sync_folders:
            if saved_states.get(folder) != 'finish':
                self.log.info("Initializing folder sync for {0}".format(folder))
                thread = ImapFolderSyncMonitor(self.account_id, folder,
                        self.email_address, self.provider, self.shared_state,
                        self.folder_state_handlers)
                thread.start()
                self.folder_monitors.append(thread)
                while not self._thread_polling(thread) and \
                        not self._thread_finished(thread):
                    sleep(self.heartbeat)
                # Allow individual folder sync monitors to shut themselves down
                # after completing the initial sync.
                if self._thread_finished(thread):
                    self.log.info("Folder sync for {0} is done.".format(folder))
                    self.folder_monitors.pop()

        # Just hang out. We don't want to block, but we don't want to return
        # either, since that will let the threads go out of scope.
        while True:
            sleep(self.heartbeat)
Example #8
def crispin_client(account_id, account_provider):
    from inbox.server.crispin import new_crispin
    return new_crispin(account_id, account_provider, conn_pool_size=1)
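
This small wrapper just builds a single-connection client; a hypothetical use of it (placeholder account id and provider string):

# Hypothetical usage; the account id and provider string are placeholders.
client = crispin_client(1, 'Gmail')
with client.pool.get() as c:
    print(client.folder_names(c))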
Example #9
def download_queued_threads(crispin_client, db_session, log, folder_name,
                            message_download_stack, status_cb,
                            syncmanager_lock, c):
    """ Download threads until `message_download_stack` is empty.

    UIDs and g_metadata that come out of `message_download_stack` are for
    the _folder that threads are being expanded in_.

    Threads are downloaded in the order they come out of the stack, which
    _ought_ to be putting newest threads at the top. Messages are
    downloaded newest-to-oldest in thread. (Threads are expanded to all
    messages in the email archive that belong to the threads corresponding
    to the given uids.)
    """
    num_total_messages = message_download_stack.qsize()
    log.info("{} messages found initially (unsorted by thread)"
             .format(num_total_messages))

    # We still need the original crispin connection for progress reporting,
    # so the easiest thing to do here with the current pooling setup is to
    # create a new crispin client for querying All Mail.
    all_mail_crispin_client = new_crispin(crispin_client.account_id,
                                          crispin_client.PROVIDER,
                                          conn_pool_size=1)
    log.info("Expanding threads and downloading messages.")

    with all_mail_crispin_client.pool.get() as all_mail_c:
        all_mail_crispin_client.select_folder(
            crispin_client.folder_names(c)['all'],
            uidvalidity_cb(db_session, crispin_client.account_id), all_mail_c)

        # Since we do thread expansion, for any given thread, even if we
        # already have the UID in the given GMessage downloaded, we may not
        # have _every_ message in the thread. We have to expand it and make
        # sure we have all messages.
        acc = db_session.query(ImapAccount).join(Namespace).filter_by(
            id=crispin_client.account_id).one()
        while not message_download_stack.empty():
            message = message_download_stack.get_nowait()
            # Don't try to re-download any messages that are in the same
            # thread. (Putting this _before_ the download to guarantee no
            # context switches happen in the meantime; we _should_ re-download
            # if another message arrives on the thread.)
            processed_msgs = [m for m in message_download_stack.queue if
                              m.g_metadata.thrid ==
                              message.g_metadata.thrid]
            processed_msgs.append(message)
            message_download_stack.queue = [
                m for m in message_download_stack.queue if m.g_metadata.thrid
                != message.g_metadata.thrid]
            thread_uids = all_mail_crispin_client.expand_threads(
                [message.g_metadata.thrid], all_mail_c)
            thread_g_metadata = all_mail_crispin_client.g_metadata(
                thread_uids, all_mail_c)
            download_thread(all_mail_crispin_client, db_session, log,
                            syncmanager_lock, thread_g_metadata,
                            message.g_metadata.thrid, thread_uids, all_mail_c)
            # Since we download msgs from All Mail, we need to separately make
            # sure we have ImapUids recorded for this folder (used in progress
            # tracking and delete detection).
            for msg in processed_msgs:
                add_new_imapuid(db_session, msg, folder_name, acc)
            report_progress(crispin_client, db_session, log, folder_name,
                            message_download_stack.qsize(), status_cb, c)
        log.info("Message download queue emptied")
Example #10
def check_new_g_thrids(account_id, provider, folder_name, log,
                       message_download_stack, poll_frequency,
                       syncmanager_lock):
    """ Check for new X-GM-THRIDs and add them to the download stack.

    We do this by comparing local UID lists to remote UID lists, maintaining
    the invariant that (stack uids)+(local uids) == (remote uids).

    We also remove local messages that have disappeared from the remote, since
    it's totally probable that users will be archiving mail as the initial
    sync goes on.

    We grab a new IMAP connection from the pool for this to isolate its
    actions from whatever the main greenlet may be doing.

    Runs until killed. (Intended to be run in a greenlet.)
    """
    # can't mix and match crispin clients when playing with different folders
    crispin_client = new_crispin(account_id, provider, conn_pool_size=1)
    with crispin_client.pool.get() as c:
        with session_scope() as db_session:
            crispin_client.select_folder(folder_name,
                                         uidvalidity_cb(
                                             db_session,
                                             crispin_client.account_id), c)
        while True:
            log.info("Checking for new/deleted messages during initial sync.")
            remote_uids = set(crispin_client.all_uids(c))
            # We lock this section to make sure no messages are being modified
            # in the database while we make sure the queue is in a good state.
            with syncmanager_lock:
                with session_scope() as db_session:
                    local_uids = set(account.all_uids(account_id, db_session,
                                                      folder_name))
                    stack_uids = {gm.uid for gm in
                                  message_download_stack.queue}
                    local_with_pending_uids = local_uids | stack_uids
                    deleted_uids = remove_deleted_uids(
                        account_id, db_session, log, folder_name,
                        local_uids, remote_uids, syncmanager_lock, c)
                    # NOTE: This double-grabs syncmanager_lock, but that
                    # seems to work just fine in the same greenlet.
                    log.info("Removed {} deleted UIDs from {}".format(
                        len(deleted_uids), folder_name))

                # filter out messages that have disappeared on the remote side
                new_message_download_stack = [gm for gm in
                                              message_download_stack.queue
                                              if gm.uid in remote_uids]

                # add in any new uids from the remote
                new_uids = [uid for uid in remote_uids if uid not in
                            local_with_pending_uids]
                flags = crispin_client.flags(new_uids, c)
                g_metadata = crispin_client.g_metadata(new_uids, c)
                for new_uid in new_uids:
                    # could have disappeared from the folder in the meantime
                    if new_uid in flags and new_uid in g_metadata:
                        new_message_download_stack.append(
                            GMessage(new_uid, g_metadata[new_uid],
                                     flags[new_uid].flags,
                                     flags[new_uid].labels))

                message_download_stack.queue = sorted(
                    new_message_download_stack, key=lambda m: int(m.uid))

            log.info("Idling on {0} with {1} timeout".format(
                folder_name, poll_frequency))
            c.idle()
            c.idle_check(timeout=poll_frequency)
            c.idle_done()
            log.info("IDLE on {0} detected changes or timeout reached"
                     .format(folder_name))
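
The idle() / idle_check(timeout=...) / idle_done() calls at the end are standard IMAPClient IDLE usage. The same sequence against a bare IMAPClient connection, outside the crispin machinery (host and credentials are placeholders):

# Minimal IMAPClient IDLE round trip; host, login and folder are placeholders.
from imapclient import IMAPClient

conn = IMAPClient('imap.example.com', use_uid=True, ssl=True)
conn.login('user@example.com', 'password')
conn.select_folder('INBOX')

conn.idle()                                 # enter IDLE mode
responses = conn.idle_check(timeout=30)     # block until changes or timeout
conn.idle_done()                            # leave IDLE mode before other commands
print(responses)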