def db_write_lock(namespace_id):
    """Return a blocking lock guarding this namespace's Inbox datastore data.

    Wrap not only the datastore write itself but also any code that
    _figures out_ what to write: outside the lock there is no guarantee
    that another process isn't updating the data behind your back.
    """
    lockfile = _db_write_lockfile_name(namespace_id)
    return Lock(lockfile, block=True)
def _get_lock_object(cls, account_id, lock_for=dict()):
    """Return the single per-process sync lock for `account_id`.

    NOTE: the mutable default `lock_for` is deliberate -- default args
    are evaluated once at import time, so the dict acts as a
    module-level cache, ensuring one lock per account per process.

    Unlike a bare `lock_for.setdefault(account_id, Lock(...))`, the
    Lock is only constructed on a cache miss; `setdefault` would
    evaluate (and build) a fresh Lock object on every call even when
    the account's lock already exists.
    """
    if account_id not in lock_for:
        lock_for[account_id] = Lock(cls._sync_lockfile_name(account_id),
                                    block=False)
    return lock_for[account_id]
from inbox.util.concurrency import retry_with_logging from inbox.log import get_logger, log_uncaught_errors logger = get_logger() from inbox.models.session import session_scope from inbox.models import ActionLog, Namespace from inbox.sqlalchemy_ext.util import safer_yield_per from inbox.util.file import Lock from inbox.actions import (mark_read, mark_unread, archive, unarchive, star, unstar, save_draft, delete_draft, mark_spam, unmark_spam, mark_trash, unmark_trash, send_draft, send_directly) # Global lock to ensure that only one instance of the syncback service is # running at once. Otherwise different instances might execute the same action # twice. syncback_lock = Lock('/var/lock/inbox_syncback/global.lock', block=False) ACTION_FUNCTION_MAP = { 'archive': archive, 'unarchive': unarchive, 'mark_read': mark_read, 'mark_unread': mark_unread, 'star': star, 'unstar': unstar, 'mark_spam': mark_spam, 'unmark_spam': unmark_spam, 'mark_trash': mark_trash, 'unmark_trash': unmark_trash, 'send_draft': send_draft, 'save_draft': save_draft, 'delete_draft': delete_draft,
def lock(block, filename=None):
    """Return a Lock on `filename`, creating a temp lockfile if needed.

    Args:
        block: whether the returned Lock should block when contended
            (passed through as Lock's `block` keyword).
        filename: path of the lockfile; when None, a fresh temporary
            file is created to back the lock.
    """
    if filename is None:
        import os
        handle, filename = tempfile.mkstemp()
        # mkstemp returns an OPEN file descriptor; close it so each
        # call doesn't leak an fd (the Lock works on the path alone).
        os.close(handle)
    return Lock(filename, block=block)