Code example #1 (score: 0)
File: redisnm.py — Project: isabella232/beaker-1
class RedisSynchronizer(SynchronizerImpl):
    """Synchronizer based on redis.

    Provided ``url`` can be both a redis connection string or
    an already existing StrictRedis instance.

    This Synchronizer only supports 1 reader or 1 writer at time, not concurrent readers.
    """

    # Locks auto-expire so a crashed holder cannot block everyone forever;
    # 15 minutes is assumed to be longer than any reasonable cache-entry
    # generation time.
    LOCK_EXPIRATION = 900
    MACHINE_ID = machine_identifier()

    def __init__(self, identifier, url):
        """Create a synchronizer for ``identifier``.

        ``url`` may be a redis connection string or an already
        constructed ``StrictRedis`` client, which is then used as-is.
        """
        super(RedisSynchronizer, self).__init__()
        self.identifier = "beaker_lock:%s" % identifier
        if isinstance(url, string_type):
            # Reuse one client per connection string across synchronizers.
            self.client = RedisNamespaceManager.clients.get(
                url, redis.StrictRedis.from_url, url)
        else:
            self.client = url

    def _get_owner_id(self):
        # Identify the lock owner by machine, process and thread so the
        # lock is usable in both local and distributed environments.
        return ("%s-%s-%s" %
                (self.MACHINE_ID, os.getpid(),
                 threading.current_thread().ident)).encode("ascii")

    def do_release_read_lock(self):
        # Readers and writers share the same exclusive lock.
        self.do_release_write_lock()

    def do_acquire_read_lock(self, wait):
        # BUGFIX: propagate the result, so callers using ``wait=False``
        # can tell whether the lock was actually acquired (previously
        # this always returned None, i.e. "not acquired").
        return self.do_acquire_write_lock(wait)

    def do_release_write_lock(self):
        identifier = self.identifier
        owner_id = self._get_owner_id()

        def execute_release(pipe):
            # Only delete the key if we still own it; it may have expired
            # and been re-acquired by another owner in the meantime.
            lock_value = pipe.get(identifier)
            if lock_value == owner_id:
                pipe.delete(identifier)

        # WATCH/MULTI transaction makes the get+delete pair atomic.
        self.client.transaction(execute_release, identifier)

    def do_acquire_write_lock(self, wait):
        owner_id = self._get_owner_id()
        while True:
            # BUGFIX: set the key and its TTL in a single atomic command.
            # The previous SETNX + PEXPIRE pair could leave a lock with no
            # expiration if the process died between the two calls,
            # blocking all other workers until manual cleanup.
            if self.client.set(self.identifier, owner_id,
                               nx=True, px=self.LOCK_EXPIRATION * 1000):
                return True

            if not wait:
                return False
            time.sleep(0.2)
Code example #2 (score: 0)
File: mongodb.py — Project: Madhuri8090/my-first-blog1
class MongoSynchronizer(SynchronizerImpl):
    """Provides a Writer/Reader lock based on MongoDB.

    Provided ``url`` can be both a mongodb connection string or
    an already existing MongoClient instance.

    The data will be stored into ``beaker_locks`` collection of the
    *default database*, so make sure your connection string or
    MongoClient point to a default database.

    Locks are identified by local machine, PID and threadid, so
    are suitable for use in both local and distributed environments.
    """
    # Locks auto-expire so a crashed holder cannot block everyone forever;
    # 15 minutes is assumed to be longer than any reasonable cache-entry
    # generation time.
    LOCK_EXPIRATION = 900
    MACHINE_ID = machine_identifier()

    def __init__(self, identifier, url):
        """Create a synchronizer for ``identifier``.

        ``url`` may be a mongodb connection string or an already
        constructed ``MongoClient``, which is then used as-is.
        """
        super(MongoSynchronizer, self).__init__()
        self.identifier = identifier
        if isinstance(url, string_type):
            # Reuse one client per connection string across synchronizers.
            self.client = MongoNamespaceManager.clients.get(url, pymongo.MongoClient, url)
        else:
            self.client = url
        self.db = self.client.get_default_database()

    def _clear_expired_locks(self):
        """Drop this lock's document when its timestamp is too old.

        Returns the current UTC time so callers can stamp new locks with
        the same instant that was used for the expiration check.
        """
        now = datetime.datetime.utcnow()
        expired = now - datetime.timedelta(seconds=self.LOCK_EXPIRATION)
        self.db.beaker_locks.delete_many({'_id': self.identifier, 'timestamp': {'$lte': expired}})
        return now

    def _get_owner_id(self):
        # Identify the lock owner by machine, process and thread so the
        # lock is usable in both local and distributed environments.
        return '%s-%s-%s' % (self.MACHINE_ID, os.getpid(), threading.current_thread().ident)

    def do_release_read_lock(self):
        owner_id = self._get_owner_id()
        # Remove ourselves from the readers list of the lock document.
        self.db.beaker_locks.update_one({'_id': self.identifier, 'readers': owner_id},
                                        {'$pull': {'readers': owner_id}})

    def do_acquire_read_lock(self, wait):
        owner_id = self._get_owner_id()
        while True:
            # BUGFIX: re-run the expiration sweep on every retry; clearing
            # only once before the loop meant a waiter could spin forever
            # on a lock that had already expired (with a stale timestamp).
            now = self._clear_expired_locks()
            try:
                # Succeeds only while no writer owns the lock; otherwise the
                # upsert violates the unique _id index and raises.
                self.db.beaker_locks.update_one({'_id': self.identifier, 'owner': None},
                                                {'$set': {'timestamp': now},
                                                 '$push': {'readers': owner_id}},
                                                upsert=True)
                return True
            except pymongo.errors.DuplicateKeyError:
                if not wait:
                    return False
                time.sleep(0.2)

    def do_release_write_lock(self):
        # Delete the lock document only if we are still its owner.
        self.db.beaker_locks.delete_one({'_id': self.identifier, 'owner': self._get_owner_id()})

    def do_acquire_write_lock(self, wait):
        owner_id = self._get_owner_id()
        while True:
            # BUGFIX: re-run the expiration sweep on every retry, for the
            # same reason as in do_acquire_read_lock above.
            now = self._clear_expired_locks()
            try:
                # Succeeds only while nobody owns the lock and there are no
                # active readers.
                self.db.beaker_locks.update_one({'_id': self.identifier, 'owner': None,
                                                 'readers': []},
                                                {'$set': {'owner': owner_id,
                                                          'timestamp': now}},
                                                upsert=True)
                return True
            except pymongo.errors.DuplicateKeyError:
                if not wait:
                    return False
                time.sleep(0.2)
Code example #3 (score: 0)
class MongoSynchronizer(SynchronizerImpl):
    """Writer/Reader lock backed by a MongoDB ``beaker_locks`` collection.

    ``url`` may be a mongodb connection string or an existing MongoClient;
    either way it must resolve to a default database. Locks are identified
    by machine, PID and thread id, so they work in both local and
    distributed environments.
    """
    # Locks auto-expire so a crashed holder cannot block everyone forever;
    # 15 minutes is assumed to be longer than any reasonable cache-entry
    # generation time.
    LOCK_EXPIRATION = 900
    MACHINE_ID = machine_identifier()

    def __init__(self, identifier, url):
        """Create a synchronizer for ``identifier`` on the given ``url``."""
        super(MongoSynchronizer, self).__init__()
        self.identifier = identifier
        if isinstance(url, string_type):
            # Reuse one client per connection string across synchronizers.
            self.client = MongoNamespaceManager.clients.get(
                url, pymongo.MongoClient, url)
        else:
            self.client = url
        self.db = self.client.get_default_database()

    def _clear_expired_locks(self):
        """Drop this lock's document when its timestamp is too old.

        Returns the current UTC time so callers can stamp new locks with
        the same instant that was used for the expiration check.
        """
        now = datetime.datetime.utcnow()
        expired = now - datetime.timedelta(seconds=self.LOCK_EXPIRATION)
        self.db.beaker_locks.delete_many({
            '_id': self.identifier,
            'timestamp': {
                '$lte': expired
            }
        })
        return now

    def _get_owner_id(self):
        # Identify the lock owner by machine, process and thread so the
        # lock is usable in both local and distributed environments.
        return '%s-%s-%s' % (self.MACHINE_ID, os.getpid(),
                             threading.current_thread().ident)

    def do_release_read_lock(self):
        # Compute the owner id once instead of once per filter field.
        owner_id = self._get_owner_id()
        # Remove ourselves from the readers list of the lock document.
        self.db.beaker_locks.update_one(
            {
                '_id': self.identifier,
                'readers': owner_id
            }, {'$pull': {
                'readers': owner_id
            }})

    def do_acquire_read_lock(self, wait):
        # Hoist the owner id: it is loop-invariant.
        owner_id = self._get_owner_id()
        while True:
            # BUGFIX: re-run the expiration sweep on every retry; clearing
            # only once before the loop meant a waiter could spin forever
            # on a lock that had already expired (with a stale timestamp).
            now = self._clear_expired_locks()
            try:
                # Succeeds only while no writer owns the lock; otherwise
                # the upsert violates the unique _id index and raises.
                self.db.beaker_locks.update_one(
                    {
                        '_id': self.identifier,
                        'owner': None
                    }, {
                        '$set': {
                            'timestamp': now
                        },
                        '$push': {
                            'readers': owner_id
                        }
                    },
                    upsert=True)
                return True
            except pymongo.errors.DuplicateKeyError:
                if not wait:
                    return False
                time.sleep(0.2)

    def do_release_write_lock(self):
        # Delete the lock document only if we are still its owner.
        self.db.beaker_locks.delete_one({
            '_id': self.identifier,
            'owner': self._get_owner_id()
        })

    def do_acquire_write_lock(self, wait):
        # Hoist the owner id: it is loop-invariant.
        owner_id = self._get_owner_id()
        while True:
            # BUGFIX: re-run the expiration sweep on every retry, for the
            # same reason as in do_acquire_read_lock above.
            now = self._clear_expired_locks()
            try:
                # Succeeds only while nobody owns the lock and there are
                # no active readers.
                self.db.beaker_locks.update_one(
                    {
                        '_id': self.identifier,
                        'owner': None,
                        'readers': []
                    }, {
                        '$set': {
                            'owner': owner_id,
                            'timestamp': now
                        }
                    },
                    upsert=True)
                return True
            except pymongo.errors.DuplicateKeyError:
                if not wait:
                    return False
                time.sleep(0.2)