def lightning_order_with_redlock() -> None:
    """
    Deduct one unit of stock inside a Redlock-guarded critical section.

    Fixes over the original:
    - bail out when the lock could not be acquired instead of running the
      critical section unprotected (dlm.lock() returns False on failure,
      it does not block),
    - release the lock in ``finally`` only when one was actually acquired.

    :return: None
    """
    r = redis.Redis()

    # "dlm" stands for "distributed lock manager".
    dlm = Redlock([{
        'host': 'localhost',
        'port': 6379,
        'db': 0
    }, ])

    lock = None
    try:
        # dlm.lock() does NOT block: it returns False when the lock
        # cannot be acquired.
        lock = dlm.lock(LOCK_KEY, 30000)
        if not lock:
            print('Failed to deduct stock')
            return
        # Business codes: read-modify-write on the stock counter is safe
        # only while the lock is held.
        remaining = int(r.get('stock'))
        if remaining > 0:
            r.set('stock', str(remaining - 1))
            print(f'Deducted stock, {remaining - 1} remaining')
        else:
            print('Failed to deduct stock')
    except MultipleRedlockException as e:
        print(e)
    finally:
        # Release the lock only if we actually hold one.
        if lock:
            dlm.unlock(lock)
Exemplo n.º 2
0
class RedisLock(object):
    """Context-manager style wrapper around a single-node Redlock."""

    def __init__(self, redis_host='127.0.0.1', redis_port=6379):
        server = {
            "host": redis_host,
            "port": redis_port,
            "db": 0,
        }
        self._lock_manager = Redlock([server])

    @contextmanager
    def lock(self, lock_id, timeout=10000):
        '''
        Context manager acquiring a global distributed lock shared by all
        applications that use the same redis.

        Uses the redis distlock implementation,
        @see https://redis.io/topics/distlock

        @lock_id string  unique id of the lock
        @timeout integer  timeout after which the lock will be released (in milliseconds)

        Raises LockFailed when the lock cannot be acquired.
        '''
        acquired = self._lock_manager.lock(lock_id, timeout)
        if acquired is False:
            raise LockFailed()
        try:
            yield acquired
        finally:
            # Always release, even if the managed block raised.
            self._lock_manager.unlock(acquired)
Exemplo n.º 3
0
class RedisClient(object):
    """StrictRedis proxy with Redlock-based distributed-lock helpers."""

    def __init__(self, host, port=6379, db=0):
        self.redis = redis.StrictRedis(host=host, port=port, db=db)
        self.redlock = Redlock([self.redis])

    def __getattr__(self, name):
        # Delegate every unknown attribute to the wrapped redis client.
        return getattr(self.redis, name)

    def acquireDLock(self, resource, ttl=300):
        """
        Acquire a distributed lock.

        ttl: lock expiry in seconds (default 300).
        Returns the lock object, or False on error.
        """
        resource = 'lock_' + resource
        try:
            # Redlock expects the TTL in milliseconds.
            return self.redlock.lock(resource, ttl * 1000)
        except MultipleRedlockException as e:
            logging.error('Error acquiring dlock: %s', e)
            return False

    def releaseDLock(self, lock):
        """
        Release a previously acquired distributed lock.

        Returns True on success, False on error.
        """
        try:
            self.redlock.unlock(lock)
        except MultipleRedlockException as e:
            logging.error('Error releasing dlock: %s', e)
            return False
        return True
Exemplo n.º 4
0
def dlock(key, ttl, **kwargs):
    """
    Distributed lock (generator-based context-manager body).

    Fix: the original yielded outside any try/finally, so an exception in
    the caller's block leaked the lock until its TTL expired.

    :param key: lock identifier
    :param ttl: lock time-to-live in milliseconds
    :param kwargs: optional extra settings (currently unused)
    :yield: the acquired Lock object, or False when acquisition failed
    """
    resource_servers = [{
        'host': REDIS_HOST,
        'port': REDIS_PORT,
        'db': REDIS_DB,
        'password': REDIS_PASSWORD
    }]
    dl = Redlock(resource_servers)
    # Acquire the lock; dl.lock() returns False on failure.
    lock = dl.lock(key, ttl)
    # if ret is False:
    #     detail = u'acquire lock[%s] error' % key
    #     raise AcquireLockError(detail)

    try:
        yield lock
    finally:
        # Release even if the caller's block raised; only a real Lock can
        # be unlocked (False means we never held it).
        if isinstance(lock, Lock):
            dl.unlock(lock)
Exemplo n.º 5
0
class RedisStore(AbstractStore):
    """Redis-based backend for deployments with replicas > 1"""

    def __init__(self, url: str):
        logger.info("Connecting to Redis on {}..".format(url))
        self._redis = redis.StrictRedis.from_url(url)
        # Single-instance Redlock used for the coarse "update" lock.
        self._redlock = Redlock([url])

    def set(self, key, value):
        # Compact JSON (no whitespace) keeps the stored payload small.
        payload = json.dumps(value, separators=(",", ":"))
        self._redis.set(key, payload)

    def get(self, key):
        raw = self._redis.get(key)
        if not raw:
            return None
        return json.loads(raw.decode("utf-8"))

    def acquire_lock(self):
        # Global "update" lock with a 10 s TTL; False when not acquired.
        return self._redlock.lock("update", 10000)

    def release_lock(self, lock):
        self._redlock.unlock(lock)

    def publish(self, event_type, event_data):
        payload = json.dumps(event_data, separators=(",", ":"))
        self._redis.publish("default", "{}:{}".format(event_type, payload))

    def listen(self):
        # Yields (event_type, data) tuples from the shared channel.
        pubsub = self._redis.pubsub()
        pubsub.subscribe("default")
        for msg in pubsub.listen():
            if msg["type"] != "message":
                continue
            event_type, data = msg["data"].decode("utf-8").split(":", 1)
            yield (event_type, json.loads(data))

    def create_screen_token(self):
        """Generate a new screen token and store it in Redis"""
        data = generate_token_data()
        token = data["token"]
        self._redis.set("screen-tokens:{}".format(token), json.dumps(data))
        return token

    def redeem_screen_token(self, token: str, remote_addr: str):
        """Validate the given token and bind it to the IP"""
        redis_key = "screen-tokens:{}".format(token)
        raw = self._redis.get(redis_key)
        if not raw:
            raise ValueError("Invalid token")
        record = json.loads(raw.decode("utf-8"))
        record = check_token(token, remote_addr, record)
        self._redis.set(redis_key, json.dumps(record))
Exemplo n.º 6
0
class RedisStore(AbstractStore):
    '''Redis-based backend for deployments with replicas > 1'''
    def __init__(self, url: str):
        logger.info('Connecting to Redis on {}..'.format(url))
        self._redis = redis.StrictRedis.from_url(url)
        # Single-instance Redlock used for the coarse 'update' lock below.
        self._redlock = Redlock([url])

    def set(self, key, value):
        '''Store *value* under *key* as compact JSON.'''
        self._redis.set(key, json.dumps(value, separators=(',', ':')))

    def get(self, key):
        '''Return the JSON-decoded value for *key*; None when missing.'''
        value = self._redis.get(key)
        if value:
            return json.loads(value.decode('utf-8'))

    def acquire_lock(self):
        '''Acquire the global update lock (10 s TTL); False on failure.'''
        return self._redlock.lock('update', 10000)

    def release_lock(self, lock):
        '''Release a lock returned by acquire_lock().'''
        self._redlock.unlock(lock)

    def publish(self, event_type, event_data):
        '''Publish "<event_type>:<json>" on the shared channel.'''
        self._redis.publish(
            'default',
            '{}:{}'.format(event_type,
                           json.dumps(event_data, separators=(',', ':'))))

    def listen(self):
        '''Yield (event_type, data) tuples from the shared channel.'''
        p = self._redis.pubsub()
        p.subscribe('default')
        for message in p.listen():
            if message['type'] == 'message':
                # Payload format is "<event_type>:<json>"; split only on
                # the first colon so JSON containing ':' stays intact.
                event_type, data = message['data'].decode('utf-8').split(
                    ':', 1)
                yield (event_type, json.loads(data))

    def create_screen_token(self):
        '''Generate a new screen token and store it in Redis'''
        data = generate_token_data()
        token = data['token']
        self._redis.set('screen-tokens:{}'.format(token), json.dumps(data))
        return token

    def redeem_screen_token(self, token: str, remote_addr: str):
        '''Validate the given token and bind it to the IP'''
        redis_key = 'screen-tokens:{}'.format(token)
        data = self._redis.get(redis_key)
        if not data:
            raise ValueError('Invalid token')
        data = json.loads(data.decode('utf-8'))
        # check_token returns the (possibly updated) token record, which
        # is persisted back so the binding sticks.
        data = check_token(token, remote_addr, data)
        self._redis.set(redis_key, json.dumps(data))
Exemplo n.º 7
0
class TestRedlock(APITestCase):
    """Exercises basic lock/unlock against a local redis."""

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        # Acquire with a 100 ms TTL, verify the resource name, release.
        first = self.redlock.lock("pants", 100)
        print(first)
        self.assertEqual(first.resource, "pants")
        self.redlock.unlock(first)

        # A fresh acquisition must succeed after release.
        second = self.redlock.lock("pants", 10)
        print(second)
        self.redlock.unlock(second)
Exemplo n.º 8
0
class RedisStore(AbstractStore):
    '''Redis-based backend for deployments with replicas > 1'''

    def __init__(self, url: str):
        logger.info('Connecting to Redis on {}..'.format(url))
        self._redis = redis.StrictRedis.from_url(url)
        # Single-instance Redlock used for the coarse 'update' lock below.
        self._redlock = Redlock([url])

    def set(self, key, value):
        '''Store *value* under *key* as compact JSON.'''
        self._redis.set(key, json.dumps(value, separators=(',', ':')))

    def get(self, key):
        '''Return the JSON-decoded value for *key*; None when missing.'''
        value = self._redis.get(key)
        if value:
            return json.loads(value.decode('utf-8'))

    def acquire_lock(self):
        '''Acquire the global update lock (10 s TTL); False on failure.'''
        return self._redlock.lock('update', 10000)

    def release_lock(self, lock):
        '''Release a lock returned by acquire_lock().'''
        self._redlock.unlock(lock)

    def publish(self, event_type, event_data):
        '''Publish "<event_type>:<json>" on the shared channel.'''
        self._redis.publish('default', '{}:{}'.format(event_type, json.dumps(event_data, separators=(',', ':'))))

    def listen(self):
        '''Yield (event_type, data) tuples from the shared channel.'''
        p = self._redis.pubsub()
        p.subscribe('default')
        for message in p.listen():
            if message['type'] == 'message':
                # Split only on the first colon so JSON containing ':'
                # stays intact.
                event_type, data = message['data'].decode('utf-8').split(':', 1)
                yield (event_type, json.loads(data))

    def create_screen_token(self):
        '''Generate a new screen token and store it in Redis'''
        data = generate_token_data()
        token = data['token']
        self._redis.set('screen-tokens:{}'.format(token), json.dumps(data))
        return token

    def redeem_screen_token(self, token: str, remote_addr: str):
        '''Validate the given token and bind it to the IP'''
        redis_key = 'screen-tokens:{}'.format(token)
        data = self._redis.get(redis_key)
        if not data:
            raise ValueError('Invalid token')
        data = json.loads(data.decode('utf-8'))
        # check_token returns the (possibly updated) token record, which
        # is persisted back so the binding sticks.
        data = check_token(token, remote_addr, data)
        self._redis.set(redis_key, json.dumps(data))
Exemplo n.º 9
0
class TestRedlock(unittest.TestCase):
    """Integration tests against a redis server on localhost."""

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        """A lock can be acquired, inspected, released and re-acquired."""
        lock = self.redlock.lock("pants", 100)
        self.assertEqual(lock.resource, "pants")
        self.redlock.unlock(lock)
        lock = self.redlock.lock("pants", 10)
        self.redlock.unlock(lock)

    def test_blocked(self):
        """Acquiring an already-held lock returns False."""
        lock = self.redlock.lock("pants", 1000)
        bad = self.redlock.lock("pants", 10)
        self.assertFalse(bad)
        self.redlock.unlock(lock)

    def test_bad_connection_info(self):
        """Unknown connection keys raise a Warning."""
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])

    def test_py3_compatible_encoding(self):
        """The value stored in redis matches the Lock object's key."""
        lock = self.redlock.lock("pants", 1000)
        key = self.redlock.servers[0].get("pants")
        # Fixed: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(lock.key, key)
Exemplo n.º 10
0
class TestRedlock(unittest.TestCase):
    """Integration tests against a redis server on localhost."""

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        """A lock can be acquired, inspected, released and re-acquired."""
        lock = self.redlock.lock("pants", 100)
        self.assertEqual(lock.resource, "pants")
        self.redlock.unlock(lock)
        lock = self.redlock.lock("pants", 10)
        self.redlock.unlock(lock)

    def test_blocked(self):
        """Acquiring an already-held lock returns False."""
        lock = self.redlock.lock("pants", 1000)
        bad = self.redlock.lock("pants", 10)
        self.assertFalse(bad)
        self.redlock.unlock(lock)

    def test_bad_connection_info(self):
        """Unknown connection keys raise a Warning."""
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])

    def test_py3_compatible_encoding(self):
        """The value stored in redis matches the Lock object's key."""
        lock = self.redlock.lock("pants", 1000)
        key = self.redlock.servers[0].get("pants")
        # Fixed: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(lock.key, key)
Exemplo n.º 11
0
    def id(self):
        # Return a usable session id for the device at self.client.host.
        # If the cached session in redis is not usable, authenticate under
        # a per-device distributed lock so only one worker logs in.
        #
        # NOTE(review): despite its name, check_session_invalid() appears
        # to return truthy when the session IS usable -- confirm.
        deviceIP = self.client.host
        self.session_id = self.get_redis_session(deviceIP)
        if not self.check_session_invalid(self.session_id, deviceIP):
            local_ip = self.get_local_ip()
            redisobj = RedisConfig()
            redis_server = redisobj.get_redis_servers(True)
            redis_conf_ls = [{
                "host": s["host"],
                "port": s["port"],
                "db": s["dbno"]
            } for s in redis_server]
            lock_mgmt = Redlock(redis_conf_ls)
            # Per-device lock (name includes our IP, device IP and user);
            # 30 s TTL.
            device_lock = lock_mgmt.lock(
                local_ip + "_" + deviceIP + '_' + self.username +
                '_device_lock_calabash', 30 * 1000)
            tmp_count = 0
            # While another worker holds the lock (lock() returned False),
            # poll every 0.5 s up to 1000 times; return early if a usable
            # session appears in redis in the meantime.
            while isinstance(device_lock,
                             bool) and not device_lock and tmp_count < 1000:
                tmp_count += 1
                time.sleep(0.5)
                self.session_id = self.get_redis_session(deviceIP)
                if self.check_session_invalid(self.session_id, deviceIP):
                    return self.session_id

                device_lock = lock_mgmt.lock(
                    local_ip + "_" + deviceIP + '_' + self.username +
                    '_device_lock_calabash', 30 * 1000)

            # Re-check once more after the wait loop.
            self.session_id = self.get_redis_session(deviceIP)
            if self.check_session_invalid(self.session_id, deviceIP):
                return self.session_id

            # Still no usable session: log in and publish the new one.
            self.authenticate(self.username, self.password)
            #set sessionid to redis
            self.set_session_to_redis(self.session_id, deviceIP)
            # NOTE(review): device_lock may still be False here (loop can
            # exhaust tmp_count) -- unlock(False) looks suspicious, confirm.
            lock_mgmt.unlock(device_lock)
        return self.session_id
Exemplo n.º 12
0
class SlaveSpider(Spider):
    """Slave spider that pulls work URLs from a shared redis list.

    Fixes over the original:
    - the lock wait loop re-acquires the lock instead of spinning on
      ``while isinstance(my_lock, bool): pass`` forever,
    - the queue-length counter no longer shadows the builtin ``len`` and
      is decremented so the inner loop terminates,
    - the payload file is closed via a context manager.
    """

    name = 'slave_spider'
    default_retry_count = 3
    default_retry_delay = 1

    def __init__(self):
        # dlm: distributed lock manager guarding the shared 'urls' list.
        self.dlm = Redlock([{"host": "42.96.132.158", "port": 6379, "db": 0, 'password': "******"}, ])
        self.list = []
        self.driver = PhantomJS()

    def start_requests(self):
        self.load_payload()
        while True:
            # dlm.lock() returns False (a bool) on failure and a Lock
            # object on success; keep retrying until it is held.
            my_lock = self.dlm.lock('URLSLOCK', 1000)
            while isinstance(my_lock, bool):
                my_lock = self.dlm.lock('URLSLOCK', 1000)
            remaining = self.dlm.servers[0].llen('urls')
            while remaining > 0:
                url = self.dlm.servers[0].lpop('urls')
                remaining -= 1
                self.dlm.unlock(my_lock)
                urls = self.get_urls(url=url)
                for u in urls:
                    yield Request(url=u, callback=self.parse)
        log.msg('the slave spider is over!', log.WARNING)

    def parse(self, response):
        log.msg(response.url+' is over', log.INFO)

    def get_urls(self, url):
        """Yield *url* with each loaded payload suffix appended."""
        for l in self.list:
            temp = url + l
            yield temp

    def load_payload(self):
        """Read payload suffixes from disk into self.list."""
        with open('Xspider/payload.txt', 'r') as f:
            for line in f:
                self.list.append(line.strip('\n'))
Exemplo n.º 13
0
class TestRedlock(unittest.TestCase):
    """Tests against a local redis plus a 3-node redlock fixture."""

    def setUp(self):
        # Fixed: the original wrapped construction in
        # ``except Exception: pass``, which left self.redlock/self.dstlock
        # unset and turned any failure into AttributeErrors later.
        # Redlock() only builds client objects, so there is nothing to
        # guard here.
        self.redlock = Redlock([{"host": "localhost"}])
        self.dstlock = Redlock([{"host": "localhost", "port": 6379, "socket_timeout": 0.5},
                                {"host": "localhost", "port": 6380, "socket_timeout": 0.5},
                                {"host": "localhost", "port": 6381, "socket_timeout": 0.5}])

    def test_lock(self):
        """A lock can be acquired, inspected, released and re-acquired."""
        lock = self.redlock.lock("pants", 100)
        self.assertEqual(lock.resource, "pants")
        self.redlock.unlock(lock)
        lock = self.redlock.lock("pants", 10)
        self.redlock.unlock(lock)

    def test_blocked(self):
        """Acquiring an already-held lock returns False."""
        lock = self.redlock.lock("pants", 1000)
        bad = self.redlock.lock("pants", 10)
        self.assertFalse(bad)
        self.redlock.unlock(lock)

    def test_bad_connection_info(self):
        """Unknown connection keys raise a Warning."""
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])

    def test_py3_compatible_encoding(self):
        """The value stored in redis matches the Lock object's key."""
        lock = self.redlock.lock("pants", 1000)
        key = self.redlock.servers[0].get("pants")
        self.assertEqual(lock.key, key)

    def test_ttl_not_int_trigger_exception_value_error(self):
        """A non-integer TTL raises ValueError."""
        with self.assertRaises(ValueError):
            self.redlock.lock("pants", 1000.0)

    def test_multiple_redlock_exception(self):
        """str() of the aggregate exception mentions each wrapped error."""
        ex1 = Exception("Redis connection error")
        ex2 = Exception("Redis command timed out")
        exc = MultipleRedlockException([ex1, ex2])
        exc_str = str(exc)
        self.assertIn('connection error', exc_str)
        self.assertIn('command timed out', exc_str)
Exemplo n.º 14
0
class RedisLocker(ILocker):
    """ILocker implementation backed by a single-server Redlock."""

    def __init__(self, redis_con):
        # retry_count=1: a failed attempt returns immediately; _lock()
        # implements its own polling loop instead.
        self.dlm = Redlock([redis_con, ], retry_count=1)
        self._locks = {}

    def _lock(self, key, timeout):
        """Block until the lock for *key* is held.

        Polls every ``timeout / 10`` milliseconds; *timeout* is the lock
        TTL in milliseconds.
        """
        lock_key = f'lock/{key}'
        poll_seconds = timeout / (10 * 1000)
        acquired = self.dlm.lock(lock_key, timeout)
        while not acquired:
            time.sleep(poll_seconds)
            acquired = self.dlm.lock(lock_key, timeout)
        self._locks[lock_key] = acquired

    def _unlock(self, key):
        """Release the lock previously acquired for *key*."""
        self.dlm.unlock(self._locks[f'lock/{key}'])
Exemplo n.º 15
0
class TestRedlock(unittest.TestCase):
    """Tests for a redlock variant whose lock() takes an extra TTL arg."""

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        """Acquire, verify resource name, release, re-acquire, release."""
        lock_name = "test_lock"
        held = self.redlock.lock(lock_name, 100, TEST_LOCK_TTL_MILLIS)
        self.assertEqual(held.resource, lock_name)
        self.redlock.unlock(held)
        held = self.redlock.lock(lock_name, 10, TEST_LOCK_TTL_MILLIS)
        self.redlock.unlock(held)

    def test_blocked(self):
        """A second acquisition raises MultipleRedlockException."""
        lock_name = "test_blocked"
        held = self.redlock.lock(lock_name, 1000, TEST_LOCK_TTL_MILLIS)
        with self.assertRaises(MultipleRedlockException):
            self.redlock.lock(lock_name, 10, TEST_LOCK_TTL_MILLIS)
        self.redlock.unlock(held)

    def test_bad_connection_info(self):
        """Unknown connection keys raise a Warning."""
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])

    def test_py3_compatible_encoding(self):
        """The value stored in redis equals the Lock's integer key."""
        lock_name = "test_py3_compatible_encoding"
        held = self.redlock.lock(lock_name, 1000, TEST_LOCK_TTL_MILLIS)
        stored = self.redlock.servers[0].get(lock_name)
        self.assertEqual(held.key, int(stored))

    def test_ttl_not_int_trigger_exception_value_error(self):
        """Float TTLs are rejected with ValueError."""
        lock_name = "ttl_not_int"
        with self.assertRaises(ValueError):
            self.redlock.lock(lock_name, 1000.0, 1000.0)

    def test_multiple_redlock_exception(self):
        """str() of the aggregate exception mentions each wrapped error."""
        first = Exception("Redis connection error")
        second = Exception("Redis command timed out")
        combined = MultipleRedlockException([first, second])
        text = str(combined)
        self.assertIn('connection error', text)
        self.assertIn('command timed out', text)
Exemplo n.º 16
0
class TestRedlock(unittest.TestCase):
    """Integration tests against a redis server on localhost."""

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        """A lock can be acquired, inspected, released and re-acquired."""
        lock = self.redlock.lock("pants", 100)
        self.assertEqual(lock.resource, "pants")
        self.redlock.unlock(lock)
        lock = self.redlock.lock("pants", 10)
        self.redlock.unlock(lock)

    def test_blocked(self):
        """Acquiring an already-held lock returns False."""
        lock = self.redlock.lock("pants", 1000)
        bad = self.redlock.lock("pants", 10)
        self.assertFalse(bad)
        self.redlock.unlock(lock)

    def test_bad_connection_info(self):
        """Unknown connection keys raise a Warning."""
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])

    def test_py3_compatible_encoding(self):
        """The value stored in redis matches the Lock object's key."""
        lock = self.redlock.lock("pants", 1000)
        key = self.redlock.servers[0].get("pants")
        # Fixed: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(lock.key, key)

    def test_ttl_not_int_trigger_exception_value_error(self):
        """A non-integer TTL raises ValueError."""
        with self.assertRaises(ValueError):
            self.redlock.lock("pants", 1000.0)

    def test_multiple_redlock_exception(self):
        """str() of the aggregate exception mentions each wrapped error."""
        ex1 = Exception("Redis connection error")
        ex2 = Exception("Redis command timed out")
        exc = MultipleRedlockException([ex1, ex2])
        exc_str = str(exc)
        self.assertIn('connection error', exc_str)
        self.assertIn('command timed out', exc_str)
Exemplo n.º 17
0
traces_path = sys.argv[3]

# Ensure the traces directory path ends with a slash so that
# traces_path + key below forms a valid file path.
if not traces_path[-1] == '/':
    traces_path += "/"

# To consume messages
# NOTE(review): 'topic' and 'kafka_connection' are defined earlier in the
# original script (not visible in this chunk).
consumer = KafkaConsumer(topic,
                         group_id='traces_cache',
                         bootstrap_servers=[kafka_connection])
consumer.commit()
# Distributed lock manager over the local redis; one lock per trace key
# so concurrent consumers don't interleave writes to the same file.
dlm = Redlock([
    {
        "host": "localhost",
        "port": 6379,
        "db": 0
    },
])
for message in consumer:
    key = message.key.decode('utf-8')
    # Spin until the per-key lock is acquired (dlm.lock returns False on
    # failure).
    lock = False
    while not lock:
        lock = dlm.lock(key, 5000)

    # Append the raw message bytes to this key's trace file.
    with open(traces_path + key, 'ab') as f:
        f.write(message.value)

    dlm.unlock(lock)

    consumer.task_done(message)
    consumer.commit()
Exemplo n.º 18
0
#     line = line.strip('\n')
#     url = url + line
#     list.append(url)
# print list

from redlock import Redlock
from redlock import MultipleRedlockException
import time
# Python 2 producer script: while holding (or spinning on) the shared
# 'LOCK', push timestamps onto the shared 'urls' list in redis.
dlm = Redlock([
    {
        "host": "42.96.132.158",
        "port": 6379,
        "db": 0,
        'password': "******"
    },
])

try:
    # Start from a clean redis -- flushall() wipes ALL keys on server 0.
    dlm.servers[0].flushall()
    my_lock = dlm.lock('LOCK', 1000)
    while True:
        if isinstance(my_lock, bool):
            # lock() returned False: not acquired.
            # NOTE(review): my_lock is never re-acquired in this branch,
            # so a failed first attempt spins here forever -- confirm.
            print 'wait'
            time.sleep(0.5)
        else:
            print 'dosomething'
            dlm.servers[0].lpush('urls', time.time())
    # NOTE(review): unreachable -- the while True above has no break.
    dlm.unlock(my_lock)
    time.sleep(0.5)
except MultipleRedlockException, e:
    raise e
Exemplo n.º 19
0
class ProbitScheduler(Scheduler):
    # Celery beat scheduler that keeps its schedule in redis (via
    # EntryProxy) and serializes task dispatch across beat instances with
    # a Redlock-based distributed lock.
    def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
        self.__redis_connection = redis_connection
        if self.__redis_connection is None:
            self.__redis_connection = StrictRedis.from_url(
                current_app.conf.CELERY_REDIS_SCHEDULER_URL)

        self._schedule = EntryProxy(self.__redis_connection)
        self._locker = locker
        if self._locker is None:
            self._locker = Redlock(
                [current_app.conf.CELERY_REDIS_SCHEDULER_URL])
        super(ProbitScheduler, self).__init__(*args, **kwargs)

    def setup_schedule(self):
        # Seed the redis-backed schedule with celery's default entries
        # plus the statically configured CELERYBEAT_SCHEDULE.
        self.install_default_entries(self._schedule)
        self._merge(self.app.conf.CELERYBEAT_SCHEDULE)

    def get_schedule(self):
        return self._schedule

    schedule = property(
        get_schedule)  # This isn't inherited anymore?  Do we want to do this?

    def sync(self):
        # Reload the schedule from the collection
        self._schedule = EntryProxy(self.__redis_connection)

    # I'm not sure what reserve() is intended to do, but it does not do what we
    # need it to do, so we define a _lock() method as well.
    def maybe_due(self, entry, publisher=None):
        # Run *entry* only if it is due AND we win the distributed lock;
        # due-ness is re-checked after acquiring, since another beat
        # instance may have run the entry in the meantime.
        is_due, next_time_to_run = entry.is_due()
        if not is_due:
            return next_time_to_run
        lock = self._lock(entry.name)
        if not lock:
            return next_time_to_run
        try:
            # Now that we have the lock, double-check the timestamps on the
            # entry before executing it.
            entry = self._schedule.sync(entry.name)
            if entry is None:
                return next_time_to_run
            is_due, next_time_to_run = entry.is_due()
            if not is_due:
                return next_time_to_run

            return Scheduler.maybe_due(self, entry, publisher)
        finally:
            self._unlock(lock)

    def _lock(self, name):
        # 1 s TTL; returns False when the lock is already held elsewhere.
        return self._locker.lock(name, 1000)

    def _unlock(self, lock):
        self._locker.unlock(lock)

    def _merge(self, schedule):
        """schedule_keys = self.__redis_connection.hgetall(ENTRY_LIST_KEY).keys()

        if len(schedule_keys) > 0:
            self.__redis_connection.hdel(ENTRY_LIST_KEY, *schedule_keys)"""
        # NOTE(review): the triple-quoted block above is disabled code
        # parked in the docstring position, not real documentation.

        for name, entry_dict in list(schedule.items()):
            entry = ScheduleEntry(name, **entry_dict)
            if name not in self._schedule:
                self._schedule[name] = entry
            else:
                # _lock() the existing entry so that these values aren't changed
                # while we're merging them.
                lock = self._lock(name)
                if lock:
                    try:
                        existing = self._schedule.sync(name)
                        if existing:
                            entry.last_run_at = existing.last_run_at
                            entry.total_run_count = existing.total_run_count
                        self._schedule[name] = entry
                    finally:
                        self._unlock(lock)
Exemplo n.º 20
0
# NOTE(review): 'db', 'dlm', 'traces_path', 'cache_path' and
# 'storage_path' are defined earlier in the original script (not visible
# in this chunk), and the outer try below continues past it.
gameplays = db.gameplays.find({'modified': True})
for gameplay in gameplays:
    try:
        gameplay_file = '%s' % gameplay['_id']

        # Spin until the per-gameplay distributed lock is acquired
        # (dlm.lock returns False on failure).
        lock = False
        while not lock:
            lock = dlm.lock(gameplay_file, 5000)

        try:
            shutil.move(traces_path + gameplay_file, cache_path + gameplay_file)
        except FileNotFoundError as e:
            print(e)
            continue
        finally:
            # Release the lock and clear the 'modified' flag regardless
            # of whether the move succeeded.
            dlm.unlock(lock)
            db.gameplays.update_one({'_id': gameplay['_id'], 'lastAccessed': gameplay['lastAccessed']},
                                    {'$set': {'modified': False}})

        # Append the cached trace file to permanent storage.
        with open(cache_path + gameplay_file, 'rb') as src:
            with open(storage_path + gameplay_file, 'ab') as dst:
                dst.write(src.read())
        with open(cache_path + gameplay_file, 'r') as f:
            traces = f.read().split('\n')
            collection_name = 'gameplays_results_' + str(gameplay['gameId'])
            if collection_name not in db.collection_names():
                db.create_collection(collection_name)

            gameplays_results = db[collection_name]

            result = gameplays_results.find_one({'gameplayId': gameplay['_id']})
Exemplo n.º 21
0
class ConsumerQueue(object):
    """Consumer side of a redis work queue.

    Work descriptors flow from a shared producer list into this
    consumer's own list (reliable-queue pattern via RPOPLPUSH); a Redlock
    manager provides per-descriptor locks while a task is processed.
    """

    def __init__(self,
                 redis_host=None,
                 redis_port=None,
                 redis_db=None,
                 producer_queue_id=None,
                 consumer_queue_id=None):
        # NOTE: assert-based validation is kept for compatibility but is
        # stripped when Python runs with -O.
        assert redis_host
        assert redis_port
        assert redis_db
        assert producer_queue_id
        assert consumer_queue_id

        pool = redis.ConnectionPool(host=redis_host,
                                    port=redis_port,
                                    db=redis_db)
        self._broker = redis.Redis(connection_pool=pool)
        # Fixed: the original first assigned producer_queue_id to
        # _consumer_queue_id and immediately overwrote it (dead store).
        self._consumer_queue_id = consumer_queue_id
        self._producer_queue_id = producer_queue_id

        self._lock_manager = Redlock([{
            'host': redis_host,
            'port': redis_port, 'db': redis_db},
        ])

    def dequeue_task(self):
        """ Pulls the descriptor for a task that is ready for processing
         returns a descriptor or None if no tasks are present
        """
        # do we still have work items in this worker's queue
        descriptor = self._broker.lindex(self._consumer_queue_id, 0)
        if not descriptor:
            # check the global work queue for work; rpoplpush atomically
            # moves the item into this consumer's queue so it is not lost
            # if the consumer crashes mid-processing
            descriptor = self._broker.rpoplpush(self._producer_queue_id,
                                                self._consumer_queue_id)

        return descriptor

    def retire_task(self, descriptor=None):
        """ Mark a task as having been successfully dispatched
        """
        assert descriptor

        popped_id = self._broker.lpop(self._consumer_queue_id)
        # The head of our queue must be the descriptor just processed.
        assert popped_id == descriptor

        return popped_id

    def lock_work_item(self,
                       descriptor=None,
                       lock_timeout=None):
        """ Locks a task to prevent other consumers from double processing
        """
        assert descriptor
        assert lock_timeout

        lock = self._lock_manager.lock(descriptor, lock_timeout)
        return lock

    def unlock_task(self,
                    lock=None):
        """Release a lock previously returned by lock_work_item()."""
        assert lock

        self._lock_manager.unlock(lock)
Exemplo n.º 22
0
class CacheManager(object):
    """Two-level redis cache.

    Each logical key stores the name of a uniquely-suffixed "item key"
    that holds the serialized value, so a refresh is an atomic pointer
    swap.  Writes are guarded by a Redlock-based distributed lock.
    """

    def __init__(self, redis_engine, lock_ttl=1000):
        self.redis = redis_engine
        self.redlock = Redlock([redis_engine])
        self.lock_ttl = lock_ttl  # lock TTL in milliseconds

    @staticmethod
    def get_lock_key(key):
        """Return the name of the lock guarding *key*."""
        return "%s:lock" % key

    def lock(self, key, nowait=False):
        """Acquire the distributed lock for *key*.

        Polls every 0.3 s until acquired.  With nowait=True the first
        failed attempt returns False instead of blocking.
        """
        lock_key = self.get_lock_key(key)
        l = False
        while not l:
            l = self.redlock.lock(lock_key, self.lock_ttl)
            if l or nowait:
                return l
            time.sleep(0.3)

    def unlock(self, lock):
        """Release a lock previously returned by lock()."""
        self.redlock.unlock(lock)

    def get_item(self, key, recover=None, ttl=None, arguments=([], {}), **kw):
        """Return the cached value for *key*.

        When missing and *recover* is given, rebuild the value via
        recover(*arguments[0], **arguments[1]) and cache the result.
        """
        item_key = self.redis.get(key)

        value = self.redis.get(item_key) if item_key else None

        # return None if cannot recover the value
        if value is None and recover is None:
            return None

        # Trying to recover the value
        if value is None:
            return self.set_item(key, lambda: recover(*arguments[0], **arguments[1]), ttl=ttl)
        else:
            return deserialize(value)

    def set_item(self, key, value, ttl=None):
        """Cache *value* (or value() if callable) under *key*."""
        old_item_key = self.redis.get(key)

        if old_item_key is None:
            # this is a new item
            # locking it to prevent concurrency violation
            lock = self.lock(key, nowait=True)
            if not lock:
                # it seems this item is loading in another thread
                # so, waiting for that:
                # wait & make sure the object is reloaded, the release the lock
                self.unlock(self.lock(key))
                return self.get_item(key)

            # check if item loaded
            v = self.get_item(key, None)
            if v:
                self.unlock(lock)
                return v

        else:
            lock = None

        try:
            guid = uuid.uuid1()
            item_key = "%s:%s" % (key, guid)

            value = value() if callable(value) else value

            self.redis.set(item_key, serialize(value), ex=ttl)

            self.redis.set(key, item_key, ex=ttl)
            if old_item_key:
                self.redis.delete(old_item_key)
            return value
        finally:
            if lock:
                self.unlock(lock)

    def hget_item(self, key, dict_key, recover=None, ttl=None, arguments=([], {}), **kw):
        """Hash variant of get_item(): the value lives at field *dict_key*."""
        item_key = self.redis.get(key)

        value = self.redis.hget(item_key, dict_key) if item_key else None

        # return None if cannot recover the value
        if value is None and recover is None:
            return None

        # Trying to recover the value
        if value is None:
            return self.hset_item(key, dict_key, lambda: recover(*arguments[0], **arguments[1]), ttl=ttl)
        else:
            return deserialize(value)

    def hset_item(self, key, dict_key, value, ttl=None):
        """Hash variant of set_item(): store at field *dict_key*."""
        old_item_key = self.redis.get(key)

        if old_item_key is None:
            # this is a new item
            # locking it to prevent concurrency violation
            lock = self.lock(key, nowait=True)
            if not lock:
                # it seems this item is loading in another thread
                # so, waiting for that:
                # wait & make sure the object is reloaded, the release the lock
                self.unlock(self.lock(key))
                return self.hget_item(key, dict_key)

            # check if item loaded
            v = self.hget_item(key, dict_key, None)
            if v:
                self.unlock(lock)
                return v
        else:
            lock = None

        try:
            guid = uuid.uuid1()
            item_key = "%s:%s" % (key, guid)

            value = value() if callable(value) else value

            self.redis.hset(item_key, dict_key, serialize(value))
            self.redis.expire(item_key, time=ttl)
            self.redis.set(key, item_key, ex=ttl)

            if old_item_key:
                self.redis.delete(old_item_key)

            return value
        finally:
            if lock:
                self.unlock(lock)

    def del_item(self, *keys):
        """Delete the given keys and the item keys they point to."""
        item_keys_to_remove = []
        for key in keys:
            item_key = self.redis.get(key)
            if item_key is not None:
                item_keys_to_remove.append(item_key)

        self.redis.delete(*item_keys_to_remove)
        self.redis.delete(*keys)

    def get_list(self, key, recover=None, ttl=None, arguments=([], {}), key_extractor=None):
        """Return (metadata, items) for a cached list, rebuilding via
        *recover* when missing."""
        # First, get the keys list
        value = self.redis.get(key)
        if value is None and recover is None:
            return None, None

        # Trying to recover the value
        if value is None:
            return self.set_list(
                key, lambda: recover(*arguments[0], **arguments[1]), ttl=ttl, key_extractor=key_extractor
            )
        else:
            metadata, item_keys = deserialize(value)
            return metadata, [self.get_item(item_key) for item_key in item_keys]

    def set_list(self, key, value, ttl=None, key_extractor=None):
        """Cache a (metadata, items) pair, storing each item separately."""
        # Fixed: collections.Iterable was a deprecated alias removed in
        # Python 3.10; use collections.abc.Iterable instead.
        from collections.abc import Iterable

        # locking it to prevent concurrency violation
        lock = self.lock(key)

        # check if item loaded
        v = self.get_list(key)
        if v[1]:
            self.unlock(lock)
            return v  # packed: (value_metadata, value)

        if key_extractor is None:

            def key_extractor(x):
                return x["id"]

        try:
            value_metadata, value = value() if callable(value) else value
            item_keys = []
            assert isinstance(value, Iterable), "Value must be iterable"

            for item_value in value:
                item_key = create_cache_key(key.split(":")[0], key_extractor(item_value))
                item_keys.append(item_key)
                self.set_item(item_key, item_value, ttl=ttl)

            self.redis.set(key, serialize((value_metadata, item_keys)), ex=ttl)

            return value_metadata, value
        finally:
            if lock:
                self.unlock(lock)

    def invalidate_item(self, key):
        """Remove *key* and its pointed-to item key from the cache."""
        item_key = self.redis.get(key)
        self.redis.delete(key)
        if item_key:
            self.redis.delete(item_key)

    def invalidate_list(self, key):
        """Remove a cached list pointer (item entries themselves remain)."""
        self.redis.delete(key)

    def decorate(self, namespace, list_=False, ttl=None, key_extractor=None):
        """Decorator caching a function's result under *namespace*, keyed
        by the call's keyword arguments."""
        def decorator(func):
            def wrapper(*args, **kwargs):
                cache_key = create_cache_key(namespace, kwargs)
                cache_params = dict(ttl=ttl, recover=func, arguments=(args, kwargs))
                if list_:
                    cache_method = self.get_list
                    cache_params["key_extractor"] = key_extractor
                else:
                    cache_method = self.get_item
                return cache_method(cache_key, **cache_params)

            return wrapper

        return decorator
Exemplo n.º 23
0
# host = '42.96.132.158'
# psd = 'wjdh84928399'
# r = redis.Redis(host=host, password=psd, db=0, port=6379, socket_timeout=10)
# url = r.lpop('urls')
# f = open('Xspider/payload.txt', 'r')
# list = []
# for line in f.readlines():
#     line = line.strip('\n')
#     url = url + line
#     list.append(url)
# print list

from redlock import Redlock
from redlock import MultipleRedlockException
import time

# Distributed lock manager backed by a single Redis node.
dlm = Redlock([{"host": "42.96.132.158", "port": 6379, "db": 0, 'password': "******"}, ])

try:
    dlm.servers[0].flushall()
    # redlock's lock() returns False (a bool) on failure or a Lock object
    # on success. BUGFIX: the original `while True` loop never re-attempted
    # the lock and never broke out, so unlock() was unreachable; poll until
    # the lock is actually acquired instead.
    my_lock = dlm.lock('LOCK', 1000)
    while isinstance(my_lock, bool):
        print('wait')
        time.sleep(0.5)
        my_lock = dlm.lock('LOCK', 1000)  # retry the acquisition
    print('dosomething')
    dlm.servers[0].lpush('urls', time.time())
    dlm.unlock(my_lock)  # now reachable once the work is done
    time.sleep(0.5)
except MultipleRedlockException as e:  # py2/py3-compatible syntax
    raise e
Exemplo n.º 24
0
class RecordLockManager(object):
    """Coordinates per-record distributed locks (Redlock over Redis) with a
    local, pickle-backed backlog of unfinished tasks.

    Backlog tasks are pickled into ``backlog_path`` so unfinished work
    survives process restarts; ``reload()`` re-hydrates them on startup.
    """

    def __init__(self, conf, prefix=None, ttl_sec=60, backlog_path=None):
        """
        :param conf: dict with a 'backend' section (host/port/password).
        :param prefix: optional namespace prepended (with '-') to names.
        :param ttl_sec: default lock time-to-live in seconds.
        :param backlog_path: abs path of the dir holding backlog pickles,
            or None to use bare pickle names (no persistence directory).
        """
        self.backlog_lock = Lock()
        self.conf = conf
        # Very high retry_count with a short delay: lock() effectively blocks.
        self.redlock = Redlock([{"host": conf['backend']['host'], "port": int(conf['backend']['port']), "db": 13, "password": conf['backend']['password']}, ],
                               retry_count=100000, retry_delay=0.01)
        # BUGFIX: the original only assigned self.prefix when a prefix was
        # supplied, so prefix=None raised AttributeError on the regex below.
        self.prefix = prefix + "-" if prefix is not None else ""
        self.ttl = ttl_sec * 1000
        # Raw string avoids invalid-escape warnings; pattern is unchanged.
        self.backlog_filename_pat = re.compile(self.prefix + r'([\w\d\.\-\/]+?)\-backlog\.([\w\d\-]+)')
        self.backlog = {}
        self.backlog_path = backlog_path  # Should be abs path to the dir containing backlog pickles
        self.is_dumping = False
        # BUGFIX: make_real_path() explicitly supports backlog_path=None,
        # but os.path.exists(None)/os.makedirs(None) raise -- guard it.
        if self.backlog_path is not None and not os.path.exists(self.backlog_path):
            os.makedirs(self.backlog_path)
        self.reload()

    def parse_backlog_filename(self, name):
        """Split a backlog pickle filename into (task_name, postfix);
        return (None, None) when *name* is not a backlog file."""
        match = self.backlog_filename_pat.search(name)
        if match is not None:
            return match.group(1), match.group(2)
        return None, None

    def reload(self):
        """Re-hydrate backlog tasks from the pickles found in backlog_path."""
        if self.backlog_path is None:
            return  # nothing persisted, nothing to reload
        for entry in os.listdir(self.backlog_path):
            if not os.path.isfile(os.path.join(self.backlog_path, entry)):
                continue
            name, postfix = self.parse_backlog_filename(entry)
            if name is None:
                continue
            with open(self.make_real_path(entry), 'rb') as ifile:
                # NOTE(review): pickle.load on files we wrote ourselves;
                # do not point backlog_path at untrusted data.
                data, saved_context = pickle.load(ifile)
            task = self.make_task(name, data, postfix=postfix)
            task.saved_context = saved_context
            task.is_backlog = True
            self.backlog[task.pickle_name] = task

    def make_real_path(self, pickle_name):
        """Return the on-disk path for *pickle_name* (bare name if no dir)."""
        if self.backlog_path is not None:
            return os.path.join(self.backlog_path, pickle_name)
        return pickle_name

    # TODO: implement exponential back off
    def choose_backlog_and_lock_it(self, ttl_sec=None, retry=5):
        """Pick a random backlog task, try to lock it, and return it.

        Returns None when the backlog is empty, the chosen task could
        not be locked, or *retry* draws were exhausted.
        """
        with self.backlog_lock:
            # BUGFIX: the original while-loop `continue`d past the counter
            # increment whenever it drew an already-locked task, so it
            # could spin forever; every draw now consumes one attempt.
            for _ in range(retry):
                if not self.backlog:
                    return None
                # list() so random.choice works on Python 3 dict views too.
                task = self.backlog[random.choice(list(self.backlog.keys()))]
                if task.lock is not None:
                    # Already locked, maybe by another thread; draw again.
                    continue
                self.try_lock(task, ttl_sec)
                return task if task.lock else None
            return None

    def try_lock(self, task, ttl_sec=None, block=False):
        """Attempt to acquire the redlock for *task*, storing the result
        on task.lock (None on failure). Spins until acquired if *block*."""
        ttl = ttl_sec * 1000 if ttl_sec is not None else self.ttl
        lock = self.redlock.lock(task.lock_name, ttl)
        while block and not lock:
            lock = self.redlock.lock(task.lock_name, ttl)
        task.lock = lock if lock else None

    def store_backlog(self, task, saved_context):
        """Persist *task* (data + context) to disk and park it in the
        in-memory backlog, releasing any lock it still holds."""
        print("Store backlog " + task.lock_name)
        self.is_dumping = True
        task.saved_context = saved_context
        with open(self.make_real_path(task.pickle_name), "wb") as ofile:
            pickle.dump((task.data, saved_context), ofile)
        self.is_dumping = False
        with self.backlog_lock:
            # Must ensure task holds no lock before storing it as backlog,
            # otherwise it may never be scheduled again.
            if task.lock is not None:
                self.unlock(task.lock)
                task.lock = None
            task.is_backlog = True
            self.backlog[task.pickle_name] = task

    def make_task(self, name, data, postfix=None):
        """Factory for a fresh (non-backlog) Task under this prefix."""
        return Task(name, self.prefix, data, is_backlog=False, postfix=postfix)

    def unlock(self, lock):
        """Release a redlock previously acquired via try_lock()."""
        self.redlock.unlock(lock)

    def commit_task(self, task):
        """Mark *task* done: drop its backlog entry and pickle, then
        release its lock if it still holds one."""
        if task.is_backlog:
            with self.backlog_lock:
                self.backlog.pop(task.pickle_name, None)
                real_path = self.make_real_path(task.pickle_name)
                if os.path.exists(real_path):
                    os.remove(real_path)
        if task.lock is not None:
            self.unlock(task.lock)
            task.lock = None
class ProbitScheduler(Scheduler):
    def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
        """Create the scheduler.

        Falls back to a Redis connection and a Redlock-based locker built
        from CELERY_REDIS_SCHEDULER_URL when none are supplied; remaining
        arguments are forwarded to the base Scheduler.
        """
        if redis_connection is None:
            redis_connection = StrictRedis.from_url(current_app.conf.CELERY_REDIS_SCHEDULER_URL)
        self.__redis_connection = redis_connection
        self._schedule = EntryProxy(self.__redis_connection)
        if locker is None:
            locker = Redlock([current_app.conf.CELERY_REDIS_SCHEDULER_URL])
        self._locker = locker
        super(ProbitScheduler, self).__init__(*args, **kwargs)

    def setup_schedule(self):
        """Install the default entries, then merge the statically
        configured CELERYBEAT_SCHEDULE into the Redis-backed schedule."""
        self.install_default_entries(self._schedule)
        self._merge(self.app.conf.CELERYBEAT_SCHEDULE)

    def get_schedule(self):
        """Return the Redis-backed schedule proxy."""
        return self._schedule

    schedule = property(get_schedule)  # This isn't inherited anymore?  Do we want to do this?

    def sync(self):
        # Reload the schedule from the collection
        # (a fresh proxy re-reads entries from Redis on access).
        self._schedule = EntryProxy(self.__redis_connection)

    # I'm not sure what reserve() is intended to do, but it does not do what we
    # need it to do, so we define a _lock() method as well.
    def maybe_due(self, entry, publisher=None):
        """Run *entry* if it is due, guarded by a distributed lock so that
        only one scheduler instance fires it; returns the next-check delay
        reported by the entry."""
        is_due, next_time_to_run = entry.is_due()
        if not is_due:
            return next_time_to_run
        lock = self._lock(entry.name)
        if not lock:
            # Another scheduler instance holds the lock; let it run the entry.
            return next_time_to_run
        try:
            # Now that we have the lock, double-check the timestamps on the
            # entry before executing it.
            entry = self._schedule.sync(entry.name)
            if entry is None:
                # The entry was removed while we were acquiring the lock.
                return next_time_to_run
            is_due, next_time_to_run = entry.is_due()
            if not is_due:
                return next_time_to_run

            return Scheduler.maybe_due(self, entry, publisher)
        finally:
            self._unlock(lock)

    def _lock(self, name):
        # Acquire a distributed lock for *name* with a hard-coded 1000 ms
        # TTL; the locker returns a falsy value when it cannot acquire.
        return self._locker.lock(name, 1000)

    def _unlock(self, lock):
        # Release a lock previously returned by _lock().
        self._locker.unlock(lock)

    def _merge(self, schedule):
        """schedule_keys = self.__redis_connection.hgetall(ENTRY_LIST_KEY).keys()

        if len(schedule_keys) > 0:
            self.__redis_connection.hdel(ENTRY_LIST_KEY, *schedule_keys)"""
        # NOTE(review): the string above is disabled code left in docstring
        # position, not documentation -- confirm whether it can be removed.
        # Merge the statically configured *schedule* into the Redis-backed
        # one, preserving run statistics for entries that already exist.
        for name, entry_dict in schedule.items():
            entry = ScheduleEntry(name, **entry_dict)
            if name not in self._schedule:
                entry = ScheduleEntry(name, **entry_dict) if False else entry
                self._schedule[name] = entry
            else:
                # _lock() the existing entry so that these values aren't changed
                # while we're merging them.
                lock = self._lock(name)
                if lock:
                    try:
                        existing = self._schedule.sync(name)
                        if existing:
                            # Carry over the run history so the merge does
                            # not reset the entry's scheduling state.
                            entry.last_run_at = existing.last_run_at
                            entry.total_run_count = existing.total_run_count
                        self._schedule[name] = entry
                    finally:
                        self._unlock(lock)