Example #1
def test_context_manager():
    """
    Test a RedLock can be released by the context manager automically.

    """
    with RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000):
        lock = RedLock("test_context_manager", [{
            "host": "localhost"
        }],
                       ttl=1000)
        locked = lock.acquire()
        assert locked == False

    lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000)
    locked = lock.acquire()
    assert locked == True

    # try to lock again within a with block
    try:
        with RedLock("test_context_manager", [{"host": "localhost"}]):
            # shouldn't be allowed since someone has the lock already
            assert False
    except RedLockError:
        # we expect this call to error out
        pass

    lock.release()
Example #2
File: lock2.py Project: tomsovic/Learn
def worker_lock_manager(key, ttl, **kwargs):
    """
    分布式锁
    :param key: 分布式锁ID
    :param ttl: 分布式锁生存时间
    :param kwargs: 可选参数字典
    :return: None
    """
    redis_servers = [{
        # 'host': '127.0.0.1',
        'host': '192.168.1.24',
        'port': 6379,
        'db': 0,
        'password': ''
    }]

    rlk = RedLock(redis_servers)

    # Acquire the lock
    lock = rlk.lock(key, ttl)

    yield lock

    # Release the lock
    rlk.unlock(lock)
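Since worker_lock_manager yields the acquired lock and only unlocks it after the yield, it is presumably meant to be used through contextlib.contextmanager. A minimal usage sketch under that assumption; the key and TTL values are illustrative:

from contextlib import contextmanager

# Assumption: worker_lock_manager is the generator defined above.
worker_lock = contextmanager(worker_lock_manager)

with worker_lock("jobs:nightly-report", 10000) as lock:
    if lock:  # the lock call may return a falsy value when acquisition fails
        print("doing work that must not run concurrently")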
Example #3
def run_test(fixture_id, exc_lock, uut, test, progress, unique_res=None, wait_res=None):
    """
    Temporary test runner
    :param uut:
    :param test:
    :return:
    """
    print(test)
    report_sequence_status(fixture_id, uut, test, progress)
    if wait_res:
        exc_lock.wait_for_all(wait_res)

    lock = None
    if unique_res:
        res_lock = RedLock(unique_res)
        report_lock_status(fixture_id, unique_res, True)
        lock = res_lock.acquire()
        while not lock:
            time.sleep(0.02)
            lock = res_lock.acquire()
        print('locking {}'.format(unique_res))
    time.sleep(randrange(10, 3500) / 1000.0)
    # todo use REST api to set function status
    verdict = rand_verdict()
    payload = {'verdict': verdict, 'test': test, 'uut': uut}  # ,'execution_id':execution_id, 'process_id':process_id}
    print(payload)
    if lock:
        print('releasing {}'.format(unique_res))
        res_lock.release()
        report_lock_status(fixture_id, unique_res, False)
    return verdict
Example #4
def test_context_manager():
    """
    Test a RedLock can be released by the context manager automically.
    """
    ttl = 1000
    with RedLock("test_context_manager", [{"host": "localhost"}], ttl=ttl) as validity:
        assert 0 < validity < ttl - ttl * CLOCK_DRIFT_FACTOR - 2
        lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=ttl)
        locked = lock.acquire()
        assert locked is False

    lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=ttl)
    locked = lock.acquire()
    assert locked is True

    # try to lock again within a with block
    try:
        with RedLock("test_context_manager", [{"host": "localhost"}]):
            # shouldn't be allowed since someone has the lock already
            assert False
    except RedLockError:
        # we expect this call to error out
        pass

    lock.release()
Example #5
def test_lock_with_multi_backend():
    """
    Test a RedLock can be acquired when at least N/2+1 redis instances are alive.
    Take the Redis instance on port 6380 down (or pause it with DEBUG SLEEP) during the test.
    """
    lock = RedLock("test_simple_lock",
                   connection_details=[{
                       "host": "localhost",
                       "port": 6379,
                       "db": 0,
                       "socket_timeout": 0.2
                   }, {
                       "host": "localhost",
                       "port": 6379,
                       "db": 1,
                       "socket_timeout": 0.2
                   }, {
                       "host": "localhost",
                       "port": 6380,
                       "db": 0,
                       "socket_timeout": 0.2
                   }],
                   ttl=1000)
    locked = lock.acquire()
    lock.release()
    assert locked == True
Example #6
class GlobalLock(object):
    """
    A lock object that blocks globally via Redis. Note that Redis is not considered a tier-1
    service, so this lock should not be used for any critical code paths.
    """
    def __init__(self, name, lock_ttl=600):
        self._lock_name = name
        self._redis_info = dict(app.config['USER_EVENTS_REDIS'])
        self._redis_info.update({
            'socket_connect_timeout': 5,
            'socket_timeout': 5,
            'single_connection_client': True
        })

        self._lock_ttl = lock_ttl
        self._redlock = None

    def __enter__(self):
        if not self.acquire():
            raise LockNotAcquiredException()

    def __exit__(self, type, value, traceback):
        self.release()

    def acquire(self):
        logger.debug('Acquiring global lock %s', self._lock_name)
        try:
            self._redlock = RedLock(self._lock_name,
                                    connection_details=[self._redis_info],
                                    ttl=self._lock_ttl)
            acquired = self._redlock.acquire()
            if not acquired:
                logger.debug('Was unable to acquire lock %s',
                             self._lock_name)
                return False

            logger.debug('Acquired lock %s', self._lock_name)
            return True
        except RedLockError:
            logger.debug('Could not acquire lock %s', self._lock_name)
            return False
        except RedisError as re:
            logger.debug('Could not connect to Redis for lock %s: %s',
                         self._lock_name, re)
            return False

    def release(self):
        if self._redlock is not None:
            logger.debug('Releasing lock %s', self._lock_name)
            try:
                self._redlock.release()
            except RedLockError:
                logger.debug('Could not release lock %s', self._lock_name)
            except RedisError as re:
                logger.debug(
                    'Could not connect to Redis for releasing lock %s: %s',
                    self._lock_name, re)

            logger.debug('Released lock %s', self._lock_name)
            self._redlock = None
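GlobalLock implements __enter__/__exit__ and raises LockNotAcquiredException when the lock cannot be taken, so callers presumably use it as a context manager. A minimal sketch; the lock name is illustrative:

try:
    with GlobalLock("user-events-cleanup", lock_ttl=600):
        # Critical section: only one worker at a time reaches this block.
        pass
except LockNotAcquiredException:
    # Another worker holds the lock; per the class docstring, nothing
    # critical should depend on this path, so it is safe to skip.
    pass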
Example #7
def test_from_url():
    """
    Test a RedLock can be acquired via from_url.
    """
    lock = RedLock("test_from_url", [{"url": "redis://localhost/0"}], ttl=1000)
    locked = lock.acquire()
    lock.release()
    assert locked == True
Example #9
def test_simple_lock():
    """
    Test a RedLock can be acquired.
    """
    lock = RedLock("test_simple_lock", [{"host": "localhost"}], ttl=1000)
    locked = lock.acquire()
    lock.release()
    assert locked == True
Example #11
def test_lock_with_validity():
    """
    Test a RedLock can be acquired and the lock validity is also returned.
    """
    ttl = 1000
    lock = RedLock("test_simple_lock", [{"host": "localhost"}], ttl=ttl)
    locked, validity = lock.acquire_with_validity()
    lock.release()
    assert locked is True
    assert 0 < validity < ttl - ttl * CLOCK_DRIFT_FACTOR - 2
Example #13
def test_lock_check():
    test_lock = RedLock("test_lock_check")
    assert test_lock.check() == False
    test_lock.acquire()
    assert test_lock.check() == True
    test_lock.release()
    assert test_lock.check() == False
Example #14
def test_lock_with_multi_backend():
    """
    Test a RedLock can be acquired when at least N/2+1 redis instances are alive.
    Take the Redis instance on port 6380 down (or pause it with DEBUG SLEEP) during the test.
    """
    lock = RedLock("test_simple_lock", connection_details=[
        {"host": "localhost", "port": 6379, "db": 0, "socket_timeout": 0.2},
        {"host": "localhost", "port": 6379, "db": 1, "socket_timeout": 0.2},
        {"host": "localhost", "port": 6380, "db": 0, "socket_timeout": 0.2}], ttl=1000)
    locked = lock.acquire()
    lock.release()
    assert locked == True
Example #15
class Lock():
    def __init__(self, key, ttl=1000, retry=None, delay=None):
        self.key = key
        self.ttl = ttl
        retry = retry if retry else settings.REDIS_BLOCKER_RETRY
        delay = delay if delay else settings.REDIS_BLOCKER_DELAY
        self.redis = RedLock(key, settings.REDIS_BLOCKER, retry, delay, ttl)

    def __enter__(self):
        if not self.redis.acquire():
            raise RedisLockFail("Can't acquire lock")

    def __exit__(self, type, value, traceback):
        self.redis.release()
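A usage sketch for the Lock wrapper above, assuming settings.REDIS_BLOCKER and RedisLockFail are importable from the surrounding project; the key and TTL are illustrative:

try:
    with Lock("billing:invoice:42", ttl=2000):
        pass  # only one process executes this block for the given key
except RedisLockFail:
    pass  # the lock is already held; retry later or skip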
Example #16
def test_lock_is_locked():
    lock = RedLock("test_lock_is_locked")
    # Clear possible initial states
    for node in lock.redis_nodes:
        node.delete(lock.resource)

    assert lock.locked() is False

    lock.acquire()
    assert lock.locked() is True

    lock.release()
    assert lock.locked() is False
Example #17
def execute_task_with_lock(proc, task_name, lock_key, x, y):
    """Executing tasks with lock
    
    Executing the task, but only one at a time, meaning
    if there are the same tasks being processed, the incoming
    one will be stalled
    
    Arguments:
        proc {Function} -- Actual function performing the task
        task_name {string} -- Name of the task
        lock_key {string} -- Lock key for the task
        x {int} -- First argument of the task
        y {int} -- Second argument of the task
    
    Returns:
        int -- result of the task
    """

    result = -1

    has_no_lock = True

    while has_no_lock:
        try:
            with RedLock(lock_key):
                result = proc(x, y)
                has_no_lock = False
        except RedLockError:
            print("waiting for the previous %s to complete; retrying in 1 sec" %
                  task_name)

            time.sleep(1)
            has_no_lock = True

    return result
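A minimal call sketch for the helper above; the add function, task name, and lock key are illustrative:

def add(x, y):
    return x + y

# Polls once per second until the "lock:add" lock is free, then runs add(2, 3).
result = execute_task_with_lock(add, "add", "lock:add", 2, 3)
print(result)  # 5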
Example #18
    def on_execute(self, data):
        karel_model = KarelModel(current_app.logger)
        map = ImpactMap()
        map.load(self.redis.get(data["game_id"]))
        karel_model.load_world(map.to_compiler())
        karel = Karel(karel_model, data["game_id"], data["nickname"], data["handle"])
        compiler = KarelCompiler(karel)

        try:
            compiler.compile(str(data["code"]))
        except Exception as e:
            emit("error", str(e), room=data["game_id"])
        else:
            try:
                while not compiler.execute_step():
                    pass
            except DyingException:
                pass

            if True:  # TODO: this should only run when the player didn't win
                for beeper in iter(partial(karel_model.return_beeper, data["handle"]), None):
                    msg = {"handle": data["handle"], "command": "spawnBeeper",
                           "params": {"x": beeper[1] * 24, "y": beeper[0] * 24}}
                    emit("command", json.dumps(msg), room=data["game_id"])

                karel_model.respawn(data["handle"])
                msg = '{"handle": "%s", "command": "die"}' % (data["handle"])
                emit('command', msg, room=data["game_id"])
                with RedLock("redlock:{}".format(data["game_id"])):
                    map = ImpactMap()
                    map.load(self.redis.get(data["game_id"]))
                    map.from_compiler(karel_model.dump_world())
                    self.redis.set(data["game_id"], json.dumps(map.impact_map))
Example #19
def wait_job():
    from core.continuous_queries.continuous_queries import get_continuous_query_by_name, cq_generate_update_query
    print("Checking cq_jobs in 'running' state")

    with RedLock("lock/recomputation_jobs/wait_job"):
        jobs = ContinuousQueryRecomputeJob.query.filter(
            or_(ContinuousQueryRecomputeJob.state == "running")).all()
        jobs = sorted(jobs, key=lambda x: 1 if x.priority == "high" else 2)
        for recomputation_job in jobs:
            # Re-execute the continuous query
            cq = get_continuous_query_by_name(recomputation_job.cq_name)

            last_run_start_ts = int(
                time.mktime(recomputation_job.last_run_start.timetuple()))
            last_run_end_ts = int(
                time.mktime(recomputation_job.last_run_end.timetuple()))

            last_run_start_ts = max(last_run_start_ts, 0)
            last_run_end_ts = max(last_run_end_ts, 0)

            cq_update_query = cq_generate_update_query(
                recomputation_job.cq_name, last_run_start_ts, last_run_end_ts)

            execution_start = time.time()
            result = influx_run_query(cq_update_query)
            execution_end = time.time()
            recomputation_job.last_execution_time = execution_end - execution_start

            recomputation_job.wait()
            db.session.add(recomputation_job)
            db.session.commit()
        db.session.remove()
Example #20
 def _release(self, acquire_num):
     with RedLock(self._redlock_name,
                  connection_details=[self.conn],
                  retry_times=30) as _lock:
         res = self.conn.decr(self.key, acquire_num)
         if res <= 0:
             self._reset_lock()
     return True
Example #21
def test_fail_to_lock_acquired():
    lock1 = RedLock("test_fail_to_lock_acquired", [{"host": "localhost"}], ttl=1000)
    lock2 = RedLock("test_fail_to_lock_acquired", [{"host": "localhost"}], ttl=1000)

    lock1_locked = lock1.acquire()
    lock2_locked = lock2.acquire()
    lock1.release()

    assert lock1_locked == True
    assert lock2_locked == False
Example #22
def test_lock_extend():
    test_lock = RedLock("test_lock_extend", ttl=2000)
    test_lock.acquire()
    time.sleep(1)
    test_lock.extend()
    time.sleep(1.5)
    assert test_lock.check() == True
Example #23
def test_lock_expire():
    lock1 = RedLock("test_lock_expire", [{"host": "localhost"}], ttl=500)
    lock1.acquire()
    time.sleep(1)

    # Now lock1 has expired, so we can acquire a new lock
    lock2 = RedLock("test_lock_expire", [{"host": "localhost"}], ttl=1000)
    locked = lock2.acquire()
    assert locked == True

    lock1.release()
    lock3 = RedLock("test_lock_expire", [{"host": "localhost"}], ttl=1000)
    locked = lock3.acquire()
    assert locked == False
Example #24
def get_lock(lock):
    """
    Get a distributed lock based on redis using RedLock algorithm
    """
    return RedLock(lock, connection_details=[
        {
            'connection_pool': pool
        },
    ])
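The pool referenced above is not part of the snippet; a sketch of how it might be created with redis-py and how the returned lock could be used (the pool settings and lock name are assumptions):

import redis

# Assumed module-level connection pool shared by every lock.
pool = redis.ConnectionPool(host="localhost", port=6379, db=0)

with get_lock("reports:nightly-rollup"):
    pass  # work that must not run concurrently across processes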
Example #26
 def on_spawn_beeper(self, data):
     with RedLock("redlock:{}".format(data["game_id"])):
         map = ImpactMap()
         map_data = self.redis.get(data["game_id"])
         map.load(map_data)
         beeper = map.spawn_beeper()
         self.redis.set(data["game_id"], json.dumps(map.impact_map))
     msg = {"handle": "common", "command": "spawnBeeper",
            "params": {"x": beeper["x"], "y": beeper["y"]}}
     emit("command", json.dumps(msg), room=data["game_id"])
Example #27
def get_lock(lockname, timeout=DEFAULT_TIMEOUT, retry_times=1, retry_delay=200):
    try:
        from django_redis import get_redis_connection
    except ImportError:
        raise Exception("Can't get default Redis connection")
    else:
        redis_client = get_redis_connection()

    if timeout is None:
        # redlock doesn't support an infinite timeout,
        # and holding a lock forever is a bad idea anyway
        timeout = DEFAULT_TIMEOUT

    lock = RedLock(lockname, [redis_client], retry_times=retry_times, retry_delay=retry_delay, ttl=int(timeout * 1000))
    got_lock = lock.acquire()
    try:
        yield got_lock
    finally:
        if got_lock:
            lock.release()
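Since get_lock yields, the real module presumably decorates it with contextlib.contextmanager; a usage sketch under that assumption (the lock name and timeout are illustrative):

from contextlib import contextmanager

get_lock = contextmanager(get_lock)  # assumption: the real module applies this decorator

with get_lock("sync-products", timeout=30) as got_lock:
    if got_lock:
        pass  # perform the guarded work
    else:
        pass  # another worker holds the lock; skip or reschedule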
Example #28
 def _acquire_lock(self):
     with RedLock(self._redlock_name,
                  connection_details=[self.conn],
                  retry_times=30) as _lock:
         res = self.conn.get(self.key)
         if res:
             res = int(res)
             if res + self.acquire_num > self.concurrent_num:  # locked
                 return False
         self.conn.incr(self.key, self.acquire_num)
         self.conn.expire(self.key, self.expire)  # prevent deadlock
     return True
Example #29
    def acquire(self):
        logger.debug("Acquiring global lock %s", self._lock_name)
        try:
            self._redlock = RedLock(self._lock_name,
                                    connection_details=[self._redis_info],
                                    ttl=self._lock_ttl)
            acquired = self._redlock.acquire()
            if not acquired:
                logger.debug("Was unable to not acquire lock %s",
                             self._lock_name)
                return False

            logger.debug("Acquired lock %s", self._lock_name)
            return True
        except RedLockError:
            logger.debug("Could not acquire lock %s", self._lock_name)
            return False
        except RedisError as re:
            logger.debug("Could not connect to Redis for lock %s: %s",
                         self._lock_name, re)
            return False
Example #30
def test_context_manager():
    """
    Test a RedLock can be released by the context manager automically.

    """
    ttl = 1000
    with RedLock("test_context_manager", [{"host": "localhost"}], ttl=ttl) as validity:
        assert 0 < validity < ttl - ttl * CLOCK_DRIFT_FACTOR - 2
        lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=ttl)
        locked = lock.acquire()
        assert locked is False

    lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=ttl)
    locked = lock.acquire()
    assert locked is True

    # try to lock again within a with block
    try:
        with RedLock("test_context_manager", [{"host": "localhost"}]):
            # shouldn't be allowed since someone has the lock already
            assert False
    except RedLockError:
        # we expect this call to error out
        pass

    lock.release()
Example #31
def test_context_manager():
    """
    Test a RedLock can be released by the context manager automically.

    """
    with RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000):
        lock = RedLock("test_context_manager", [{
            "host": "localhost"
        }],
                       ttl=1000)
        locked = lock.acquire()
        assert locked == False

    lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000)
    locked = lock.acquire()
    assert locked == True

    lock.release()
Example #32
def get_long_lock(lockname, retry_times=1, retry_delay=200):
    try:
        from django_redis import get_redis_connection
    except ImportError:
        raise Exception("Can't get default Redis connection")
    else:
        redis_client = get_redis_connection()

    ttl = int(long_lock_ttl * 1000)
    lock = RedLock(lockname, [redis_client], retry_times=retry_times, retry_delay=retry_delay, ttl=ttl)
    got_lock = lock.acquire()

    if got_lock:
        thread = LongLockUpdateThread(lock)
        thread.start()

    try:
        yield got_lock
    finally:
        if got_lock:
            lock.release()
            thread.stop()
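LongLockUpdateThread is not shown in the snippet; presumably it keeps the long-lived lock alive by extending it periodically until stop() is called. A hypothetical sketch under that assumption (it also assumes long_lock_ttl is a module-level value in seconds, as suggested by the ttl calculation above):

import threading

class LongLockUpdateThread(threading.Thread):
    """Hypothetical: refreshes a long-lived RedLock until stopped."""

    def __init__(self, lock, interval=long_lock_ttl / 2):  # assumed refresh interval
        super().__init__(daemon=True)
        self.lock = lock
        self.interval = interval
        self._stopped = threading.Event()

    def run(self):
        # wait() returns True once stop() sets the event, which ends the loop.
        while not self._stopped.wait(self.interval):
            self.lock.extend()  # RedLock.extend() refreshes the lock's TTL

    def stop(self):
        self._stopped.set()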
Example #33
 def wrapper(*args):
     conn = [{
         'host': Config.MASTER_CACHE_HOST,
         'port': Config.MASTER_CACHE_PORT,
         'password': Config.MASTER_CACHE_PASSWORD,
         'db': 0
     }]
     k = str(args[0].__class__) + func.__name__
     v = cachesrv.get(k)
     if not v:
         with RedLock(k + '.lock', connection_details=conn):
             v = json.dumps(func(*args))
             cachesrv.set(k, v, Config.MASTER_CACHE_TTL)
     return v
Example #35
 async def _group_add_result(self, graph_uuid, group_uuid, task_uuid,
                             result):
     with RedLock('%s_%s' % (graph_uuid, group_uuid),
                  connection_details=[{
                      'url': self._url
                  }]):
         uuids_key = GRAPH_TASK_GROUP_UUIDS_PATTERN.format(
             graph_uuid=graph_uuid, group_uuid=group_uuid)
         results_key = GRAPH_TASK_GROUP_RESULT_PATTERN.format(
             graph_uuid=graph_uuid, group_uuid=group_uuid)
         pipe = self._connection.pipeline()
         pipe.rpush(uuids_key, task_uuid)
         pipe.rpush(results_key, json.dumps(result))
         pipe.expire(uuids_key, self.app.config.backend_results_timelife)
         pipe.expire(results_key, self.app.config.backend_results_timelife)
         group_len = pipe.llen(uuids_key)
         await pipe.execute()
         return await group_len
Example #36
def finish_job():
    print("Checking cq_jobs in 'waiting' state")

    with RedLock("lock/recomputation_jobs/run_job"):
        jobs = ContinuousQueryRecomputeJob.query.filter(
            ContinuousQueryRecomputeJob.state == "waiting").all()
        for recomputation_job in jobs:
            time_interval_start_ts = float(
                time.mktime(recomputation_job.time_interval_start.timetuple()))
            last_run_start_ts = float(
                time.mktime(recomputation_job.last_run_start.timetuple()))

            if last_run_start_ts <= time_interval_start_ts:
                recomputation_job.finish()
                db.session.add(recomputation_job)
                db.session.commit()
        db.session.remove()
Example #37
def run_job():
    print("Checking cq_jobs in 'created' or 'waiting' state")

    with RedLock("lock/recomputation_jobs/run_job"):
        jobs = ContinuousQueryRecomputeJob.query.filter(
            or_(ContinuousQueryRecomputeJob.state == "created",
                ContinuousQueryRecomputeJob.state == "waiting")).all()
        for recomputation_job in jobs:
            if recomputation_job.state == "created":
                recomputation_job.last_run_start = recomputation_job.time_interval_end
                recomputation_job.last_run_end = recomputation_job.time_interval_end

            time_interval_start_ts = float(
                time.mktime(recomputation_job.time_interval_start.timetuple()))
            last_run_start_ts = float(
                time.mktime(recomputation_job.last_run_start.timetuple()))

            if last_run_start_ts > time_interval_start_ts:

                if recomputation_job.last_execution_time:
                    interval_processed = (recomputation_job.last_run_end.timestamp() -
                                          recomputation_job.last_run_start.timestamp())
                    time_delta = interval_processed * (
                        MAX_EXECUTION_TIME_PER_RUN /
                        recomputation_job.last_execution_time)
                else:
                    time_delta = 12 * 3600

                time_delta = min(time_delta, MAX_TIME_DELTA)

                recomputation_job.last_run_end = recomputation_job.last_run_start
                recomputation_job.last_run_start = recomputation_job.last_run_end - datetime.timedelta(
                    seconds=time_delta)

                if time_delta == 0.0:
                    recomputation_job.finish()
                else:
                    recomputation_job.run()

                db.session.add(recomputation_job)
                db.session.commit()
        db.session.remove()
Example #38
def test_context_manager():
    """
    Test a RedLock can be released by the context manager automically.

    """
    with RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000):
        lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000)
        locked = lock.acquire()
        assert locked == False

    lock = RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000)
    locked = lock.acquire()
    assert locked == True

    # try to lock again within a with block
    try:
        with RedLock("test_context_manager", [{"host": "localhost"}]):
            # shouldn't be allowed since someone has the lock already
            assert False
    except RedLockError:
        # we expect this call to error out
        pass

    lock.release()
Example #39
def test_locked_span_lock_instances():
    lock1 = RedLock("test_locked_span_lock_instances")
    lock2 = RedLock("test_locked_span_lock_instances")
    # Clear possible initial states
    for node in lock1.redis_nodes:
        node.delete(lock1.resource)

    assert lock1.locked() == lock2.locked() is False
    lock1.acquire()

    assert lock1.locked() == lock2.locked() is True

    lock1.release()
    assert lock1.locked() == lock2.locked() is False
Example #40
 def __init__(self, key, ttl=1000, retry=None, delay=None):
     self.key = key
     self.ttl = ttl
     retry = retry if retry else settings.REDIS_BLOCKER_RETRY
     delay = delay if delay else settings.REDIS_BLOCKER_DELAY
     self.redis = RedLock(key, settings.REDIS_BLOCKER, retry, delay, ttl)
Example #41
    ap.add_argument("--update-interval", type=int, default=60)
    ap.add_argument("-v", "--verbose", action="store_true")
    ap.add_argument("--debug", action="store_true")
    ap.add_argument("--dry-run", action="store_true")
    args = ap.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s")

    if args.debug:
        logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)-8s %(message)s")

    mc = pm.MongoClient(host=args.mongodb_uri, connect=False)
    rc = redis.Redis(host=args.redis_host, port=args.redis_port, password=args.redis_pwd, db=0)
    df = redis.Redis(host=args.redis_host, port=args.redis_port, password=args.redis_pwd, db=1)
    lock = RedLock([{"host": args.redis_host, "port": args.redis_port, "db": 3}])

    logging.info("building filter ...")
    with mc.rssnews.news.find({}, {"uuid": True}) as cursor:
        for news_item in cursor:
            if df.scard(news_item["uuid"]) == 0:
                for sym in news_item["symbols"]:
                    df.sadd(news_item["uuid"], sym)
        logging.info("%d existing urls in total.", df.dbsize())

    logging.info("generating tasks ...")
    with mc.rssnews.feed.find() as cursor:
        logging.info("number of rss feeds = %d", cursor.count())
        tasks = []
        for item in cursor:
            logging.debug("rss=%(url)s", item)