Code Example #1
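
All of the examples in this listing are excerpts from a single test module. They assume roughly the imports below, plus the `real_redis` and `fake_redis` pytest fixtures, which come from the project's conftest and are not shown here. The module path of the library under test is an assumption:

import time
import threading
from collections import defaultdict
from threading import Barrier

import pytest

# Assumed import path; Proxy, Pool, background_thread and background_proc
# are part of the library the tests exercise.
from redis_proxy import Proxy, Pool, background_thread, background_proc
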
def test_alternative_db(real_redis):
    """Test if we can set different values for the same key in different
    redis databases. The actual redis db depends on the config.
    """
    Pool().reload(cfg={
        "names": {
            "snmp": 1,
            "img": 2,
        },
    })

    default_prox = Proxy("cache")
    default_prox.set("x", 0)

    snmp_prox = Proxy("cache", db_name="snmp")
    snmp_prox.set("x", 1)

    img_prox = Proxy("cache", db_name="img")
    img_prox.set("x", 2)

    assert default_prox.get("x").val() == 0
    assert snmp_prox.get("x").val() == 1
    assert img_prox.get("x").val() == 2

    not_prox = Proxy("cache", db_name="not_there")
    not_prox.set("x", 3)

    # Non-existing db_names will land in db 0, same as the default:
    assert default_prox.get("x").val() == 3
    assert not_prox.get("x").val() == 3
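
The fallback asserted at the end (an unknown db_name lands in db 0) suggests a name lookup along these lines. This is a sketch of the idea only, not the library's actual code; only the "names" mapping layout is taken from the test above:

def resolve_db_index(cfg, db_name=None):
    """Map a symbolic db_name to a numeric redis database index.

    Hypothetical helper: unknown or missing names fall back to
    database 0, which is why "not_there" above ends up writing to
    the same db as the default proxy.
    """
    return cfg.get("names", {}).get(db_name, 0)
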
Code Example #2
def test_parallel_proxy_sets(real_redis):
    """Test many parallel sets from more than one process."""

    Proxy(['QC']).set("x", 0)
    n_increments = 1000
    jobs = 4

    def _many_increments():
        """Make a lot of individually locked increments.
        In practice you would of course take the lock around the for loop
        to reduce lock contention, but here we want to trigger races.
        """
        # Note: Every process needs its own lock.
        # Obvious, but easy to forget in unit tests.
        proxy = Proxy(['QC'])
        for _ in range(n_increments):
            with proxy:
                proxy.set("x", proxy.get("x").val() + 1)

    with background_thread(_many_increments, (), jobs=jobs - 1):
        # This code runs in the foreground:
        _many_increments()

    # See if all increments really got counted.
    assert Proxy(['QC']).get("x").val() == jobs * n_increments

    # Reset and see if it also works for multiple processes.
    Proxy(['QC']).set("x", 0)
    with background_proc(_many_increments, (), jobs=jobs - 1):
        # This code runs in the foreground:
        _many_increments()

    # See if all increments really got counted.
    assert Proxy(['QC']).get("x").val() == jobs * n_increments
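
`background_thread` and `background_proc` are test helpers that do not appear in these excerpts. A minimal sketch of the thread-based variant, with the signature inferred from the calls above and everything else assumed:

import threading
from contextlib import contextmanager

@contextmanager
def background_thread(func, args, jobs=1):
    """Run func(*args) in `jobs` background threads for the duration
    of the with-block and join them all on exit. (Sketch only; the
    project's real helper may differ.)"""
    threads = [threading.Thread(target=func, args=args) for _ in range(jobs)]
    for thread in threads:
        thread.start()
    try:
        yield
    finally:
        for thread in threads:
            thread.join()

A `background_proc` counterpart would presumably do the same with `multiprocessing.Process`.
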
Code Example #3
def test_recursive_local_lock(fake_redis):
    """Test if a local value can be modified by the same thread,
    but blocks when being accessed from more than one.
    """
    root_prox = Proxy(path=('QualityControl', ))
    root_prox.clear()

    thread_died_yet = False

    def _lock_in_thread():
        """The root_prox lock shoud block until it was released"""
        with root_prox:
            # This sleep is here to make sure thread_died_yet was set.
            time.sleep(0.05)
            assert thread_died_yet

    with root_prox:
        # Spin up a thread and see if it blocks as expected.
        thr = threading.Thread(target=_lock_in_thread)
        thr.start()
        thr.join(0.25)

        # This flag is needed since _lock_in_thread will (rightfully)
        # acquire the lock once this context manager has exited.
        thread_died_yet = True

    # Give the background thread time to run its assertion and finish.
    time.sleep(0.5)
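
The semantics tested here mirror Python's own `threading.RLock`: the owning thread may re-acquire the lock freely, while any other thread blocks. For comparison, the same behaviour with the standard library alone:

import threading

lock = threading.RLock()
acquired_elsewhere = threading.Event()

def _try_acquire():
    # Runs in another thread; a short timeout stands in for "blocks".
    if lock.acquire(timeout=0.1):
        acquired_elsewhere.set()

with lock:
    # Re-acquiring from the owning thread succeeds immediately:
    with lock:
        pass

    # ...while another thread times out waiting for it:
    thr = threading.Thread(target=_try_acquire)
    thr.start()
    thr.join()
    assert not acquired_elsewhere.is_set()
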
Code Example #4
def test_recursive_lock_expire(real_redis):
    """Regression test for https://adnymics.atlassian.net/browse/DEV-1569:

    Sometimes locks somehow survived and had their expire time stripped.
    This was because release() set the value on a recursive lock, which
    stripped the expire time without setting it again.

    We test this as an integration test, because the error does not
    show up when using fakeredis, only with a real redis connection.
    """
    prox = Proxy(
        ['lock-test'],
        lock_acquire_timeout=10,
        lock_expire_timeout=15,
    )

    conn = Pool().get_connection()

    prox._redis_lock.acquire()
    prox._redis_lock.acquire()

    assert conn.ttl("l:.lock-test") == 15

    prox._redis_lock.release()

    assert conn.ttl("l:.lock-test") == 15
    time.sleep(1)
    assert conn.ttl("l:.lock-test") == 14

    prox._redis_lock.release()

    # The lock was cleared -> no key anymore -> negative ttl
    assert conn.ttl("l:.lock-test") < 0
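
The Redis behaviour underneath is that a plain SET discards any TTL the key had; Redis 6.0 added the KEEPTTL flag (keepttl=True in redis-py) to preserve it. A small illustration of the failure mode described in the docstring above, assuming a local redis on the default port:

import redis

conn = redis.Redis()  # assumed local instance

conn.set("demo", "v1", ex=15)
assert conn.ttl("demo") > 0      # expire time is set

conn.set("demo", "v2")           # a plain SET strips the expire time...
assert conn.ttl("demo") == -1    # ...and the key now lives forever

conn.set("demo", "v3", ex=15)
conn.set("demo", "v4", keepttl=True)
assert conn.ttl("demo") > 0      # KEEPTTL preserves the expire time
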
Code Example #5
def test_many_open_connections(real_redis):
    """
    Test the connection pooling and see if we do not fail on too many.
    This test acquires 1000 open connections and runs some gets on it,
    to assert that the connection is actually being used.

    NOTE: This test assumes a high enough (> 1000) maxclients in redis.conf
    """
    prox = Proxy(
        ['ImageCache'],
        lock_acquire_timeout=100,
        lock_expire_timeout=105,
    )

    # Just set it to some dummy value.
    prox.set("url", "https://...")

    n_threads = 200
    barrier = Barrier(n_threads)

    def _use_me():
        """Helper function that does some dummy work"""
        # Wait for all threads to start.
        # We want to make sure the connection getting happens
        # at the same time for all threads.
        barrier.wait()

        for _ in range(100):
            assert prox["url"].exists()
            assert prox.val() == {"url": "https://..."}
            assert prox["url"].val() == "https://..."

    with background_thread(_use_me, (), jobs=n_threads - 1):
        # Just use the main thread as one extra worker (thus -1)
        _use_me()
Code Example #6
def test_sequential_lock(fake_redis):
    """
    Simply test if incrementing a value under a lock works in a single
    thread (other tests always exercise concurrent access).
    """
    locked_val = Proxy('LockMe').set("x", 0)
    for _ in range(1000):
        with locked_val:
            locked_val.set("x", locked_val.get("x").val() + 1)
Code Example #7
def test_basic_locking(fake_redis):
    """Check if single process locking works"""
    root_prox = Proxy(path=('QualityControl', ))
    root_prox.clear()

    root_prox.acquire()
    assert root_prox.is_locked()
    root_prox.set('child', {})
    root_prox.release()
Code Example #8
def test_if_equal(fake_redis):
    """See if __eq__ works as expected"""
    Proxy("section").set("x", 1)
    Proxy("section").set("y", 1)

    assert Proxy("section.x") == Proxy("section.y")
    Proxy("section").set("y", 2)
    assert Proxy("section.x") != Proxy("section.y")
Code Example #9
def test_basic_set(fake_redis):
    """See if the very basic testcases work"""
    root_prox = Proxy(path=('TestQualityControl', ))
    root_prox.clear()

    root_prox.set('a', 2)
    assert root_prox.get('a').val() == 2

    root_prox.set('a.b', 3)
    assert root_prox.get('a.b').val() == 3
    assert root_prox.get('a').val() == {'b': 3}

    root_prox.set('a.c', None)
    assert root_prox.get('a.c').val() is None
    assert root_prox.get('a').val() == {'b': 3, 'c': None}

    root_prox.set('a', {'b': 42, 'e': 3})
    assert root_prox.get('a').val() == {'b': 42, 'e': 3}
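
The dotted-path behaviour asserted here (intermediate dicts are created on demand, and a scalar that is in the way gets replaced) can be pictured with a plain nested-dict helper. A sketch of the idea only, not the library's implementation:

def set_path(root, path, value):
    """Set `value` under a dotted path, creating intermediate dicts.
    Hypothetical helper illustrating the semantics tested above."""
    *parents, leaf = path.split(".")
    node = root
    for key in parents:
        child = node.get(key)
        if not isinstance(child, dict):
            child = node[key] = {}   # replace scalars with dicts
        node = child
    node[leaf] = value

root = {}
set_path(root, "a", 2)
set_path(root, "a.b", 3)             # the scalar under "a" gives way
assert root == {"a": {"b": 3}}
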
Code Example #10
def test_parallel_lock(real_redis):
    """Test if two processes really block on a common ressource"""
    val = Proxy('LockMe').set("x", 0)

    checks = defaultdict(bool)

    def _lock_me():
        """See if the thread could acquire the lock at last"""
        time.sleep(0.5)
        val.acquire()
        val.release()
        checks["thread-got-through"] = True

    jobs = 2
    with background_thread(_lock_me, (), jobs=jobs - 1):
        val.acquire()
        time.sleep(2)
        val.release()
        time.sleep(0.5)

    assert checks["thread-got-through"]
Code Example #11
def test_locking_edge_cases(fake_redis):
    """See if locking edge cases like recursive locks work"""
    root_prox = Proxy(path=('QualityControl', ))
    root_prox.clear()

    assert not root_prox.is_locked()

    # This should work for the same thread:
    with root_prox:
        assert root_prox.is_locked()
        with root_prox:
            assert root_prox.is_locked()

    assert not root_prox.is_locked()

    # See if the lock is released when an exception happens
    # inside the with block.
    with pytest.raises(KeyError):
        with root_prox:
            raise KeyError("Inside job")

    assert not root_prox.is_locked()
Code Example #12
def test_same_reference(fake_redis):
    """Test if the same reference is returned for the same proxy path."""
    assert Proxy("x") is Proxy("x")
    assert Proxy("x") is not Proxy("y")