# These async tests exercise the postfix-mta-sts-resolver caches and daemons
# and assume imports along these lines (exact module paths may differ
# between versions):
#
#   import asyncio
#   import tempfile
#   import time
#
#   import pytest
#
#   from postfix_mta_sts_resolver import base_cache, netstring, utils
#   from postfix_mta_sts_resolver.proactive_fetcher import STSProactiveFetcher
#   from postfix_mta_sts_resolver.responder import STSSocketmapResponder
#
# event_loop, cache, cache_type, cache_opts and friends are pytest fixtures
# or parametrized arguments supplied by the surrounding test module.
async def test_respect_previous_proactive_fetch_ts(event_loop, cache):
    cfg = utils.populate_cfg_defaults(None)
    cfg['proactive_policy_fetching']['enabled'] = True
    cfg['proactive_policy_fetching']['interval'] = 86400
    cfg['proactive_policy_fetching']['grace_ratio'] = 2.0
    cfg['shutdown_timeout'] = 1

    previous_proactive_fetch_ts = time.time() - 1
    init_record = base_cache.CacheEntry(0, "19990907T090909", {})
    await cache.set("good.loc", init_record)
    await cache.set_proactive_fetch_ts(previous_proactive_fetch_ts)

    pf = STSProactiveFetcher(cfg, event_loop, cache)
    await pf.start()

    # Give the policy fetcher time to run; it should leave the cache untouched
    await asyncio.sleep(3)

    # Verify
    assert previous_proactive_fetch_ts == await cache.get_proactive_fetch_ts()

    result = await cache.get("good.loc")
    assert result == init_record  # no update

    await pf.stop()
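
The proactive-fetcher tests take event_loop and cache as pytest fixtures; event_loop comes from pytest-asyncio. A minimal sketch of what the cache fixture could look like, assuming the internal in-memory backend (the real suite may use another backend or fixture scope):

# Hypothetical fixture sketch; with pytest-asyncio this would be decorated
# with @pytest_asyncio.fixture (or @pytest.fixture on older plugin versions).
async def cache():
    cache = utils.create_cache("internal", {})
    await cache.setup()
    yield cache
    await cache.teardown()
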
async def test_scanning_in_batches(cache_type, cache_opts, n_items,
                                   batch_size_limit):
    # Prepare
    cache, tmpfile = await setup_cache(cache_type, cache_opts)
    data = []
    for n in range(n_items):
        item = ("test{:04d}".format(n + 1),
                base_cache.CacheEntry(n + 1, "pol_id", "pol_body"))
        data.append(item)
        await cache.set(*item)

    # Test (scan)
    token = None
    scanned = []
    while True:
        token, cache_items = await cache.scan(token, batch_size_limit)
        for cache_item in cache_items:
            scanned.append(cache_item)
        if token is None:
            break

    try:
        # Verify scanned data is same as inserted (order agnostic)
        assert len(scanned) == len(data)
        assert sorted(scanned) == sorted(data)
        # For internal LRU, verify it's scanned from LRU to MRU record
        if cache_type == "internal":
            assert scanned == data
    finally:
        await cache.teardown()
        if cache_type == 'sqlite':
            tmpfile.close()
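
test_scanning_in_batches (and the safe_set lifecycle variant further down) rely on a setup_cache helper that is not shown in this listing. A plausible sketch, mirroring the inline setup of the non-parametrized lifecycle test below; tmpfile is None for non-sqlite backends:

async def setup_cache(cache_type, cache_opts):
    # For sqlite, back the cache with a NamedTemporaryFile so each test
    # starts from a fresh database file.
    tmpfile = None
    if cache_type == 'sqlite':
        tmpfile = tempfile.NamedTemporaryFile()
        cache_opts["filename"] = tmpfile.name
    cache = utils.create_cache(cache_type, cache_opts)
    await cache.setup()
    return cache, tmpfile
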
async def test_capped_cache():
    cache = utils.create_cache("internal", {"cache_size": 2})
    await cache.setup()
    stored = base_cache.CacheEntry(0, "pol_id", "pol_body")
    await cache.set("test1", stored)
    await cache.set("test2", stored)
    await cache.set("test3", stored)
    assert await cache.get("test2") == stored
    assert await cache.get("test3") == stored
async def test_responder_expiration(event_loop):
    async def query(host, port, domain):
        reader, writer = await asyncio.open_connection(host, port)
        stream_reader = netstring.StreamReader()
        string_reader = stream_reader.next_string()
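        # Postfix socketmap protocol: the request is a netstring of the form
        # "<map name> <key>"; the reply is a single netstring that may arrive
        # split across several reads.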
        writer.write(netstring.encode(b'test ' + domain.encode('ascii')))
        try:
            res = b''
            while True:
                try:
                    part = string_reader.read()
                except netstring.WantRead:
                    data = await reader.read(4096)
                    assert data
                    stream_reader.feed(data)
                else:
                    if not part:
                        break
                    res += part
            return res
        finally:
            writer.close()

    with tempfile.NamedTemporaryFile() as cachedb:
        cfg = {}
        cfg["port"] = 18461
        cfg["cache_grace"] = 0
        cfg["shutdown_timeout"] = 1
        cfg["cache"] = {
            "type": "sqlite",
            "options": {
                "filename": cachedb.name,
            },
        }
        cfg = utils.populate_cfg_defaults(cfg)
        cache = utils.create_cache(cfg['cache']['type'],
                                   cfg['cache']['options'])
        await cache.setup()
        pol_body = {
            "version": "STSv1",
            "mode": "enforce",
            "mx": ["mail.loc"],
            "max_age": 1,
        }
        await cache.set("no-record.loc",
                        base_cache.CacheEntry(0, "0", pol_body))

        resp = STSSocketmapResponder(cfg, event_loop, cache)
        await resp.start()
        try:
            result = await query(cfg['host'], cfg['port'], 'no-record.loc')
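            # The cached entry (ts=0, max_age=1) has long expired and
            # cache_grace is 0, so the responder re-fetches; no-record.loc
            # publishes no live policy, hence NOTFOUND.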
            assert result == b'NOTFOUND '
        finally:
            await resp.stop()
            await cache.teardown()
async def test_cache_lifecycle(cache_type, cache_opts):
    if cache_type == 'sqlite':
        tmpfile = tempfile.NamedTemporaryFile()
        cache_opts["filename"] = tmpfile.name
    cache = utils.create_cache(cache_type, cache_opts)
    await cache.setup()
    assert await cache.get("nonexistent") is None
    stored = base_cache.CacheEntry(0, "pol_id", "pol_body")
    await cache.set("test", stored)
    await cache.set("test",
                    stored)  # second time for testing conflicting insert
    assert await cache.get("test") == stored
    await cache.teardown()
    if cache_type == 'sqlite':
        tmpfile.close()
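
The lifecycle and scan tests are parametrized over cache backends, but the decorators are not shown in this listing. A hypothetical shape (backend names and options are illustrative; the sqlite filename is injected by the test body itself):

@pytest.mark.parametrize("cache_type,cache_opts", [
    ("internal", {}),
    ("sqlite", {}),
])
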
async def test_cache_lifecycle(cache_type, cache_opts, safe_set):
    cache, tmpfile = await setup_cache(cache_type, cache_opts)

    try:
        assert await cache.get("nonexistent") is None
        stored = base_cache.CacheEntry(0, "pol_id", "pol_body")
        if safe_set:
            await cache.safe_set("test", stored, None)
            await cache.safe_set("test", stored, None)  # second time for testing conflicting insert
        else:
            await cache.set("test", stored)
            await cache.set("test", stored)  # second time for testing conflicting insert
        assert await cache.get("test") == stored
    finally:
        await cache.teardown()
        if cache_type == 'sqlite':
            tmpfile.close()
async def test_no_cache_update_during_grace_period(event_loop, cache):
    cfg = utils.populate_cfg_defaults(None)
    cfg['proactive_policy_fetching']['enabled'] = True
    cfg['proactive_policy_fetching']['interval'] = 86400
    cfg['proactive_policy_fetching']['grace_ratio'] = 2.0
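    # Records fresher than interval / grace_ratio (86400 / 2.0 = 43200 s here)
    # fall within the grace period and are not re-fetched.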
    cfg['shutdown_timeout'] = 1

    init_record = base_cache.CacheEntry(time.time() - 1, "19990907T090909", {})
    await cache.set("good.loc", init_record)

    pf = STSProactiveFetcher(cfg, event_loop, cache)
    await pf.start()

    # Wait for policy fetcher to do its round
    await asyncio.sleep(3)

    # Verify
    assert time.time() - await cache.get_proactive_fetch_ts() < 10

    result = await cache.get("good.loc")
    assert result == init_record  # no update: the cached policy is still fresh

    await pf.stop()
async def test_cache_update(event_loop, cache, domain, init_policy_id,
                            expected_policy_id, expected_update):
    cfg = utils.populate_cfg_defaults(None)
    cfg['proactive_policy_fetching']['enabled'] = True
    cfg['proactive_policy_fetching']['interval'] = 1
    cfg['proactive_policy_fetching']['grace_ratio'] = 1000
    cfg["default_zone"]["timeout"] = 1
    cfg['shutdown_timeout'] = 1

    await cache.set(domain, base_cache.CacheEntry(0, init_policy_id, {}))

    pf = STSProactiveFetcher(cfg, event_loop, cache)
    await pf.start()

    # Wait for policy fetcher to do its rounds
    await asyncio.sleep(3)

    # Verify
    assert time.time() - await cache.get_proactive_fetch_ts() < 10

    result = await cache.get(domain)
    assert result
    assert result.pol_id == expected_policy_id
    if expected_update:
        assert time.time() - result.ts < 10  # update
        # Due to an id change, a new body must be fetched
        if init_policy_id != expected_policy_id:
            assert result.pol_body
        # Otherwise we don't fetch a new policy body
        else:
            assert not result.pol_body
    else:
        assert result.ts == 0
        assert not result.pol_body

    await pf.stop()
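
test_cache_update is likewise driven by a parametrize decorator missing from this listing. A hypothetical shape consistent with the assertions above (real domains and policy IDs depend on the MTA-STS test server the suite runs against):

@pytest.mark.parametrize(
    "domain,init_policy_id,expected_policy_id,expected_update", [
        # Policy id changed: a new body must be fetched.
        ("good.loc", "stale-id", "fresh-id", True),
        # Policy id unchanged: only the timestamp is refreshed, body stays empty.
        ("good.loc", "fresh-id", "fresh-id", True),
        # Fetch fails: the record is left untouched and ts stays 0.
        ("unreachable.loc", "stale-id", "stale-id", False),
    ])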