async def test_respect_previous_proactive_fetch_ts(event_loop, cache):
    """A recent proactive-fetch timestamp must make the fetcher skip its round.

    The fetcher is configured with a long interval; since the stored
    timestamp is only one second old, neither the timestamp nor the
    cached policy may be touched.
    """
    cfg = utils.populate_cfg_defaults(None)
    cfg['proactive_policy_fetching']['enabled'] = True
    cfg['proactive_policy_fetching']['interval'] = 86400
    cfg['proactive_policy_fetching']['grace_ratio'] = 2.0
    cfg['shutdown_timeout'] = 1

    recent_ts = time.time() - 1
    seeded_entry = base_cache.CacheEntry(0, "19990907T090909", {})
    await cache.set("good.loc", seeded_entry)
    await cache.set_proactive_fetch_ts(recent_ts)

    fetcher = STSProactiveFetcher(cfg, event_loop, cache)
    await fetcher.start()
    # Give the fetcher a chance to (incorrectly) act.
    await asyncio.sleep(3)

    # Timestamp must be untouched and the cached entry unchanged.
    assert recent_ts == await cache.get_proactive_fetch_ts()
    assert await cache.get("good.loc") == seeded_entry
    await fetcher.stop()
async def responder(event_loop):
    """Fixture: run a TCP socketmap responder and yield (responder, host, port)."""
    import postfix_mta_sts_resolver.utils as utils
    cfg = utils.populate_cfg_defaults(None)
    cfg["zones"]["test2"] = cfg["default_zone"]
    srv = STSSocketmapResponder(cfg, event_loop)
    await srv.start()
    # async_generator-style yield: hand the running responder to the test.
    await yield_((srv, cfg['host'], cfg['port']))
    await srv.stop()
async def test_responder_expiration(event_loop):
    """A cached policy whose max_age already elapsed must answer NOTFOUND.

    cache_grace is set to 0 so the responder re-validates the (expired)
    entry instead of serving it from cache.
    """

    async def query(host, port, domain):
        # Speak the socketmap protocol directly: one netstring request,
        # then accumulate netstring chunks until the empty terminator.
        reader, writer = await asyncio.open_connection(host, port)
        parser = netstring.StreamReader()
        chunk_reader = parser.next_string()
        writer.write(netstring.encode(b'test ' + domain.encode('ascii')))
        try:
            reply = b''
            while True:
                try:
                    chunk = chunk_reader.read()
                except netstring.WantRead:
                    # Parser needs more bytes from the socket.
                    raw = await reader.read(4096)
                    assert raw
                    parser.feed(raw)
                else:
                    if not chunk:
                        break
                    reply += chunk
            return reply
        finally:
            writer.close()

    with tempfile.NamedTemporaryFile() as db_file:
        cfg = {
            "port": 18461,
            "cache_grace": 0,
            "shutdown_timeout": 1,
            "cache": {
                "type": "sqlite",
                "options": {
                    "filename": db_file.name,
                },
            },
        }
        cfg = utils.populate_cfg_defaults(cfg)
        cache = utils.create_cache(cfg['cache']['type'], cfg['cache']['options'])
        await cache.setup()
        # max_age=1 with ts=0 means this record expired long ago.
        policy = {
            "version": "STSv1",
            "mode": "enforce",
            "mx": ["mail.loc"],
            "max_age": 1,
        }
        await cache.set("no-record.loc", base_cache.CacheEntry(0, "0", policy))
        srv = STSSocketmapResponder(cfg, event_loop, cache)
        await srv.start()
        try:
            answer = await query(cfg['host'], cfg['port'], 'no-record.loc')
            assert answer == b'NOTFOUND '
        finally:
            await srv.stop()
        await cache.teardown()
async def unix_responder(event_loop):
    """Fixture: run a UNIX-socket socketmap responder and yield (responder, path)."""
    import postfix_mta_sts_resolver.utils as utils
    cfg = utils.populate_cfg_defaults({
        'path': '/tmp/mta-sts.sock',
        'mode': 0o666
    })
    cfg["zones"]["test2"] = cfg["default_zone"]
    srv = STSSocketmapResponder(cfg, event_loop)
    await srv.start()
    # async_generator-style yield: hand the running responder to the test.
    await yield_((srv, cfg['path']))
    await srv.stop()
async def responder(event_loop):
    """Fixture: TCP responder backed by an explicit cache; yields (responder, host, port)."""
    import postfix_mta_sts_resolver.utils as utils
    cfg = utils.populate_cfg_defaults(None)
    cfg["zones"]["test2"] = cfg["default_zone"]
    backing_cache = utils.create_cache(cfg['cache']['type'],
                                      cfg['cache']['options'])
    await backing_cache.setup()
    srv = STSSocketmapResponder(cfg, event_loop, backing_cache)
    await srv.start()
    yield srv, cfg['host'], cfg['port']
    # Teardown: stop the server first, then release the cache.
    await srv.stop()
    await backing_cache.teardown()
async def responder(event_loop):
    """Fixture: TCP responder with SNI requirement disabled; yields (responder, host, port)."""
    import postfix_mta_sts_resolver.utils as utils
    cfg = utils.populate_cfg_defaults({"default_zone": {"require_sni": False}})
    cfg["zones"]["test2"] = cfg["default_zone"]
    # Dedicated port so this fixture does not clash with the default responder.
    cfg["port"] = 28461
    backing_cache = utils.create_cache(cfg['cache']['type'],
                                      cfg['cache']['options'])
    await backing_cache.setup()
    srv = STSSocketmapResponder(cfg, event_loop, backing_cache)
    await srv.start()
    await yield_((srv, cfg['host'], cfg['port']))
    await srv.stop()
    await backing_cache.teardown()
async def unix_responder(event_loop):
    """Fixture: UNIX-socket responder backed by an explicit cache; yields (responder, path)."""
    import postfix_mta_sts_resolver.utils as utils
    cfg = utils.populate_cfg_defaults({'path': '/tmp/mta-sts.sock',
                                       'mode': 0o666})
    cfg["zones"]["test2"] = cfg["default_zone"]
    backing_cache = utils.create_cache(cfg['cache']['type'],
                                      cfg['cache']['options'])
    await backing_cache.setup()
    srv = STSSocketmapResponder(cfg, event_loop, backing_cache)
    await srv.start()
    await yield_((srv, cfg['path']))
    # Teardown: stop the server first, then release the cache.
    await srv.stop()
    await backing_cache.teardown()
def test_populate_cfg_defaults(cfg):
    """Defaults must be filled in with sane types for every top-level key."""
    conf = utils.populate_cfg_defaults(cfg)
    assert isinstance(conf['host'], str)
    assert isinstance(conf['port'], int)
    assert 0 < conf['port'] < 65536
    assert isinstance(conf['cache_grace'], (int, float))
    assert isinstance(conf['cache'], collections.abc.Mapping)
    assert conf['cache']['type'] in ('redis', 'sqlite', 'internal')
    assert isinstance(conf['default_zone'], collections.abc.Mapping)
    assert isinstance(conf['zones'], collections.abc.Mapping)
    # Every zone — including the default one — must carry the base options.
    all_zones = list(conf['zones'].values())
    all_zones.append(conf['default_zone'])
    for zone in all_zones:
        assert isinstance(zone, collections.abc.Mapping)
        assert 'timeout' in zone
        assert 'strict_testing' in zone
def test_populate_cfg_defaults(cfg):
    """Defaults must be filled in with sane types, including proactive-fetch keys."""
    conf = utils.populate_cfg_defaults(cfg)
    assert isinstance(conf['host'], str)
    assert isinstance(conf['port'], int)
    assert 0 < conf['port'] < 65536
    assert isinstance(conf['cache_grace'], (int, float))
    # Proactive policy fetching sub-section must be fully populated.
    proactive = conf['proactive_policy_fetching']
    assert isinstance(proactive['enabled'], bool)
    assert isinstance(proactive['interval'], int)
    assert isinstance(proactive['concurrency_limit'], int)
    assert isinstance(proactive['grace_ratio'], (int, float))
    assert isinstance(conf['cache'], collections.abc.Mapping)
    assert conf['cache']['type'] in ('redis', 'sqlite', 'internal')
    assert isinstance(conf['default_zone'], collections.abc.Mapping)
    assert isinstance(conf['zones'], collections.abc.Mapping)
    # Every zone — including the default one — must carry the base options.
    all_zones = list(conf['zones'].values())
    all_zones.append(conf['default_zone'])
    for zone in all_zones:
        assert isinstance(zone, collections.abc.Mapping)
        assert 'timeout' in zone
        assert 'strict_testing' in zone
async def test_no_cache_update_during_grace_period(event_loop, cache):
    """A fresh cached entry must survive a fetcher round untouched.

    interval/grace_ratio = 43200s of grace, and the seeded entry is only
    one second old — so the fetcher records its round timestamp but does
    not refresh the policy.
    """
    cfg = utils.populate_cfg_defaults(None)
    cfg['proactive_policy_fetching']['enabled'] = True
    cfg['proactive_policy_fetching']['interval'] = 86400
    cfg['proactive_policy_fetching']['grace_ratio'] = 2.0
    cfg['shutdown_timeout'] = 1

    fresh_entry = base_cache.CacheEntry(time.time() - 1, "19990907T090909", {})
    await cache.set("good.loc", fresh_entry)

    fetcher = STSProactiveFetcher(cfg, event_loop, cache)
    await fetcher.start()
    # Let the fetcher complete one round.
    await asyncio.sleep(3)

    # The round happened (timestamp is recent) but the entry is untouched.
    assert time.time() - await cache.get_proactive_fetch_ts() < 10
    assert await cache.get("good.loc") == fresh_entry
    await fetcher.stop()
async def test_cache_update(event_loop, cache, domain, init_policy_id, expected_policy_id, expected_update):
    """Parametrized check of the fetcher's refresh behavior per domain.

    Seeds a stale (ts=0) entry, runs the fetcher with an aggressive
    interval and huge grace_ratio (grace effectively disabled), then
    verifies whether — and how — the entry was refreshed.
    """
    cfg = utils.populate_cfg_defaults(None)
    cfg['proactive_policy_fetching']['enabled'] = True
    cfg['proactive_policy_fetching']['interval'] = 1
    cfg['proactive_policy_fetching']['grace_ratio'] = 1000
    cfg["default_zone"]["timeout"] = 1
    cfg['shutdown_timeout'] = 1

    await cache.set(domain, base_cache.CacheEntry(0, init_policy_id, {}))
    fetcher = STSProactiveFetcher(cfg, event_loop, cache)
    await fetcher.start()
    # Let the fetcher complete at least one round.
    await asyncio.sleep(3)

    assert time.time() - await cache.get_proactive_fetch_ts() < 10
    entry = await cache.get(domain)
    assert entry
    assert entry.pol_id == expected_policy_id
    if expected_update:
        assert time.time() - entry.ts < 10  # entry timestamp was refreshed
        if init_policy_id != expected_policy_id:
            # Policy id changed, so a fresh body must have been fetched.
            assert entry.pol_body
        else:
            # Same id: the body is deliberately not re-fetched.
            assert not entry.pol_body
    else:
        # No update expected: the stale seed survives as-is.
        assert entry.ts == 0
        assert not entry.pol_body
    await fetcher.stop()
async def module_cache_fixture():
    """Fixture: build the default cache backend, yield it, then tear it down."""
    cfg = populate_cfg_defaults(None)
    backend = create_cache(cfg['cache']['type'], cfg['cache']['options'])
    await backend.setup()
    yield backend
    await backend.teardown()
async def cache():
    """Fixture: default cache backend (async_generator style), torn down after use."""
    cfg = populate_cfg_defaults(None)
    backend = create_cache(cfg['cache']['type'], cfg['cache']['options'])
    await backend.setup()
    await yield_(backend)
    await backend.teardown()
def test_empty_config():
    """Loading an empty file must produce exactly the built-in defaults."""
    defaults = utils.populate_cfg_defaults(None)
    loaded = utils.load_config('/dev/null')
    assert loaded == defaults