async def test_positive(mock_responses):
    server_url = "http://fake.local/v1"
    collection_url = server_url + COLLECTION_URL.format("bid", "cid")
    mock_responses.get(
        collection_url,
        payload={
            "data": {
                "status": "work-in-progress",
                "last_edit_date": (utcnow() - timedelta(days=10)).isoformat(),
                "last_edit_by": "ldap:[email protected]",
            }
        },
    )
    collection_url = server_url + COLLECTION_URL.format("bid", "cid2")
    mock_responses.get(
        collection_url,
        payload={
            "data": {
                "status": "signed",
                "last_edit_date": "2017-08-01T01:00.000",
            }
        },
    )

    with patch_async(f"{MODULE}.fetch_signed_resources", return_value=RESOURCES):
        status, data = await run(server_url, FAKE_AUTH, max_age=25)

    assert status is True
    assert data == {}
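# The tests in this section receive a `mock_responses` fixture that is defined
# elsewhere. As a minimal sketch, assuming the HTTP client is aiohttp-based and
# mocked with the `aioresponses` library (whose .get(url, payload=...) signature
# matches the calls above), the fixture could look like this; the real test
# suite may implement it differently.
import pytest
from aioresponses import aioresponses


@pytest.fixture
def mock_responses():
    # Yield an aioresponses mock so that mock_responses.get(url, payload=...)
    # intercepts outgoing aiohttp requests during the test.
    with aioresponses() as mocked:
        yield mocked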
async def run(server: str, auth: str, max_approvals: int = 7, max_age_approvals: int = 7) -> CheckResult:
    min_timestamp = (utcnow() - timedelta(days=max_age_approvals)).timestamp() * 1000

    client = KintoClient(server_url=server, auth=auth)
    resources = await fetch_signed_resources(server, auth)
    source_collections = [
        (r["source"]["bucket"], r["source"]["collection"])
        for r in resources
        if r["last_modified"] >= min_timestamp
    ]

    futures = [
        get_latest_approvals(client, bid, cid, max_approvals, min_timestamp)
        for (bid, cid) in source_collections
    ]
    results = await run_parallel(*futures)

    collections_entries = []
    for (bid, cid), entries in zip(source_collections, results):
        for entry in entries:
            collections_entries.append({"source": f"{bid}/{cid}", **entry})

    # Sort all approval entries by date, most recent first.
    approvals = sorted(
        collections_entries,
        key=lambda item: item["datetime"],
        reverse=True,
    )
    return True, approvals
async def run(server: str, auth: str, max_approvals: int = 7, max_age_approvals: int = 7) -> CheckResult:
    min_timestamp = (utcnow() - timedelta(days=max_age_approvals)).timestamp() * 1000

    client = KintoClient(server_url=server, auth=auth)
    resources = await fetch_signed_resources(server, auth)
    source_collections = [
        (r["source"]["bucket"], r["source"]["collection"])
        for r in resources
        if r["last_modified"] >= min_timestamp
    ]

    futures = [
        get_latest_approvals(client, bid, cid, max_approvals, min_timestamp)
        for (bid, cid) in source_collections
    ]
    results = await run_parallel(*futures)

    # Sort collections by latest approval descending. Collections without any
    # approval in the period sort last.
    date_sorted = sorted(
        zip(source_collections, results),
        key=lambda item: item[1][0]["datetime"] if len(item[1]) > 0 else "0000-00-00",
        reverse=True,
    )
    approvals = {
        f"{bid}/{cid}": entries
        for (bid, cid), entries in date_sorted
    }
    return True, approvals
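# Both variants of run() above fan the per-collection calls out through
# run_parallel(), which is defined elsewhere. A minimal sketch, assuming it is
# a thin wrapper around asyncio.gather() that preserves call order (so that
# zip(source_collections, results) keeps the pairing intact):
import asyncio


async def run_parallel(*futures):
    # Await all coroutines concurrently; results come back in the same order
    # as the arguments.
    return await asyncio.gather(*futures)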
async def get_signature_age_hours(client, bucket, collection):
    resp = await client.get_collection(bucket=bucket, id=collection)
    data = resp["data"]
    signature_date = data["last_signature_date"]
    dt = datetime.fromisoformat(signature_date)
    delta = utcnow() - dt
    age = int(delta.days * 24 + delta.seconds / 3600)
    return age
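# For illustration, get_signature_age_hours() could back a check that reports
# collections whose signature is older than a threshold. This wrapper is
# hypothetical (not part of the original code): its name, the max_age parameter,
# and the use of the destination bucket/collection are assumptions.
async def run_signature_age(server: str, auth: str, max_age: int = 24) -> CheckResult:
    client = KintoClient(server_url=server, auth=auth)
    resources = await fetch_signed_resources(server, auth)
    too_old = {}
    for resource in resources:
        bid = resource["destination"]["bucket"]
        cid = resource["destination"]["collection"]
        age = await get_signature_age_hours(client, bid, cid)
        if age > max_age:
            too_old[f"{bid}/{cid}"] = age
    return len(too_old) == 0, too_old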
async def run(server: str, auth: str, max_age: int) -> CheckResult:
    resources = await fetch_signed_resources(server, auth)

    client = KintoClient(server_url=server, auth=auth)

    futures = [
        client.get_collection(
            bucket=resource["source"]["bucket"],
            id=resource["source"]["collection"],
        )
        for resource in resources
    ]
    results = await run_parallel(*futures)

    too_old = {}
    for resource, resp in zip(resources, results):
        metadata = resp["data"]
        # Since this check is about detecting pending changes, a collection
        # left in work-in-progress also counts as a pending review request.
        if metadata["status"] not in ("work-in-progress", "to-review"):
            continue

        try:
            last_edit = metadata["last_edit_date"]
            dt = datetime.fromisoformat(last_edit)
            age = (utcnow() - dt).days
        except KeyError:
            # Never edited.
            age = sys.maxsize

        if age > max_age:
            # Fetch the list of editors, in case they need to be contacted.
            group = await client.get_group(
                bucket=resource["source"]["bucket"],
                id=resource["source"]["collection"] + "-editors",
            )
            editors = group["data"]["members"]
            cid = "{bucket}/{collection}".format(**resource["destination"])
            too_old[cid] = {
                "age": age,
                "status": metadata["status"],
                "last_edit_by": metadata.get("last_edit_by", "N/A"),
                "editors": editors,
            }

    """
    {
      "security-state/cert-revocations": {
        "age": 82,
        "status": "to-review",
        "editors": [
          "ldap:[email protected]",
          "ldap:[email protected]",
          "account:crlite_publisher"
        ]
      }
    }
    """
    data = dict(sorted(too_old.items(), key=lambda item: item[1]["age"], reverse=True))
    return len(data) == 0, data
async def test_positive(mock_responses):
    server_url = "http://fake.local/v1"
    mock_http_calls(mock_responses, server_url)

    next_month = utcnow() + timedelta(days=30)
    fake_cert = mock.MagicMock(not_valid_after=next_month)
    module = "checks.remotesettings.certificates_expiration"
    with patch_async(f"{module}.fetch_cert", return_value=fake_cert) as mocked:
        status, data = await run(server_url, min_remaining_days=29)

    mocked.assert_called_with("http://fake-x5u")

    assert status is True
    assert data == {}
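# patch_async() is used throughout these tests but is not defined in this
# section. A minimal sketch, assuming it behaves like unittest.mock.patch()
# with an awaitable replacement (AsyncMock, Python 3.8+); keyword arguments
# such as return_value are forwarded to the mock:
from unittest import mock


def patch_async(target, **kwargs):
    # Patch `target` with an AsyncMock so the patched coroutine can be awaited
    # and inspected afterwards (e.g. mocked.assert_called_with(...)).
    return mock.patch(target, new_callable=mock.AsyncMock, **kwargs)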
async def test_negative(mock_responses):
    server_url = "http://fake.local/v1"
    collection_url = server_url + COLLECTION_URL.format("bid", "cid")
    mock_responses.get(
        collection_url,
        payload={
            "data": {
                "status": "to-review",
                "last_edit_by": "ldap:[email protected]",
                "last_edit_date": (utcnow() - timedelta(days=10)).isoformat(),
            }
        },
    )
    group_url = server_url + GROUP_URL.format("bid", "cid-editors")
    mock_responses.get(
        group_url,
        payload={"data": {"members": ["ldap:[email protected]"]}},
    )
    collection_url = server_url + COLLECTION_URL.format("bid", "cid2")
    mock_responses.get(
        collection_url,
        payload={"data": {"status": "work-in-progress"}},
    )
    group_url = server_url + GROUP_URL.format("bid", "cid2-editors")
    mock_responses.get(
        group_url,
        payload={"data": {"members": ["ldap:[email protected]"]}},
    )

    with patch_async(f"{MODULE}.fetch_signed_resources", return_value=RESOURCES):
        status, data = await run(server_url, FAKE_AUTH, max_age=5)

    assert status is False
    assert data == {
        "main/cid": {
            "age": 10,
            "status": "to-review",
            "last_edit_by": "ldap:[email protected]",
            "editors": ["ldap:[email protected]"],
        },
        "main/cid2": {
            "age": sys.maxsize,
            "status": "work-in-progress",
            "last_edit_by": "N/A",
            "editors": ["ldap:[email protected]"],
        },
    }
async def test_positive(mock_responses):
    server_url = "http://fake.local/v1"
    module = "checks.remotesettings.latest_approvals"
    resources = [
        {
            "last_modified": utcnow().timestamp() * 1000,
            "source": {"bucket": "bid", "collection": "cid"},
        }
    ]
    with patch_async(f"{module}.fetch_signed_resources", return_value=resources):
        with patch_async(f"{module}.get_latest_approvals", return_value=INFOS):
            status, data = await run(server_url, FAKE_AUTH)

    assert status is True
    assert data == {"bid/cid": INFOS}
async def run(server: str, cdn: str, min_age: int = 300) -> CheckResult:
    origin_client = KintoClient(server_url=server)
    entries = await origin_client.get_records(bucket="monitor", collection="changes")

    # Fetch timestamps on the source server.
    origin_futures = [
        fetch_timestamps(origin_client, bucket=entry["bucket"], collection=entry["collection"])
        for entry in entries
    ]
    origin_results = await run_parallel(*origin_futures)

    # Do exactly the same with the CDN.
    cdn_client = KintoClient(server_url=cdn)
    cdn_futures = [
        fetch_timestamps(cdn_client, bucket=entry["bucket"], collection=entry["collection"])
        for entry in entries
    ]
    cdn_results = await run_parallel(*cdn_futures)

    # Make sure everything matches.
    collections = {}
    for entry, origin_result, cdn_result in zip(entries, origin_results, cdn_results):
        origin_col_ts, origin_records_ts = origin_result
        cdn_col_ts, cdn_records_ts = cdn_result

        # Changes made less than `min_age` seconds ago are given a grace period
        # to propagate to the CDN before any mismatch is reported.
        age_seconds = utcnow().timestamp() - (origin_col_ts / 1000)
        if age_seconds > min_age and (
            origin_col_ts != cdn_col_ts or origin_records_ts != cdn_records_ts
        ):
            collections["{bucket}/{collection}".format(**entry)] = {
                "source": {"collection": origin_col_ts, "records": origin_records_ts},
                "cdn": {"collection": cdn_col_ts, "records": cdn_records_ts},
            }

    return len(collections) == 0, collections
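# fetch_timestamps() is defined elsewhere. One plausible implementation, shown
# here as a sketch under the assumption that the KintoClient wrapper exposes
# kinto-http's get_collection() and get_records_timestamp() methods, returns
# the collection timestamp and the records timestamp as a pair:
async def fetch_timestamps(client, bucket, collection):
    # The collection's last_modified is an epoch timestamp in milliseconds,
    # which is what the age computation above expects.
    metadata = await client.get_collection(bucket=bucket, id=collection)
    collection_timestamp = metadata["data"]["last_modified"]
    records_timestamp = await client.get_records_timestamp(bucket=bucket, collection=collection)
    return collection_timestamp, records_timestamp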
async def run(server: str, min_remaining_days: int) -> CheckResult:
    client = KintoClient(server_url=server, bucket="monitor", collection="changes")
    entries = await client.get_records()

    # First, fetch all collections metadata in parallel.
    futures = [fetch_collection_metadata(server, entry) for entry in entries]
    results = await run_parallel(*futures)
    entries_metadata = zip(entries, results)

    # Second, deduplicate the list of x5u URLs and fetch them in parallel.
    x5us = list(set(metadata["signature"]["x5u"] for metadata in results))
    futures = [fetch_cert(x5u) for x5u in x5us]
    results = await run_parallel(*futures)
    expirations = {
        x5u: cert.not_valid_after.replace(tzinfo=datetime.timezone.utc)
        for x5u, cert in zip(x5us, results)
    }

    # Return the collections whose certificate expires too soon.
    errors = {}
    for entry, metadata in entries_metadata:
        cid = "{bucket}/{collection}".format(**entry)
        x5u = metadata["signature"]["x5u"]
        expiration = expirations[x5u]

        remaining_days = (expiration - utcnow()).days
        if remaining_days < min_remaining_days:
            errors[cid] = {"x5u": x5u, "expires": expiration.isoformat()}

    """
    {
      "main/normandy-recipes": {
        "x5u": "https://content-signature-2.cdn.mozilla.net/chains/remote-settings.content-signature.mozilla.org-2019-10-22-18-54-26.chain",
        "expires": "2019-10-22T18:54:26"
      },
    }
    """
    return len(errors) == 0, errors
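# fetch_cert() downloads and parses the certificate chain behind the x5u URL.
# A minimal sketch, assuming aiohttp for the download and the `cryptography`
# package for parsing; the real helper may differ (e.g. reuse a shared session
# or validate the whole chain rather than just the first certificate):
import aiohttp
from cryptography import x509


async def fetch_cert(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            pem = await response.read()
    # The x5u resource is a PEM chain; keep only the first (leaf) certificate,
    # whose not_valid_after attribute is read by the check above.
    first_block = pem.split(b"-----END CERTIFICATE-----")[0] + b"-----END CERTIFICATE-----"
    return x509.load_pem_x509_certificate(first_block)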