async def test_negative():
    url = "https://fake.local"

    next_month = utcnow() + timedelta(days=30)
    fake_cert = mock.MagicMock(not_valid_before=utcnow(),
                               not_valid_after=next_month)

    with mock.patch(f"{MODULE}.fetch_cert", return_value=fake_cert):
        status, data = await run(url, min_remaining_days=40)

    assert status is False
    assert data == {"expires": next_month.isoformat()}

async def test_positive_bounded_maximum():
    url = "https://fake.local"

    last_year = utcnow() - timedelta(days=365)
    next_month = utcnow() + timedelta(days=30)
    fake_cert = mock.MagicMock(not_valid_before=last_year,
                               not_valid_after=next_month)

    with mock.patch(f"{MODULE}.fetch_cert", return_value=fake_cert):
        status, data = await run(url, max_remaining_days=7)

    assert status is True
    assert data == {"expires": next_month.isoformat()}

async def test_positive():
    url = "https://fake.local"

    next_month = utcnow() + timedelta(days=30)
    fake_cert = mock.MagicMock(not_valid_before=utcnow(),
                               not_valid_after=next_month)

    with mock.patch(f"{MODULE}.fetch_cert", return_value=fake_cert) as mocked:
        status, data = await run(url)
        mocked.assert_called_with(url)

    assert status is True
    assert data == {"expires": next_month.isoformat()}
Example No. 4
async def run(server: str,
              auth: str,
              max_approvals: int = 7,
              max_age_approvals: int = 7) -> CheckResult:
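    # Kinto last_modified timestamps are epoch milliseconds, hence the * 1000 below.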
    min_timestamp = (utcnow() -
                     timedelta(days=max_age_approvals)).timestamp() * 1000

    client = KintoClient(server_url=server, auth=auth)

    resources = await fetch_signed_resources(server, auth)
    source_collections = [(r["source"]["bucket"], r["source"]["collection"])
                          for r in resources
                          if r["last_modified"] >= min_timestamp]

    futures = [
        get_latest_approvals(client, bid, cid, max_approvals, min_timestamp)
        for (bid, cid) in source_collections
    ]
    results = await run_parallel(*futures)

    collections_entries = []
    for (bid, cid), entries in zip(source_collections, results):
        for entry in entries:
            collections_entries.append({"source": f"{bid}/{cid}", **entry})

    # Sort collections by latest approval descending.
    approvals = sorted(
        collections_entries,
        key=lambda item: item["datetime"],
        reverse=True,
    )

    return True, approvals
Example No. 5
async def run(
    url: str,
    percentage_remaining_validity: int = 5,
    min_remaining_days: int = LOWER_MIN_REMAINING_DAYS,
    max_remaining_days: int = UPPER_MIN_REMAINING_DAYS,
) -> CheckResult:
    cert = await fetch_cert(url)
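    # Certificate dates come back as naive datetimes; pin them to UTC so they
    # can be compared with utcnow().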
    start = cert.not_valid_before.replace(tzinfo=datetime.timezone.utc)
    end = cert.not_valid_after.replace(tzinfo=datetime.timezone.utc)
    lifespan = (end - start).days

    # The minimum remaining days depends on the certificate lifespan.
    relative_minimum = lifespan * percentage_remaining_validity / 100
    bounded_minimum = int(
        min(max_remaining_days, max(min_remaining_days, relative_minimum)))
    remaining_days = (end - utcnow()).days

    logger.debug(
        f"Certificate lasts {lifespan} days and ends in {remaining_days} days "
        f"({remaining_days - bounded_minimum} days before alert).")

    success = remaining_days > bounded_minimum
    return success, {
        "expires": end.isoformat(),
    }
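
For reference, a minimal invocation sketch for this check; the day bounds are
illustrative stand-ins for LOWER_MIN_REMAINING_DAYS and UPPER_MIN_REMAINING_DAYS,
and the URL must serve a certificate that fetch_cert can retrieve:

import asyncio

status, details = asyncio.run(
    run("https://example.org", min_remaining_days=10, max_remaining_days=30))
print(status, details["expires"])
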
async def test_positive(mock_responses):
    server_url = "http://fake.local/v1"
    mock_http_calls(mock_responses, server_url)

    next_month = utcnow() + timedelta(days=30)
    fake_cert = mock.MagicMock(not_valid_before=utcnow(),
                               not_valid_after=next_month)

    module = "checks.remotesettings.certificates_expiration"
    with patch_async(f"{module}.fetch_certs",
                     return_value=[fake_cert]) as mocked:
        status, data = await run(server_url, min_remaining_days=29)
        mocked.assert_called_with("http://fake-x5u")

    assert status is True
    assert data == {}

async def test_positive(mock_responses):
    server_url = "http://fake.local/v1"

    collection_url = server_url + COLLECTION_URL.format("bid", "cid")
    mock_responses.get(
        collection_url,
        payload={
            "data": {
                "status": "work-in-progress",
                "last_edit_date": (utcnow() - timedelta(days=10)).isoformat(),
                "last_edit_by": "ldap:[email protected]",
            }
        },
    )
    collection_url = server_url + COLLECTION_URL.format("bid", "cid2")
    mock_responses.get(
        collection_url,
        payload={
            "data": {
                "status": "signed",
                "last_edit_date": "2017-08-01T01:00.000"
            }
        },
    )

    with patch_async(f"{MODULE}.fetch_signed_resources",
                     return_value=RESOURCES):
        status, data = await run(server_url, FAKE_AUTH, max_age=25)

    assert status is True
    assert data == {}
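
# Presumably a test double mimicking Taskcluster's Queue.status() response
# (cf. the latest_indexed check further down).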
async def status(self, task_id):
    return {
        "status": {
            "runs": [
                {"resolved": (utcnow() - timedelta(seconds=11)).isoformat()}
            ]
        }
    }
Example No. 9
async def get_signature_age_hours(client, bucket, collection):
    resp = await client.get_collection(bucket=bucket, id=collection)
    data = resp["data"]
    signature_date = data["last_signature_date"]
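    # The date string is assumed to carry a UTC offset, so fromisoformat()
    # returns an aware datetime comparable with utcnow().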
    dt = datetime.fromisoformat(signature_date)
    delta = utcnow() - dt
    age = int(delta.days * 24 + delta.seconds / 3600)
    return age

async def run(
    server: str,
    percentage_remaining_validity: int = 10,
    min_remaining_days: int = LOWER_MIN_REMAINING_DAYS,
    max_remaining_days: int = UPPER_MIN_REMAINING_DAYS,
) -> CheckResult:
    client = KintoClient(server_url=server)
    entries = await client.get_monitor_changes()

    # First, fetch all collections metadata in parallel.
    futures = [fetch_collection_metadata(server, entry) for entry in entries]
    results = await run_parallel(*futures)
    entries_metadata = zip(entries, results)

    # Second, deduplicate the list of x5u URLs and fetch them in parallel.
    x5us = list(set(metadata["signature"]["x5u"] for metadata in results))
    futures = [fetch_certs(x5u) for x5u in x5us]
    results = await run_parallel(*futures)

    validity: Dict[str, Tuple] = {}
    for x5u, certs in zip(x5us, results):
        # For each cert of the chain, keep track of the one that ends the earliest.
        for cert in certs:
            end = cert.not_valid_after.replace(tzinfo=datetime.timezone.utc)
            if x5u not in validity or end < validity[x5u][0]:
                start = cert.not_valid_before.replace(
                    tzinfo=datetime.timezone.utc)
                lifespan = (end - start).days
                validity[x5u] = end, lifespan

    # Return collections whose certificate expires too soon.
    errors: Dict[str, Dict] = {}
    for entry, metadata in entries_metadata:
        cid = "{bucket}/{collection}".format(**entry)
        x5u = metadata["signature"]["x5u"]
        end, lifespan = validity[x5u]

        # The minimum remaining days depends on the certificate lifespan.
        relative_minimum = lifespan * percentage_remaining_validity / 100
        bounded_minimum = int(
            min(max_remaining_days, max(min_remaining_days, relative_minimum)))
        remaining_days = (end - utcnow()).days
        logger.debug(
            f"{cid} cert lasts {lifespan} days and ends in {remaining_days} days "
            f"({remaining_days - bounded_minimum} days before alert).")
        if remaining_days < bounded_minimum:
            errors[cid] = {"x5u": x5u, "expires": end.isoformat()}
    """
    {
      "main/normandy-recipes": {
        "x5u": "https://content-signature-2.cdn.mozilla.net/chains/remote-settings.content-signature.mozilla.org-2019-10-22-18-54-26.chain",
        "expires": "2019-10-22T18:54:26"
    },
    """
    return len(errors) == 0, errors
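
Both certificate checks derive their alert threshold with the same clamp; here is
a standalone sketch of that arithmetic (the function name is hypothetical):

def bounded_minimum_days(lifespan: int, percentage: int, lower: int, upper: int) -> int:
    # Keep `percentage` percent of the lifespan, floored at `lower` and capped at `upper` days.
    return int(min(upper, max(lower, lifespan * percentage / 100)))

# A 90-day certificate at 10% yields 9 days, raised to the 10-day floor.
assert bounded_minimum_days(90, 10, lower=10, upper=30) == 10
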
async def test_negative(mock_responses):
    server_url = "http://fake.local/v1"

    collection_url = server_url + COLLECTION_URL.format("bid", "cid")
    mock_responses.get(
        collection_url,
        payload={
            "data": {
                "status": "to-review",
                "last_edit_by": "ldap:[email protected]",
                "last_edit_date": (utcnow() - timedelta(days=10)).isoformat(),
            }
        },
    )
    group_url = server_url + GROUP_URL.format("bid", "cid-editors")
    mock_responses.get(
        group_url, payload={"data": {
            "members": ["ldap:[email protected]"]
        }})
    collection_url = server_url + COLLECTION_URL.format("bid", "cid2")
    mock_responses.get(collection_url,
                       payload={"data": {
                           "status": "work-in-progress"
                       }})
    group_url = server_url + GROUP_URL.format("bid", "cid2-editors")
    mock_responses.get(
        group_url, payload={"data": {
            "members": ["ldap:[email protected]"]
        }})

    with patch_async(f"{MODULE}.fetch_signed_resources",
                     return_value=RESOURCES):
        status, data = await run(server_url, FAKE_AUTH, max_age=5)

    assert status is False
    assert data == {
        "main/cid": {
            "age": 10,
            "status": "to-review",
            "last_edit_by": "ldap:[email protected]",
            "editors": ["ldap:[email protected]"],
        },
        "main/cid2": {
            "age": sys.maxsize,
            "status": "work-in-progress",
            "last_edit_by": "N/A",
            "editors": ["ldap:[email protected]"],
        },
    }

async def run(server: str, cdn: str, min_age: int = 300) -> CheckResult:
    origin_client = KintoClient(server_url=server)
    entries = await origin_client.get_monitor_changes()

    # Fetch timestamps on source server.
    origin_futures = [
        fetch_timestamps(origin_client,
                         bucket=entry["bucket"],
                         collection=entry["collection"]) for entry in entries
    ]
    origin_results = await run_parallel(*origin_futures)

    # Do exactly the same with CDN.
    cdn_client = KintoClient(server_url=cdn)
    cdn_futures = [
        fetch_timestamps(cdn_client,
                         bucket=entry["bucket"],
                         collection=entry["collection"]) for entry in entries
    ]
    cdn_results = await run_parallel(*cdn_futures)

    # Make sure everything matches.
    collections = {}
    for entry, origin_result, cdn_result in zip(entries, origin_results,
                                                cdn_results):
        origin_col_ts, origin_records_ts = origin_result
        cdn_col_ts, cdn_records_ts = cdn_result

        age_seconds = utcnow().timestamp() - (origin_col_ts / 1000)
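        # Grace period: a fresh publication may not have reached the CDN yet.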
        if age_seconds > min_age and (origin_col_ts != cdn_col_ts
                                      or origin_records_ts != cdn_records_ts):
            collections["{bucket}/{collection}".format(**entry)] = {
                "source": {
                    "collection": origin_col_ts,
                    "records": origin_records_ts
                },
                "cdn": {
                    "collection": cdn_col_ts,
                    "records": cdn_records_ts
                },
            }

    return len(collections) == 0, collections
Example No. 13
async def run(
    repositories: List[str],
    max_opened_pulls: int = 7,
    min_days_last_activity: int = 7,
    max_days_last_activity: int = 45,
) -> CheckResult:
    async with ClientSession() as session:
        futures = [pulls_info(session, repo) for repo in repositories]
        results = await run_parallel(*futures)

        now = utcnow()
        success = True
        infos = {}
        for (repo, pulls) in zip(repositories, results):
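            # Last-activity timestamps of the non-draft pull requests.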
            opened = [
                utcfromisoformat(p["updated_at"]) for p in pulls
                if not p["draft"]
            ]
            if len(opened) == 0:
                continue
            # Fail if opened PR hasn't received recent activity.
            age_pulls = [(now - dt).days for dt in opened]
            if max(age_pulls) > max_days_last_activity:
                success = False
            # Fail if too many opened PR.
            old_pulls = [
                age for age in age_pulls if age > min_days_last_activity
            ]
            if len(old_pulls) > max_opened_pulls:
                success = False
            infos[repo] = {
                "pulls": {
                    "old": len(old_pulls),
                    "total": len(opened),
                }
            }
        # Sort results with repos with most old PRs first.
        sorted_by_old = dict(
            sorted(infos.items(),
                   key=lambda item: item[1]["pulls"]["old"],
                   reverse=True))
        return success, sorted_by_old
Example No. 14
async def run(
    root_url: str,
    secret_name: str = DEFAULT_NAME,
    expires_seconds: int = DEFAULT_EXPIRES_SECONDS,
    client_id: str = "",
    access_token: str = "",
    certificate: str = "",
) -> CheckResult:
    # Build connection infos from parameters.
    options = tc_utils.options_from_params(root_url, client_id, access_token,
                                           certificate)

    secrets = taskcluster.aio.Secrets(options)

    # 1. Write and read.
    payload = {
        "expires":
        (utils.utcnow() + timedelta(seconds=expires_seconds)).isoformat(),
        "secret": {
            "hello": "beautiful world"
        },
    }
    await secrets.set(secret_name, payload)
    try:
        await secrets.get(secret_name)
    except taskcluster.exceptions.TaskclusterRestFailure:
        return False, f"Secret {secret_name!r} could not be retrieved"

    # 2. Remove and check.
    await secrets.remove(secret_name)
    try:
        await secrets.get(secret_name)
        return False, f"Secret {secret_name!r} was not removed"
    except taskcluster.exceptions.TaskclusterRestFailure as e:
        if getattr(e, "status_code") != 404:
            raise

    return True, {}
Example No. 15
async def run(remotesettings_server: str,
              push_server: str,
              lag_margin: int = 600) -> CheckResult:
    rs_timestamp = await get_remotesettings_timestamp(remotesettings_server)
    push_timestamp = await get_push_timestamp(push_server)

    rs_datetime = utcfromtimestamp(rs_timestamp)
    push_datetime = utcfromtimestamp(push_timestamp)

    return (
        # Fail if timestamps are different and data was published a while ago.
        rs_timestamp == push_timestamp
        or (utcnow() - rs_datetime).total_seconds() < lag_margin,
        {
            "push": {
                "timestamp": push_timestamp,
                "datetime": push_datetime.isoformat(),
            },
            "remotesettings": {
                "timestamp": rs_timestamp,
                "datetime": rs_datetime.isoformat(),
            },
        },
    )
Example No. 16
async def run(server: str, auth: str, max_age: int) -> CheckResult:
    resources = await fetch_signed_resources(server, auth)

    client = KintoClient(server_url=server, auth=auth)

    futures = [
        client.get_collection(bucket=resource["source"]["bucket"],
                              id=resource["source"]["collection"])
        for resource in resources
    ]
    results = await run_parallel(*futures)

    too_old = {}
    for resource, resp in zip(resources, results):
        metadata = resp["data"]
        # For this check, since we want to detect pending changes,
        # we also consider work-in-progress a pending request review.
        if metadata["status"] not in ("work-in-progress", "to-review"):
            continue

        try:
            last_edit = metadata["last_edit_date"]
            last_edit_by = metadata["last_edit_by"]
            dt = datetime.fromisoformat(last_edit)
            age = (utcnow() - dt).days
        except KeyError:
            # Never edited.
            age = sys.maxsize
            last_edit_by = "N/A"

        if age > max_age:
            # Fetch list of editors, if necessary to contact them.
            group = await client.get_group(
                bucket=resource["source"]["bucket"],
                id=resource["source"]["collection"] + "-editors",
            )
            editors = group["data"]["members"]

            cid = "{bucket}/{collection}".format(**resource["destination"])
            too_old[cid] = {
                "age": age,
                "status": metadata["status"],
                "last_edit_by": last_edit_by,
                "editors": editors,
            }
    """
    {
      "security-state/cert-revocations": {
        "age": 82,
        "status": "to-review",
        "last_edit_by": "ldap:[email protected]",
        "editors": [
          "ldap:[email protected]",
          "ldap:[email protected]",
          "account:crlite_publisher"
        ]
      }
    }
    """
    data = dict(
        sorted(too_old.items(), key=lambda item: item[1]["age"], reverse=True))
    return len(data) == 0, data
Example No. 17
async def run(
    max_age: int,
    index_path: str,
    artifacts_names: List[str],
    root_url: str,
    client_id: str = "",
    access_token: str = "",
    certificate: str = "",
) -> CheckResult:
    """
    Example configuration:

    .. code-block:: toml

        [checks.queue.latest-indexed]
        description = ""
        module = "checks.taskcluster.latest_indexed"
        params.root_url = "${TASKCLUSTER_ROOT_URL}"
        params.client_id = "${TASKCLUSTER_CLIENT_ID}"
        params.access_token = "${TASKCLUSTER_ACCESS_TOKEN}"
        params.max_age = 360
        params.index_path = "project.taskcluster.telescope.periodic-task"
        params.artifacts_names = ["public/results/status.json"]

    """
    # Build connection infos from parameters.
    options = tc_utils.options_from_params(
        root_url, client_id, access_token, certificate
    )

    # 1. Get the task id from the index.
    index = taskcluster.aio.Index(options)
    try:
        indexed_task = await index.findTask(index_path)
        task_id = indexed_task["taskId"]
    except taskcluster.exceptions.TaskclusterRestFailure as e:
        if getattr(e, "status_code") != 404:
            raise
        # No indexed task found. Failing.
        return False, f"No task found at {index_path!r}"

    # 2. Inspect the task using the queue.
    queue = taskcluster.aio.Queue(options)
    futures = [queue.latestArtifactInfo(task_id, a) for a in artifacts_names]
    try:
        artifacts = await utils.run_parallel(*futures)
    except taskcluster.exceptions.TaskclusterRestFailure as e:
        failed_call = e.body["requestInfo"]["params"]
        return False, "Artifact {name!r} of task {taskId!r} not available".format(
            **failed_call
        )

    # 3. Verify that latest run is not too old.
    status = await queue.status(task_id)
    last_run = status["status"]["runs"][-1]
    resolved_at = utils.utcfromisoformat(last_run["resolved"])
    age_task = utils.utcnow() - resolved_at
    # timedelta.seconds ignores whole days; use total_seconds() for the true age.
    age_seconds = int(age_task.total_seconds())
    if age_seconds > max_age:
        return (
            False,
            f"Latest task at {index_path!r} ({task_id!r}) is {age_seconds} seconds old",
        )

    # 4. Success! Return status info.
    return True, {
        **status,
        "artifacts": artifacts,
    }
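
A hedged invocation sketch, reusing the values from the docstring's configuration
example (the root URL is a placeholder, and anonymous credentials are assumed to
suffice for reading the index):

import asyncio

status, info = asyncio.run(run(
    max_age=360,
    index_path="project.taskcluster.telescope.periodic-task",
    artifacts_names=["public/results/status.json"],
    root_url="https://tc.example.com",
))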