Example #1
def _email_size_too_large(from_email, to_email, efs_name, size, max_size,
                          dry_run):
    '''Emails a notification that the EFS file system is too large'''
    email_body = EMAIL_BODY.format(efs_name, _get_readable_size(max_size),
                                   _get_readable_size(size))

    if dry_run:
        _print_email(to_email, from_email, EMAIL_SUBJECT, email_body)
    else:
        send_ses(from_email, EMAIL_SUBJECT, email_body, to_email)
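All of these examples delegate delivery to a send_ses helper that is not shown on this page. A minimal sketch of what such a helper could look like, assuming it wraps boto3's SES client; the region, credentials, and lack of error handling are assumptions, not part of the original code:

import boto3

def send_ses(from_addr, subject, body, to_addr):
    """Illustrative sketch: send a plain-text email through Amazon SES."""
    client = boto3.client("ses")  # region/credentials come from the environment
    client.send_email(
        Source=from_addr,
        Destination={"ToAddresses": [to_addr]},
        Message={
            "Subject": {"Data": subject},
            "Body": {"Text": {"Data": body}},
        },
    )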
Example #2
def _email_about_users(invalid_users, max_size, from_email, to_email, dry_run):
    '''Emails the list of users who are exceeding quota'''
    email_body = '\n'.join(
        ('{}: {}, Quota: {}'.format(directory, _get_readable_size(size),
                                    _get_readable_size(max_size))
         for directory, size, max_size in invalid_users))
    if dry_run:
        _print_email(to_email, from_email, EMAIL_SUBJECT_USER_LIST, email_body)
    else:
        send_ses(from_email, EMAIL_SUBJECT_USER_LIST, email_body, to_email)
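Examples #1 through #3 also call a _get_readable_size helper whose implementation is not included here. A plausible sketch, assuming binary (1024-based) units:

def _get_readable_size(num_bytes):
    """Sketch: format a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'."""
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if num_bytes < 1024.0:
            return "{:.1f} {}".format(num_bytes, unit)
        num_bytes /= 1024.0
    return "{:.1f} PB".format(num_bytes)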
Example #3
def _email_users(invalid_users, max_size, from_email, to_email, dry_run):
    '''Emails users about the size of their directories.
       This assumes directory names are the user's email'''
    for directory, size, max_size in invalid_users:
        _, email = os.path.split(directory)
        email_body = USER_EMAIL_BODY.format(
            _get_readable_size(max_size), _get_readable_size(size),
            _get_readable_size(size - max_size))
        if dry_run:
            _print_email(email, from_email, USER_EMAIL_SUBJECT, email_body)
        else:
            send_ses(from_email, USER_EMAIL_SUBJECT, email_body, email)
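A dry-run invocation of the two user-quota helpers above, with made-up directories and quota values; invalid_users is assumed to be a list of (directory, size, quota) tuples, matching the unpacking in both loops. The dry-run path still relies on the module's _print_email helper and subject constants, which are not shown here.

# Made-up data: (directory, size in bytes, per-user quota in bytes).
invalid_users = [
    ("/efs/home/user@example.com", 15 * 1024**3, 10 * 1024**3),
]
_email_about_users(invalid_users, 10 * 1024**3, "efs-alerts@example.com",
                   "admins@example.com", dry_run=True)
_email_users(invalid_users, 10 * 1024**3, "efs-alerts@example.com",
             "admins@example.com", dry_run=True)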
Example #4
def email_histogram_subscribers(current_date, target_date, notifiable_histograms,
                                expired_histograms, notify_sheriffs=False, dry_run=False):
    if len(notifiable_histograms) == 0: # nothing to send any alerts about
        return

    # organize histograms into buckets indexed by email
    email_histogram_names = {GENERAL_TELEMETRY_ALERT: []}
    for histogram_name, entry in notifiable_histograms:
        for email in entry.get("alert_emails", []):
            email_histogram_names.setdefault(email, []).append(histogram_name)
        email_histogram_names[GENERAL_TELEMETRY_ALERT].append(histogram_name)

    if notify_sheriffs: # if the sheriffs are to be alerted, they should get a list of all expiring histograms
        email_histogram_names[SHERIFF_ALERT] = email_histogram_names[GENERAL_TELEMETRY_ALERT]

    # send emails to users detailing the histograms that they are subscribed to that are expiring
    for email, expiring_histogram_names in email_histogram_names.items():
        expiring_list = "\n".join("* {name} expires in version {version} ({watchers}) - {description}".format(
            name=name, version=version_normalize_nightly(entry["expires_in_version"]),
            watchers="watched by {}".format(", ".join(email for email in entry["alert_emails"])) if "alert_emails" in entry else "no watchers",
            description=entry["description"]
        ) for name, entry in notifiable_histograms if name in expiring_histogram_names)
        if email != GENERAL_TELEMETRY_ALERT: # alert to a normal watcher
            email_body = (
                "The following histograms will be expiring on {}, and should be removed from the codebase, or have their expiry versions updated:\n\n{}\n\n"
                "This is an automated message sent by Cerberus. See https://github.com/mozilla/cerberus for details and source code."
            ).format(target_date, expiring_list)
        else: # alert to the general Telemetry alert mailing list
            expired_list = "\n".join("* {name} expired in version {version} ({watchers}) - {description}".format(
                name=name, version=version_normalize_nightly(entry["expires_in_version"]),
                watchers="watched by {}".format(", ".join(email for email in entry["alert_emails"])) if "alert_emails" in entry else "no watchers",
                description=entry["description"]
            ) for name, entry in expired_histograms)
            email_body = (
                "The following histograms will be expiring on {}, and should be removed from the codebase, or have their expiry versions updated:\n\n{}\n\n"
                "The following histograms are expired as of {}:\n\n{}\n\n"
                "This is an automated message sent by Cerberus. See https://github.com/mozilla/cerberus for details and source code."
            ).format(target_date, expiring_list, current_date, expired_list)
        if dry_run:
            print("Email notification for {}:\n===============================================\n{}\n===============================================\n".format(email, email_body))
        else:
            print("Sending email notification to {} with body:\n\n{}\n".format(email, email_body))
            send_ses(FROM_ADDR, "Telemetry Histogram Expiry", email_body, email)
Example #5
def mail_alert(descriptor, histogram, date):
    global histograms

    if args.dry_run:
        return

    body = "This alert was generated because the distribution of the histogram " + histogram +\
           " has changed on the " + date + ". Please have a look at the following plot: http://vitillo.github.io/cerberus/dashboard/#" + date + histogram

    past_alert_emails = descriptor.get('alert_emails', [])
    alert_emails = ["*****@*****.**"]
    if histogram in histograms and 'alert_emails' in histograms[histogram]:
        alert_emails = histograms[histogram]['alert_emails']

    # Retroactively send e-mails to new subscribers
    for email in alert_emails:
        if email not in past_alert_emails:
            send_ses("*****@*****.**",
                     "Histogram regression detected", body, email)

    descriptor['alert_emails'] = alert_emails
Example #6
def mail_alert(descriptor, histogram, date):
    global histograms

    if args.dry_run:
        return

    body = "This alert was generated because the distribution of the histogram " + histogram +\
           " has changed on the " + date + ". Please have a look at the following plot: http://mozilla.github.io/cerberus/dashboard/#" + date + histogram

    past_alert_emails = descriptor.get('alert_emails', [])
    alert_emails = ["*****@*****.**"]
    if histogram in histograms and 'alert_emails' in histograms[histogram]:
        alert_emails += histograms[histogram]['alert_emails']

    # Retroactively send e-mails to new subscribers
    for email in alert_emails:
        if email not in past_alert_emails:
            send_ses("*****@*****.**", "Distribution change detected for " + histogram,
                     body, email)

    descriptor['alert_emails'] = alert_emails
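Both mail_alert variants keep the set of previously notified addresses in descriptor['alert_emails'] and only mail addresses that are new since the last run; Example #6 additionally appends the histogram's own subscribers to the default list instead of replacing it. A small illustration of that bookkeeping, with placeholder addresses standing in for the redacted ones above:

# Placeholder addresses; the originals above are redacted.
descriptor = {"alert_emails": ["old-subscriber@example.com"]}
alert_emails = ["old-subscriber@example.com", "new-subscriber@example.com"]

new_subscribers = [e for e in alert_emails
                   if e not in descriptor.get("alert_emails", [])]
# new_subscribers == ["new-subscriber@example.com"], so only this address is emailed
descriptor["alert_emails"] = alert_emails  # remember everyone for the next run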
Example #7
if __name__ == "__main__":
    # process command line arguments
    if not (2 <= len(sys.argv) <= 3) or sys.argv[1] not in {"email", "test"}:
        print_help()
        sys.exit(1)
    is_dry_run = sys.argv[1] == "test"
    now = date.today()
    if len(sys.argv) >= 3:
        try:
            now = datetime.strptime(sys.argv[2], "%Y-%m-%d").date()
        except ValueError:
            print("Unknown/invalid date: {}".format(sys.argv[2]))
            print_help()
            sys.exit(1)

    if is_dry_run:
        if is_job_failing(now):
            print("Crash aggregates have not been updated for 2 days as of {}.".format(now))
        else:
            print("Crash aggregates have been updated within 2 days before {}.".format(now))
    elif is_job_failing(now):
        print("Sending email notification about crash aggregates not being updated to {}.".format(GENERAL_TELEMETRY_ALERT))
        email_body = (
            "As of {}, the daily crash aggregates job [1] has not output results for 2 days. This is an automated message from Cerberus [2].\n"
            "\n"
            "[1]: https://github.com/mozilla/moz-crash-rate-aggregates\n"
            "[2]: https://github.com/mozilla/cerberus\n"
        ).format(now)
        send_ses(FROM_ADDR, "[FAILURE] Crash aggregates not updating", email_body, GENERAL_TELEMETRY_ALERT)