Example No. 1
def test_alert_change_type_absolute(test_repository,
                                    failure_classifications,
                                    generic_reference_data,
                                    test_perf_signature, new_value,
                                    expected_num_alerts):
    # modify the test signature to say that we alert on absolute value
    # (as opposed to percentage change)
    test_perf_signature.alert_change_type = PerformanceSignature.ALERT_ABS
    test_perf_signature.alert_threshold = 0.3
    test_perf_signature.save()

    base_time = time.time()  # generate it based on the current time
    INTERVAL = 30
    _generate_performance_data(test_repository, test_perf_signature,
                               generic_reference_data,
                               base_time, 1, 0.5, INTERVAL // 2)
    _generate_performance_data(test_repository, test_perf_signature,
                               generic_reference_data,
                               base_time, (INTERVAL // 2) + 1, new_value,
                               INTERVAL // 2)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == expected_num_alerts
    assert PerformanceAlertSummary.objects.count() == expected_num_alerts
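The new_value and expected_num_alerts arguments are injected by a @pytest.mark.parametrize decorator that is not shown in this snippet. A minimal sketch of what it could look like, with illustrative values chosen against the absolute threshold of 0.3 set above (the concrete parameter pairs are assumptions, not the project's actual ones):

import pytest

# Hypothetical parametrization: with an absolute threshold of 0.3, a move
# from 0.5 to 0.4 (delta 0.1) should raise no alert, while a move from
# 0.5 to 1.0 (delta 0.5) should raise exactly one alert (and one summary).
@pytest.mark.parametrize(('new_value', 'expected_num_alerts'),
                         [(0.4, 0), (1.0, 1)])
def test_alert_change_type_absolute(test_repository, failure_classifications,
                                    generic_reference_data,
                                    test_perf_signature, new_value,
                                    expected_num_alerts):
    ...  # body as shown above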
Example No. 2
def test_detect_alerts_in_series_with_retriggers(
        test_repository, failure_classifications,
        generic_reference_data, test_perf_signature):

    # Sometimes we detect an alert in the middle of a series that
    # contains retriggers. Make sure we handle this case gracefully by
    # generating a sequence where the regression "appears" in the middle
    # of a series for a single push, and check that the values are
    # calculated correctly (in this case, we move from a consistent 0.5
    # to a 0.5/1.0 mix).
    base_time = time.time()  # generate it based on the current time
    for i in range(20):
        _generate_performance_data(test_repository, test_perf_signature,
                                   generic_reference_data,
                                   base_time, 1, 0.5, 1)
    for i in range(5):
        _generate_performance_data(test_repository, test_perf_signature,
                                   generic_reference_data,
                                   base_time, 2, 0.5, 1)
    for i in range(15):
        _generate_performance_data(test_repository, test_perf_signature,
                                   generic_reference_data,
                                   base_time, 2, 1.0, 1)

    generate_new_alerts_in_series(test_perf_signature)
    _verify_alert(1, 2, 1, test_perf_signature, 0.5, 0.875, True,
                  PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)
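_verify_alert is a shared helper that is not included in these snippets. Based on the inline verify_alert defined in Example No. 27 and on the extra status/classifier arguments passed here, a minimal sketch could look as follows (the exact field names on the alert and summary objects, in particular push_id, prev_push_id, status and classifier, are assumptions):

def _verify_alert(alertid, expected_push_id, expected_prev_push_id,
                  expected_signature, expected_prev_value,
                  expected_new_value, expected_is_regression,
                  expected_status, expected_summary_status,
                  expected_classifier):
    # Check the alert itself ...
    alert = PerformanceAlert.objects.get(id=alertid)
    assert alert.prev_value == expected_prev_value
    assert alert.new_value == expected_new_value
    assert alert.series_signature == expected_signature
    assert alert.is_regression == expected_is_regression
    assert alert.status == expected_status
    assert alert.classifier == expected_classifier  # assumed field name

    # ... and the summary it was grouped into (field names assumed).
    summary = PerformanceAlertSummary.objects.get(id=alertid)
    assert summary.push_id == expected_push_id
    assert summary.prev_push_id == expected_prev_push_id
    assert summary.status == expected_summary_status

Example No. 3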
def test_no_alerts_with_old_data(
    test_repository,
    test_issue_tracker,
    failure_classifications,
    generic_reference_data,
    test_perf_signature,
):
    base_time = 0  # 1970, too old!
    INTERVAL = 30
    _generate_performance_data(
        test_repository,
        test_perf_signature,
        test_issue_tracker,
        generic_reference_data,
        base_time,
        1,
        0.5,
        int(INTERVAL / 2),
    )
    _generate_performance_data(
        test_repository,
        test_perf_signature,
        test_issue_tracker,
        generic_reference_data,
        base_time,
        int(INTERVAL / 2) + 1,
        1.0,
        int(INTERVAL / 2),
    )

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0
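_generate_performance_data is another shared helper that the newer snippets rely on but do not define. Judging from the call sites, it receives the repository, the signature, the reference-data fixtures, a base timestamp, a starting push id, a value and the number of data points to create. A rough sketch, assuming it simply writes one PerformanceDatum per push the way the older inline examples do (the real helper also creates the push/job records and, in some versions, takes test_issue_tracker as well):

def _generate_performance_data(test_repository, test_perf_signature,
                               generic_reference_data,
                               base_time, start_id, value, amount):
    # Create `amount` consecutive data points with the same value,
    # one per (pseudo) push, starting at push id `start_id`.
    for t in range(start_id, start_id + amount):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=t,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(base_time + t),
            value=value)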
Example No. 4
def test_custom_alert_threshold(
        test_repository, failure_classifications,
        generic_reference_data, test_perf_signature):

    test_perf_signature.alert_threshold = 200.0
    test_perf_signature.save()

    # Under the default settings this set of data would generate
    # 2 alerts, but with the artificially high threshold of 200%
    # set above, only 1 should be generated.
    INTERVAL = 60
    base_time = time.time()
    _generate_performance_data(test_repository, test_perf_signature,
                               generic_reference_data,
                               base_time, 1, 0.5, INTERVAL // 3)
    _generate_performance_data(test_repository, test_perf_signature,
                               generic_reference_data,
                               base_time, (INTERVAL // 3) + 1, 0.6, INTERVAL // 3)
    _generate_performance_data(test_repository, test_perf_signature,
                               generic_reference_data,
                               base_time, 2 * (INTERVAL // 3) + 1, 2.0, INTERVAL // 3)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
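For reference, the percentage change that the 200% threshold is compared against is simply (new - prev) / prev * 100 (that this is the exact formula used by the alert generator is an assumption, but it matches the "200%" wording of the comment). The step from 0.5 to 0.6 is only a 20% change and stays below the threshold, while the step from 0.6 to 2.0 is roughly a 233% change and exceeds it, which is why exactly one alert is expected:

def pct_change(prev, new):
    # Percentage change relative to the previous value.
    return (new - prev) / prev * 100.0

assert round(pct_change(0.5, 0.6)) == 20    # below the 200% threshold
assert round(pct_change(0.6, 2.0)) == 233   # above the 200% threshold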
Example No. 5
def test_custom_alert_threshold(
        test_project, test_repository, test_perf_signature):

    test_perf_signature.alert_threshold = 200.0
    test_perf_signature.save()

    # Under the default settings this set of data would generate
    # 2 alerts, but with the artificially high threshold of 200%
    # set above, only 1 should be generated.
    INTERVAL = 60
    now = time.time()
    for (t, v) in zip([i for i in range(INTERVAL)],
                      ([0.5 for i in range(INTERVAL // 3)] +
                       [0.6 for i in range(INTERVAL // 3)] +
                       [2.0 for i in range(INTERVAL // 3)])):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=t,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
Example No. 6
    def handle(self, *args, **options):
        if not options['project']:
            raise CommandError("Must specify at least one project with "
                               "--project")
        for project in options['project']:
            repository = models.Repository.objects.get(name=project)

            signatures = PerformanceSignature.objects.filter(repository=repository)

            if options['signature']:
                signatures_to_process = signatures.filter(signature_hash__in=options['signature'])
            else:
                hashes_to_ignore = set()
                # if doing everything, only handle series which are not a
                # subtest of another (we should alert only the parent series
                # in that case)
                for signature in signatures:
                    # Don't alert on subtests which have a summary
                    hashes_to_ignore.update(
                        signature.extra_properties.get('subtest_signatures', [])
                    )
                signatures_to_process = [
                    signature
                    for signature in signatures
                    if signature.signature_hash not in hashes_to_ignore
                ]

            for signature in signatures_to_process:
                generate_new_alerts_in_series(signature)
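The --project and --signature options consumed in handle() would be declared in the command's add_arguments method, which is not part of this snippet. A plausible sketch (the action='append' choice and the help strings are assumptions):

    def add_arguments(self, parser):
        # Both options may be given multiple times and accumulate into
        # lists, which is how handle() above iterates over them.
        parser.add_argument('--project', action='append',
                            help='Project (repository) to generate alerts for')
        parser.add_argument('--signature', action='append',
                            help='Signature hash to generate alerts for')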
Example No. 7
def test_detect_alerts_in_series_with_retriggers(
        test_project, test_repository, test_perf_signature):

    # Sometimes we detect an alert in the middle of a series that
    # contains retriggers. Make sure we handle this case gracefully by
    # generating a sequence where the regression "appears" in the middle
    # of a series for a single resultset, and check that the values are
    # calculated correctly.
    now = time.time()
    for (t, j, v) in zip(
            ([1 for i in range(30)] +
             [2 for i in range(60)]),
            [i for i in range(90)],
            ([0.5 for i in range(50)] +
             [1.0 for i in range(40)])
    ):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=j,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)
    generate_new_alerts_in_series(test_perf_signature)
    _verify_alert(1, 2, 1, test_perf_signature, 0.5, 1.0, True,
                  PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)
Example No. 8
def test_custom_alert_threshold(test_repository, test_issue_tracker,
                                failure_classifications,
                                generic_reference_data, test_perf_signature):

    test_perf_signature.alert_threshold = 200.0
    test_perf_signature.save()

    # Under the default settings this set of data would generate
    # 2 alerts, but with the artificially high threshold of 200%
    # set above, only 1 should be generated.
    INTERVAL = 60
    base_time = time.time()
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time, 1, 0.5, int(INTERVAL / 3))
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time,
                               int(INTERVAL / 3) + 1, 0.6, int(INTERVAL / 3))
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time, 2 * int(INTERVAL / 3) + 1, 2.0,
                               int(INTERVAL / 3))

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
Example No. 9
    def handle(self, *args, **options):
        if not options['project']:
            raise CommandError("Must specify at least one project with "
                               "--project")
        for project in options['project']:
            repository = models.Repository.objects.get(name=project)

            signatures = PerformanceSignature.objects.filter(
                repository=repository)

            if options['signature']:
                signatures_to_process = signatures.filter(
                    signature_hash__in=options['signature'])
            else:
                hashes_to_ignore = set()
                # if doing everything, only handle series which are not a
                # subtest of another (we should alert only the parent series
                # in that case)
                for signature in signatures:
                    # Don't alert on subtests which have a summary
                    hashes_to_ignore.update(
                        signature.extra_properties.get('subtest_signatures',
                                                       []))
                signatures_to_process = [signature for signature in signatures
                                         if signature.signature_hash not in
                                         hashes_to_ignore]

            for signature in signatures_to_process:
                generate_new_alerts_in_series(signature)
Example No. 10
def test_alert_change_type_absolute(test_repository, test_issue_tracker,
                                    failure_classifications,
                                    generic_reference_data,
                                    test_perf_signature, new_value,
                                    expected_num_alerts):
    # modify the test signature to say that we alert on absolute value
    # (as opposed to percentage change)
    test_perf_signature.alert_change_type = PerformanceSignature.ALERT_ABS
    test_perf_signature.alert_threshold = 0.3
    test_perf_signature.save()

    base_time = time.time()  # generate it based on the current time
    INTERVAL = 30
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time, 1, 0.5, int(INTERVAL / 2))
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time,
                               int(INTERVAL / 2) + 1, new_value,
                               int(INTERVAL / 2))

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == expected_num_alerts
    assert PerformanceAlertSummary.objects.count() == expected_num_alerts
Example No. 11
def test_detect_alerts_in_series_with_retriggers(
        test_project, test_repository, test_perf_signature):

    # Sometimes we detect an alert in the middle of a series that
    # contains retriggers. Make sure we handle this case gracefully by
    # generating a sequence where the regression "appears" in the middle
    # of a series for a single resultset, and check that the values are
    # calculated correctly.
    now = time.time()
    for (t, j, v) in zip(
            ([1 for i in range(30)] +
             [2 for i in range(60)]),
            [i for i in range(90)],
            ([0.5 for i in range(50)] +
             [1.0 for i in range(40)])
    ):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=j,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)
    generate_new_alerts_in_series(test_perf_signature)
    _verify_alert(1, 2, 1, test_perf_signature, 0.5, 1.0, True)
Example No. 12
def test_detect_alerts_in_series_with_retriggers(test_repository,
                                                 test_issue_tracker,
                                                 failure_classifications,
                                                 generic_reference_data,
                                                 test_perf_signature):

    # Sometimes we detect an alert in the middle of a series that
    # contains retriggers. Make sure we handle this case gracefully by
    # generating a sequence where the regression "appears" in the middle
    # of a series for a single push, and check that the values are
    # calculated correctly (in this case, we move from a consistent 0.5
    # to a 0.5/1.0 mix).
    base_time = time.time()  # generate it based on the current time
    for i in range(20):
        _generate_performance_data(test_repository, test_perf_signature,
                                   test_issue_tracker, generic_reference_data,
                                   base_time, 1, 0.5, 1)
    for i in range(5):
        _generate_performance_data(test_repository, test_perf_signature,
                                   test_issue_tracker, generic_reference_data,
                                   base_time, 2, 0.5, 1)
    for i in range(15):
        _generate_performance_data(test_repository, test_perf_signature,
                                   test_issue_tracker, generic_reference_data,
                                   base_time, 2, 1.0, 1)

    generate_new_alerts_in_series(test_perf_signature)
    _verify_alert(1, 2, 1, test_perf_signature, 0.5, 0.875, True,
                  PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)
Example No. 13
def test_no_alerts_with_old_data(
        test_project, test_repository, test_perf_signature):
    base_time = 0  # 1970, too old!
    INTERVAL = 30
    _generate_performance_data(test_repository, test_perf_signature,
                               base_time, 1, 0.5, INTERVAL // 2)
    _generate_performance_data(test_repository, test_perf_signature,
                               base_time, (INTERVAL // 2) + 1, 1.0, INTERVAL // 2)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0
Example No. 14
def test_no_alerts_with_old_data(test_project, test_repository,
                                 test_perf_signature):
    base_time = 0  # 1970, too old!
    INTERVAL = 30
    _generate_performance_data(test_repository, test_perf_signature, base_time,
                               1, 0.5, INTERVAL // 2)
    _generate_performance_data(test_repository, test_perf_signature, base_time,
                               (INTERVAL // 2) + 1, 1.0, INTERVAL // 2)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0
Example No. 15
def test_no_alerts_with_old_data(
        test_project, test_repository, test_perf_signature):
    INTERVAL = 30
    for (t, v) in zip([i for i in range(INTERVAL)],
                      ([0.5 for i in range(INTERVAL // 2)] +
                       [1.0 for i in range(INTERVAL // 2)])):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=t,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0
Example No. 16
def test_no_alerts_with_old_data(
        test_project, test_repository, test_perf_signature):
    INTERVAL = 30
    for (t, v) in zip([i for i in range(INTERVAL)],
                      ([0.5 for i in range(INTERVAL // 2)] +
                       [1.0 for i in range(INTERVAL // 2)])):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=t,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0
Example No. 17
def test_no_alerts_with_old_data(
        test_repository, test_issue_tracker,
        failure_classifications, generic_reference_data, test_perf_signature):
    base_time = 0  # 1970, too old!
    INTERVAL = 30
    _generate_performance_data(test_repository,
                               test_perf_signature,
                               test_issue_tracker,
                               generic_reference_data,
                               base_time, 1, 0.5, int(INTERVAL / 2))
    _generate_performance_data(test_repository,
                               test_perf_signature,
                               test_issue_tracker,
                               generic_reference_data,
                               base_time, int(INTERVAL / 2) + 1, 1.0, int(INTERVAL / 2))

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 0
    assert PerformanceAlertSummary.objects.count() == 0
Example No. 18
def test_custom_alert_threshold(test_project, test_repository,
                                test_perf_signature, jm):

    test_perf_signature.alert_threshold = 200.0
    test_perf_signature.save()

    # Under the default settings this set of data would generate
    # 2 alerts, but with the artificially high threshold of 200%
    # set above, only 1 should be generated.
    INTERVAL = 60
    base_time = time.time()
    _generate_performance_data(test_repository, test_perf_signature, base_time,
                               1, 0.5, INTERVAL // 3)
    _generate_performance_data(test_repository, test_perf_signature, base_time,
                               (INTERVAL // 3) + 1, 0.6, INTERVAL // 3)
    _generate_performance_data(test_repository, test_perf_signature, base_time,
                               2 * (INTERVAL // 3) + 1, 2.0, INTERVAL // 3)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
Example No. 19
def test_detect_alerts_in_series_with_retriggers(test_project, test_repository,
                                                 test_perf_signature):

    # Sometimes we detect an alert in the middle of a series that
    # contains retriggers. Make sure we handle this case gracefully by
    # generating a sequence where the regression "appears" in the middle
    # of a series for a single push, and check that the values are
    # calculated correctly.
    base_time = time.time()  # generate it based on the current time
    for i in range(30):
        _generate_performance_data(test_repository, test_perf_signature,
                                   base_time, 1, 0.5, 1)
    for i in range(20):
        _generate_performance_data(test_repository, test_perf_signature,
                                   base_time, 2, 0.5, 1)
    for i in range(40):
        _generate_performance_data(test_repository, test_perf_signature,
                                   base_time, 2, 1.0, 1)

    generate_new_alerts_in_series(test_perf_signature)
    _verify_alert(1, 2, 1, test_perf_signature, 0.5, 1.0, True,
                  PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)
Example No. 20
def test_detect_alerts_in_series_with_retriggers(
        test_project, test_repository, test_perf_signature):

    # Sometimes we detect an alert in the middle of a series that
    # contains retriggers. Make sure we handle this case gracefully by
    # generating a sequence where the regression "appears" in the middle
    # of a series for a single push, and check that the values are
    # calculated correctly.
    base_time = time.time()  # generate it based on the current time
    for i in range(30):
        _generate_performance_data(test_repository, test_perf_signature,
                                   base_time, 1, 0.5, 1)
    for i in range(20):
        _generate_performance_data(test_repository, test_perf_signature,
                                   base_time, 2, 0.5, 1)
    for i in range(40):
        _generate_performance_data(test_repository, test_perf_signature,
                                   base_time, 2, 1.0, 1)

    generate_new_alerts_in_series(test_perf_signature)
    _verify_alert(1, 2, 1, test_perf_signature, 0.5, 1.0, True,
                  PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)
Example No. 21
def test_detect_alerts_in_series(test_project, test_repository,
                                 test_perf_signature):

    INTERVAL = 30
    now = time.time()
    for (t, v) in zip(
        [i for i in range(INTERVAL)],
        ([0.5 for i in range(INTERVAL // 2)] +
         [1.0 for i in range(INTERVAL // 2)])):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=t,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL / 2), (INTERVAL / 2) - 1, test_perf_signature,
                  0.5, 1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)

    # verify that no new alerts are generated if we rerun
    generate_new_alerts_in_series(test_perf_signature)
    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL / 2), (INTERVAL / 2) - 1, test_perf_signature,
                  0.5, 1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)

    # add data to generate a new alert
    for (t, v) in zip([i for i in range(INTERVAL, INTERVAL * 2)],
                      [2.0 for i in range(INTERVAL)]):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=0,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 2
    assert PerformanceAlertSummary.objects.count() == 2
    _verify_alert(2, INTERVAL, INTERVAL - 1, test_perf_signature, 1.0, 2.0,
                  True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)
Example No. 22
def test_detect_alerts_in_series(test_project, test_repository,
                                 test_perf_signature):

    INTERVAL = 30
    now = time.time()
    for (t, v) in zip([i for i in range(INTERVAL)],
                      ([0.5 for i in range(INTERVAL // 2)] +
                       [1.0 for i in range(INTERVAL // 2)])):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=t,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL/2), (INTERVAL/2)-1, test_perf_signature, 0.5,
                  1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)

    # verify that no new alerts are generated if we rerun
    generate_new_alerts_in_series(test_perf_signature)
    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL/2), (INTERVAL/2)-1, test_perf_signature, 0.5,
                  1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)

    # add data to generate a new alert
    for (t, v) in zip([i for i in range(INTERVAL, INTERVAL*2)],
                      [2.0 for i in range(INTERVAL)]):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=0,
            signature=test_perf_signature,
            push_timestamp=datetime.datetime.fromtimestamp(now + t),
            value=v)

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 2
    assert PerformanceAlertSummary.objects.count() == 2
    _verify_alert(2, INTERVAL, INTERVAL-1, test_perf_signature, 1.0, 2.0,
                  True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED)
Example No. 23
def test_detect_alerts_in_series(test_repository,
                                 test_issue_tracker,
                                 failure_classifications,
                                 generic_reference_data,
                                 test_perf_signature):

    base_time = time.time()  # generate it based on the current time
    INTERVAL = 30
    _generate_performance_data(test_repository,
                               test_perf_signature,
                               test_issue_tracker,
                               generic_reference_data,
                               base_time, 1, 0.5, int(INTERVAL / 2))
    _generate_performance_data(test_repository,
                               test_perf_signature,
                               test_issue_tracker,
                               generic_reference_data,
                               base_time, int(INTERVAL / 2) + 1, 1.0, int(INTERVAL / 2))

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL/2)+1, (INTERVAL/2), test_perf_signature, 0.5,
                  1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)

    # verify that no new alerts are generated if we rerun
    generate_new_alerts_in_series(test_perf_signature)
    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL/2)+1, (INTERVAL/2), test_perf_signature, 0.5,
                  1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)

    # add data that should be enough to generate a new alert if we rerun
    _generate_performance_data(test_repository,
                               test_perf_signature,
                               test_issue_tracker,
                               generic_reference_data,
                               base_time, (INTERVAL+1), 2.0, INTERVAL)
    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 2
    assert PerformanceAlertSummary.objects.count() == 2
    _verify_alert(2, INTERVAL+1, INTERVAL, test_perf_signature, 1.0, 2.0,
                  True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)
Example No. 24
def test_detect_alerts_in_series(test_repository, test_issue_tracker,
                                 failure_classifications,
                                 generic_reference_data, test_perf_signature):

    base_time = time.time()  # generate it based on the current time
    INTERVAL = 30
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time, 1, 0.5, int(INTERVAL / 2))
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time,
                               int(INTERVAL / 2) + 1, 1.0, int(INTERVAL / 2))

    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL / 2) + 1, (INTERVAL / 2), test_perf_signature,
                  0.5, 1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)

    # verify that no new alerts are generated if we rerun
    generate_new_alerts_in_series(test_perf_signature)
    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    _verify_alert(1, (INTERVAL / 2) + 1, (INTERVAL / 2), test_perf_signature,
                  0.5, 1.0, True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)

    # add data that should be enough to generate a new alert if we rerun
    _generate_performance_data(test_repository, test_perf_signature,
                               test_issue_tracker, generic_reference_data,
                               base_time, (INTERVAL + 1), 2.0, INTERVAL)
    generate_new_alerts_in_series(test_perf_signature)

    assert PerformanceAlert.objects.count() == 2
    assert PerformanceAlertSummary.objects.count() == 2
    _verify_alert(2, INTERVAL + 1, INTERVAL, test_perf_signature, 1.0, 2.0,
                  True, PerformanceAlert.UNTRIAGED,
                  PerformanceAlertSummary.UNTRIAGED, None)
Example No. 25
def generate_alerts(signature_id):
    newrelic.agent.add_custom_parameter("signature_id", signature_id)
    signature = PerformanceSignature.objects.get(id=signature_id)
    generate_new_alerts_in_series(signature)
Example No. 26
def generate_alerts(signature_id):
    signature = PerformanceSignature.objects.get(id=signature_id)
    generate_new_alerts_in_series(signature)
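A hedged usage sketch: the task body above can be driven for every signature in a repository, mirroring what the management command in Example No. 9 does synchronously (the helper name below is hypothetical, and in the real project the per-signature calls are presumably dispatched through a task queue rather than run in a loop):

def generate_alerts_for_repository(repository):
    # Kick off one alert-generation pass per performance signature
    # belonging to the given repository.
    signature_ids = PerformanceSignature.objects.filter(
        repository=repository).values_list('id', flat=True)
    for signature_id in signature_ids:
        generate_alerts(signature_id)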
Example No. 27
def test_detect_alerts_in_series(test_project, test_repository):
    framework = PerformanceFramework.objects.create(
        name='test_talos')
    option = Option.objects.create(name='opt')
    option_collection = OptionCollection.objects.create(
        option_collection_hash='my_option_hash',
        option=option)
    platform = MachinePlatform.objects.create(
        os_name='win',
        platform='win7',
        architecture='x86',
        active_status='active')

    signature = PerformanceSignature.objects.create(
        repository=test_repository,
        signature_hash=(40*'t'),
        framework=framework,
        platform=platform,
        option_collection=option_collection,
        suite='mysuite',
        test='mytest'
    )

    INTERVAL = 30
    for (t, v) in zip([i for i in range(INTERVAL)],
                      ([0.5 for i in range(INTERVAL // 2)] +
                       [1.0 for i in range(INTERVAL // 2)])):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=0,
            signature=signature,
            push_timestamp=datetime.datetime.fromtimestamp(t),
            value=v)

    generate_new_alerts_in_series(signature)

    def verify_alert(alertid, expected_result_set_id,
                     expected_prev_result_set_id,
                     expected_signature, expected_prev_value,
                     expected_new_value, is_regression):
        alert = PerformanceAlert.objects.get(id=alertid)
        assert alert.prev_value == expected_prev_value
        assert alert.new_value == expected_new_value
        assert alert.series_signature == expected_signature
        assert alert.is_regression == is_regression

        summary = PerformanceAlertSummary.objects.get(id=alertid)
        assert summary.result_set_id == expected_result_set_id
        assert summary.prev_result_set_id == expected_prev_result_set_id

    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    verify_alert(1, (INTERVAL/2), (INTERVAL/2)-1, signature, 0.5, 1.0, True)

    # verify that no new alerts are generated if we rerun
    generate_new_alerts_in_series(signature)
    assert PerformanceAlert.objects.count() == 1
    assert PerformanceAlertSummary.objects.count() == 1
    verify_alert(1, (INTERVAL/2), (INTERVAL/2)-1, signature, 0.5, 1.0, True)

    # add data to generate a new alert
    for (t, v) in zip([i for i in range(INTERVAL, INTERVAL*2)],
                      [2.0 for i in range(INTERVAL)]):
        PerformanceDatum.objects.create(
            repository=test_repository,
            result_set_id=t,
            job_id=0,
            signature=signature,
            push_timestamp=datetime.datetime.fromtimestamp(t),
            value=v)

    generate_new_alerts_in_series(signature)

    assert PerformanceAlert.objects.count() == 2
    assert PerformanceAlertSummary.objects.count() == 2
    verify_alert(2, INTERVAL, INTERVAL-1, signature, 1.0, 2.0, True)