Example #1
@pytest.fixture
def metricsmock():
    """Return a MetricsMock context manager for capturing metrics records.

    Usage::

        def test_something(metricsmock):
            with metricsmock as mm:
                # do stuff
                assert mm.has_record(
                    stat='some.stat',
                    kwargs_contains={
                        'something': 1
                    }
                )

    """
    return MetricsMock()
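
A test consuming a fixture like this enters the returned MetricsMock itself. Below is a minimal sketch, assuming the fixture above is collected by pytest (for example from a conftest.py); the "myapp" namespace and "myapp.requests" stat are made-up names for illustration.

import markus


def test_request_is_counted(metricsmock):
    with metricsmock as mm:
        mymetrics = markus.get_metrics("myapp")
        mymetrics.incr("requests")  # incr records value=1 by default
        assert mm.has_record("incr", stat="myapp.requests", value=1)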
Example #2
    def test_filter_records_value(self):
        with MetricsMock() as mm:
            mymetrics = markus.get_metrics("foobar")
            mymetrics.incr("key1", value=1, tags=["env:stage"])

            key1_metrics = mm.filter_records(fun_name="incr", stat="foobar.key1")
            assert len(key1_metrics) == 1

            key1_metrics = mm.filter_records(
                fun_name="incr", stat="foobar.key1", value=1
            )
            assert len(key1_metrics) == 1

            key1_metrics = mm.filter_records(
                fun_name="incr", stat="foobar.key1", value=5
            )
            assert len(key1_metrics) == 0
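
filter_records can also narrow on tags, as later examples on this page do. A minimal sketch in the same style, reusing the "env:stage" tag from the test above:

    def test_filter_records_tags(self):
        with MetricsMock() as mm:
            mymetrics = markus.get_metrics("foobar")
            mymetrics.incr("key1", value=1, tags=["env:stage"])

            # Filter on the record's tags list.
            tagged = mm.filter_records(stat="foobar.key1", tags=["env:stage"])
            assert len(tagged) == 1

            tagged = mm.filter_records(stat="foobar.key1", tags=["env:prod"])
            assert len(tagged) == 0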
Example #3
    def test_bugzilla_error_creates_error_notification(self):
        self.assertEqual(Notification.objects.count(), 0)

        self.mock_bugzilla_requests_post.side_effect = RequestException()

        with MetricsMock() as mm:
            with self.assertRaises(bugzilla.BugzillaError):
                tasks.create_experiment_bug_task(self.user.id,
                                                 self.experiment.id)

            self.assertTrue(
                mm.has_record(
                    markus.INCR,
                    "experiments.tasks.create_experiment_bug.started",
                    value=1,
                ))
            self.assertTrue(
                mm.has_record(
                    markus.INCR,
                    "experiments.tasks.create_experiment_bug.failed",
                    value=1,
                ))
            # Failures should abort timing metrics.
            self.assertFalse(
                mm.has_record(
                    markus.TIMING,
                    "experiments.tasks.create_experiment_bug.timing",
                ))
            # Completed metric should not be sent.
            self.assertFalse(
                mm.has_record(
                    markus.INCR,
                    "experiments.tasks.create_experiment_bug.completed",
                ))

        self.mock_bugzilla_requests_post.assert_called()
        self.assertEqual(Notification.objects.count(), 1)

        experiment = Experiment.objects.get(id=self.experiment.id)
        self.assertEqual(experiment.bugzilla_id, None)

        notification = Notification.objects.get()
        self.assertEqual(notification.user, self.user)
        self.assertEqual(notification.message,
                         tasks.NOTIFICATION_MESSAGE_CREATE_BUG_FAILED)
Example #4
    def test_histogram_helpers(self):
        with MetricsMock() as mm:
            markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])
            mymetrics = markus.get_metrics("foobar")
            mymetrics.histogram("key1", value=1)
            mymetrics.histogram("keymultiple", value=1)
            mymetrics.histogram("keymultiple", value=1)

            mm.assert_histogram(stat="foobar.key1")

            mm.assert_histogram_once(stat="foobar.key1")
            with pytest.raises(AssertionError):
                mm.assert_histogram_once(stat="foobar.keymultiple")

            mm.assert_not_histogram(stat="foobar.keynot")
            mm.assert_not_histogram(stat="foobar.key1", value=5)
            with pytest.raises(AssertionError):
                mm.assert_not_histogram(stat="foobar.key1")
Example #5
@pytest.fixture
def metricsmock():
    """Return a MetricsMock for asserting things on metrics.

    Usage::

        def test_something(metricsmock):
            metricsmock.assert_incr_once("some.stat")
            metricsmock.assert_incr_once("other.stat", tags=["tag:enum"])

    If you ever need to clear the records in the middle of a test, do::

        metricsmock.clear_records()

    For more, see: https://markus.readthedocs.io/en/latest/testing.html

    """
    with MetricsMock() as mm:
        yield mm
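
Because this fixture yields an already-entered MetricsMock, tests assert on it directly without opening their own context. A minimal sketch; the "app.ping" and "app.pong" stats are made-up names:

import markus


def test_pings_are_counted(metricsmock):
    markus.get_metrics("app").incr("ping")
    metricsmock.assert_incr_once("app.ping")

    # Wipe the captured records between phases, as the docstring above suggests.
    metricsmock.clear_records()
    markus.get_metrics("app").incr("pong")
    metricsmock.assert_incr_once("app.pong")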
Example #6
@pytest.fixture
def metricsmock():
    """Return a MetricsMock context manager for capturing metrics records.

    Usage::

        def test_something(metricsmock):
            with metricsmock as mm:
                # do stuff
                assert mm.has_record(
                    'incr',
                    stat='some.stat',
                    value=1
                )

    https://markus.readthedocs.io/en/latest/testing.html

    """
    return MetricsMock()
Example #7
    def test_it_sends_metrics(self, settings, mocked_autograph):
        # 3 to sign
        RecipeFactory.create_batch(3,
                                   approver=UserFactory(),
                                   enabler=UserFactory(),
                                   signed=False)
        # and 1 to unsign
        RecipeFactory(signed=True, enabled=False)

        with MetricsMock() as mm:
            call_command("update_recipe_signatures")
            mm.print_records()
            assert mm.has_record(GAUGE,
                                 stat="normandy.signing.recipes.signed",
                                 value=3)
            assert mm.has_record(GAUGE,
                                 stat="normandy.signing.recipes.unsigned",
                                 value=1)
Example #8
File: test_mock.py Project: willkg/markus
    def test_print_on_failure(self, capsys):
        with MetricsMock() as mm:
            markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])
            mymetrics = markus.get_metrics("foobar")
            mymetrics.histogram("keymultiple", value=1)
            mymetrics.histogram("keymultiple", value=1)

            with pytest.raises(AssertionError):
                mm.assert_histogram_once(stat="foobar.keymultiple")

            # On assertion error, the assert_* methods will print the metrics
            # records to stdout.
            captured = capsys.readouterr()
            expected = (
                "<MetricsRecord type=histogram key=foobar.keymultiple value=1 tags=[]>\n"
                "<MetricsRecord type=histogram key=foobar.keymultiple value=1 tags=[]>\n"
            )
            assert captured.out == expected
Example #9
    def test_experiment_bug_successfully_created(self):
        self.assertEqual(Notification.objects.count(), 0)

        with MetricsMock() as mm:
            tasks.create_experiment_bug_task(self.user.id, self.experiment.id)

            self.assertTrue(
                mm.has_record(
                    markus.INCR,
                    "experiments.tasks.create_experiment_bug.started",
                    value=1,
                )
            )
            self.assertTrue(
                mm.has_record(
                    markus.INCR,
                    "experiments.tasks.create_experiment_bug.completed",
                    value=1,
                )
            )
            self.assertTrue(
                mm.has_record(
                    markus.TIMING, "experiments.tasks.create_experiment_bug.timing"
                )
            )
            # Failed metric should not be sent.
            self.assertFalse(
                mm.has_record(
                    markus.INCR, "experiments.tasks.create_experiment_bug.failed"
                )
            )

        self.mock_bugzilla_requests_post.assert_called()

        experiment = Experiment.objects.get(id=self.experiment.id)
        self.assertEqual(experiment.bugzilla_id, self.bugzilla_id)

        notification = Notification.objects.get()
        self.assertEqual(notification.user, self.user)
        self.assertEqual(
            notification.message,
            tasks.NOTIFICATION_MESSAGE_CREATE_BUG.format(bug_url=experiment.bugzilla_url),
        )
Example #10
def test_simple_recommendation(test_ctx):
    # Fix the random seed so that we get stable results between test
    # runs
    np.random.seed(seed=42)

    with mock_install_mock_curated_data(test_ctx):
        EXPECTED_RESULTS = [("def", 3320.0), ("klm", 409.99999999999994),
                            ("hij", 3100.0), ("ijk", 3200.0), ("ghi", 3430.0),
                            ("lmn", 420.0), ("jkl", 400.0), ("abc", 23.0),
                            ("fgh", 22.0), ("efg", 21.0)]

        with MetricsMock() as mm:
            manager = RecommendationManager(test_ctx)
            recommendation_list = manager.recommend("some_ignored_id", 10)

            assert isinstance(recommendation_list, list)
            assert recommendation_list == EXPECTED_RESULTS

            assert mm.has_record(TIMING, stat="taar.profile_recommendation")
Example #11
    def test_configure_doesnt_affect_override(self):
        with MetricsMock() as mm:
            markus.configure([{
                'class': 'markus.backends.logging.LoggingMetrics'
            }])
            mymetrics = markus.get_metrics('foobar')
            mymetrics.incr('key1', value=1)

            assert mm.has_record(
                fun_name='incr',
                stat='foobar.key1',
                value=1,
            )

            assert not mm.has_record(
                fun_name='incr',
                stat='foobar.key1',
                value=5,
            )
Example #12
    def test_crash_size_capture(self):
        """Verify we capture raw/processed crash sizes in ES crashstorage"""
        with MetricsMock() as mm:
            es_storage = ESCrashStorage(config=self.config, namespace='processor.es')

            es_storage._submit_crash_to_elasticsearch = mock.Mock()

            es_storage.save_raw_and_processed(
                raw_crash=deepcopy(a_raw_crash),
                dumps=None,
                processed_crash=deepcopy(a_processed_crash),
                crash_id=a_processed_crash['uuid']
            )

            # NOTE(willkg): The sizes of these json documents depend on what's
            # in them. If we changed a_processed_crash and a_raw_crash, then
            # these numbers will change.
            assert mm.has_record('histogram', stat='processor.es.raw_crash_size', value=27)
            assert mm.has_record('histogram', stat='processor.es.processed_crash_size', value=1738)
Example #13
    def test_crash_size_capture(self):
        """Verify we capture raw/processed crash sizes in ES crashstorage"""
        with MetricsMock() as mm:
            es_storage = ESCrashStorage(config=self.config,
                                        namespace="processor.es")

            es_storage._submit_crash_to_elasticsearch = mock.Mock()

            es_storage.save_processed_crash(
                raw_crash=deepcopy(a_raw_crash),
                processed_crash=deepcopy(a_processed_crash),
            )

            # NOTE(willkg): The sizes of these json documents depend on what's
            # in them. If we changed a_processed_crash and a_raw_crash, then
            # these numbers will change.
            mm.print_records()
            mm.assert_histogram("processor.es.raw_crash_size", value=27)
            mm.assert_histogram("processor.es.processed_crash_size",
                                value=1721)
Example #14
    def test_index_data_capture(self):
        """Verify we capture index data in ES crashstorage"""
        with MetricsMock() as mm:
            es_storage = ESCrashStorage(config=self.config, namespace="processor.es")

            mock_connection = mock.Mock()
            # Do a successful indexing
            es_storage._index_crash(
                connection=mock_connection,
                es_index=None,
                es_doctype=None,
                crash_document=None,
                crash_id=None,
            )
            # Do a failed indexing
            mock_connection.index.side_effect = Exception
            with pytest.raises(Exception):
                es_storage._index_crash(
                    connection=mock_connection,
                    es_index=None,
                    es_doctype=None,
                    crash_document=None,
                    crash_id=None,
                )

            assert (
                len(
                    mm.filter_records(
                        stat="processor.es.index", tags=["outcome:successful"]
                    )
                )
                == 1
            )
            assert (
                len(
                    mm.filter_records(
                        stat="processor.es.index", tags=["outcome:failed"]
                    )
                )
                == 1
            )
Example #15
    def test_crash_size_capture(self):
        """Verify we capture raw/processed crash sizes in ES crashstorage"""
        raw_crash = {"ProductName": "Firefox", "ReleaseChannel": "nightly"}
        processed_crash = {
            "date_processed": "2012-04-08 10:56:41.558922",
            "uuid": "936ce666-ff3b-4c7a-9674-367fe2120408",
        }

        with MetricsMock() as mm:
            es_storage = ESCrashStorage(config=self.config,
                                        namespace="processor.es")

            es_storage._submit_crash_to_elasticsearch = mock.Mock()

            es_storage.save_processed_crash(
                raw_crash=raw_crash,
                processed_crash=processed_crash,
            )

            mm.assert_histogram("processor.es.raw_crash_size", value=55)
            mm.assert_histogram("processor.es.processed_crash_size", value=96)
Example #16
@pytest.fixture
def metricsmock():
    """Return a MetricsMock for asserting things on metrics.

    Usage::

        def test_something(metricsmock):
            assert metricsmock.has_record(
                'incr',
                stat='some.stat',
                value=1
            )

    If you ever need to clear the records in the middle of a test, do::

        metricsmock.clear_records()

    For more, see: https://markus.readthedocs.io/en/latest/testing.html

    """
    with MetricsMock() as mm:
        yield mm
Example #17
    def test_has_record(self):
        # NOTE(willkg): .has_record() is implemented using .filter_records() so
        # we can test that aggressively and just make sure the .has_record()
        # wrapper works fine.
        #
        # If that ever changes, we should update this test.
        with MetricsMock() as mm:
            mymetrics = markus.get_metrics('foobar')
            mymetrics.incr('key1', value=1)

            assert mm.has_record(
                fun_name='incr',
                stat='foobar.key1',
                value=1,
            )

            assert not mm.has_record(
                fun_name='incr',
                stat='foobar.key1',
                value=5,
            )
Example #18
    def test_submit_data_capture(self):
        with MetricsMock() as mm:
            conn = setup_mocked_s3_storage()

            # Do a successful submit
            conn.submit('fff13cf0-5671-4496-ab89-47a922141114',
                        'name_of_thing', thing_as_binary)
            # Do a failed submit
            conn._connect = mock.Mock()
            conn._connect.side_effect = Exception
            with pytest.raises(Exception):
                conn.submit('fff13cf0-5671-4496-ab89-47a922141114',
                            'name_of_thing', thing_as_binary)

            assert len(
                mm.filter_records(
                    stat='processor.s3.submit',
                    tags=['kind:name_of_thing', 'outcome:successful'])) == 1
            assert len(
                mm.filter_records(
                    stat='processor.s3.submit',
                    tags=['kind:name_of_thing', 'outcome:failed'])) == 1
Example #19
    def test_everything_we_hoped_for(self, mocked_subprocess_module):
        rule = self.build_rule()

        raw_crash = copy.copy(canonical_standard_raw_crash)
        raw_dumps = {rule.dump_field: 'a_fake_dump.dump'}
        processed_crash = DotDict()
        processor_meta = get_basic_processor_meta()

        mocked_subprocess_handle = mocked_subprocess_module.Popen.return_value
        mocked_subprocess_handle.stdout.read.return_value = canonical_stackwalker_output_str
        mocked_subprocess_handle.wait.return_value = 0

        with MetricsMock() as mm:
            rule.act(raw_crash, raw_dumps, processed_crash, processor_meta)

            assert processed_crash.json_dump == canonical_stackwalker_output
            assert processed_crash.mdsw_return_code == 0
            assert processed_crash.mdsw_status_string == "OK"
            assert processed_crash.success is True

            assert mm.has_record('incr',
                                 stat='processor.breakpadstackwalkerrule.run',
                                 value=1,
                                 tags=['outcome:success', 'exitcode:0'])
Example #20
    def test_filter_records_value(self):
        with MetricsMock() as mm:
            mymetrics = markus.get_metrics('foobar')
            mymetrics.incr('key1', value=1, tags=['env:stage'])

            key1_metrics = mm.filter_records(
                fun_name='incr',
                stat='foobar.key1',
            )
            assert len(key1_metrics) == 1

            key1_metrics = mm.filter_records(
                fun_name='incr',
                stat='foobar.key1',
                value=1,
            )
            assert len(key1_metrics) == 1

            key1_metrics = mm.filter_records(
                fun_name='incr',
                stat='foobar.key1',
                value=5,
            )
            assert len(key1_metrics) == 0
Example #21
    def test_upload(self):
        """Verify API symbols upload"""
        user = User.objects.create(username='******', email='*****@*****.**')
        self._add_permission(user, 'upload_symbols')
        token = Token.objects.create(
            user=user,
        )
        token.permissions.add(
            Permission.objects.get(codename='upload_symbols')
        )

        url = reverse('symbols:upload')
        response = self.client.get(url)
        assert response.status_code == 405

        with MetricsMock() as metrics_mock:
            with self.settings(MEDIA_ROOT=self.tmp_dir):
                with open(ZIP_FILE, 'rb') as file_object:
                    response = self.client.post(
                        url,
                        {'file.zip': file_object},
                        # note! No HTTP_AUTH_TOKEN
                    )
                    assert response.status_code == 403

                with open(ZIP_FILE, 'rb') as file_object:
                    response = self.client.post(
                        url,
                        {'file.zip': file_object},
                        HTTP_AUTH_TOKEN=''
                    )
                    assert response.status_code == 403

                with open(ZIP_FILE, 'rb') as file_object:
                    response = self.client.post(
                        url,
                        {'file.zip': file_object},
                        HTTP_AUTH_TOKEN='somejunk'
                    )
                    assert response.status_code == 403

                with open(ZIP_FILE, 'rb') as file_object:
                    response = self.client.post(
                        url,
                        {'file.zip': file_object},
                        HTTP_AUTH_TOKEN=token.key
                    )
                    assert response.status_code == 201
                    symbol_upload = models.SymbolsUpload.objects.get(user=user)
                    assert symbol_upload.filename == 'file.zip'
                    assert symbol_upload.size
                    assert symbol_upload.content

        # This should have made one S3 connection
        connection_parameters, = self.connection_parameters
        args, kwargs = connection_parameters
        region, = args
        assert region
        assert region == settings.SYMBOLS_BUCKET_DEFAULT_LOCATION
        assert kwargs['aws_access_key_id'] == settings.AWS_ACCESS_KEY
        assert kwargs['aws_secret_access_key'] == settings.AWS_SECRET_ACCESS_KEY
        assert isinstance(kwargs['calling_format'], OrdinaryCallingFormat)

        # the ZIP_FILE contains a file called south-africa-flag.jpeg
        key = os.path.join(
            settings.SYMBOLS_FILE_PREFIX,
            'south-africa-flag.jpeg'
        )
        assert self.uploaded_keys[key]
        assert self.uploaded_headers[key] == {'Content-Type': 'image/jpeg'}

        # and a file called xpcshell.sym
        key = os.path.join(
            settings.SYMBOLS_FILE_PREFIX,
            'xpcshell.sym'
        )
        assert self.uploaded_keys[key]
        expected = {
            'Content-Type': 'text/plain',
            'Content-Encoding': 'gzip'
        }
        assert self.uploaded_headers[key] == expected
        # The sample.zip file contains the file xpcshell.sym and it's
        # 1156 bytes when un-archived. Just gzip'in the content of the
        # file will yield a file that is 476 bytes.
        # But if you do it properly there's header information in the
        # string which is a couple of extra bytes.
        assert len(self.uploaded_keys[key]) == 488

        # Verify it generated the metrics we need--since only one POST was
        # successful, then there's only one incr record
        assert metrics_mock.has_record(
            markus.INCR, 'symbols.upload.api_upload', 1, tags=['email:test_example.com']
        )
Example #22
    def test_it_sends_metrics(self, settings, mocked_autograph):
        ActionFactory.create_batch(3, signed=False)
        with MetricsMock() as mm:
            call_command("update_action_signatures")
            mm.print_records()
            assert mm.has_record(GAUGE, stat="normandy.signing.actions.signed", value=3)
Example #23
@pytest.fixture
def metricsmock():
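    # Bare fixture: tests enter the context themselves ("with metricsmock as mm: ...").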
    return MetricsMock()
Example #24
    def test_web_upload(self):
        url = reverse('symbols:web_upload')
        response = self.client.get(url)
        assert response.status_code == 302
        self.assertRedirects(
            response,
            reverse('crashstats:login') + '?next=%s' % url
        )
        user = self._login()
        response = self.client.get(url)
        assert response.status_code == 302
        self.assertRedirects(
            response,
            reverse('crashstats:login') + '?next=%s' % url
        )
        # you need to have the permission
        self._add_permission(user, 'upload_symbols')

        response = self.client.get(url)
        assert response.status_code == 200

        with MetricsMock() as metrics_mock:
            # now we can post
            with self.settings(SYMBOLS_MIME_OVERRIDES={'jpeg': 'text/plain'}):
                with open(ZIP_FILE) as file_object:
                    response = self.client.post(
                        url,
                        {'file': file_object}
                    )
                    assert response.status_code == 302

        symbol_upload = models.SymbolsUpload.objects.get(user=user)
        assert symbol_upload.filename == os.path.basename(ZIP_FILE)
        assert symbol_upload.size
        # We expect the content to be a `+` because it was new,
        # followed by the bucket name, followed by a comma, followed
        # by the symbols prefix + filename.
        line = "+%s,%s/%s\n" % (
            settings.SYMBOLS_BUCKET_DEFAULT_NAME,
            settings.SYMBOLS_FILE_PREFIX,
            'south-africa-flag.jpeg'
        )
        line += "+%s,%s/%s\n" % (
            settings.SYMBOLS_BUCKET_DEFAULT_NAME,
            settings.SYMBOLS_FILE_PREFIX,
            'xpcshell.sym'
        )
        assert symbol_upload.content == line
        assert symbol_upload.content_type == 'text/plain'
        assert self.uploaded_keys
        # the mocked key object should have its content_type set too
        assert self.created_keys[0].content_type == 'text/plain'
        expected = [
            (settings.SYMBOLS_BUCKET_DEFAULT_NAME, settings.SYMBOLS_BUCKET_DEFAULT_LOCATION)
        ]
        assert self.created_buckets == expected

        # Verify it generated the metrics we need
        assert metrics_mock.has_record(
            markus.INCR, 'symbols.upload.web_upload', 1, tags=['email:test_example.com']
        )