def test_initializing_twice_is_a_no_op():
    """A second call to Glean.initialize must keep the existing configuration."""
    config_before = Glean._configuration
    Glean.initialize(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
    )
    assert config_before is Glean._configuration
def test_core_metrics_should_be_cleared_with_disabling_and_enabling_uploading():
    """Toggling upload off clears the core metrics; toggling it back repopulates them."""
    os_metric = _builtins.metrics.glean.internal.metrics.os
    assert os_metric.test_has_value()

    Glean.set_upload_enabled(False)
    assert not os_metric.test_has_value()

    Glean.set_upload_enabled(True)
    assert os_metric.test_has_value()
def reset_glean(
    *,
    application_id: str,
    application_version: str,
    configuration: Optional[Configuration] = None,
    clear_stores: bool = True,
):
    """
    Resets the Glean singleton.

    Args:
        application_id (str): The application id to use when sending pings.
        application_version (str): The version of the application sending
            Glean data.
        configuration (glean.config.Configuration): (optional) An object with
            global settings.
        clear_stores (bool): (optional) When False, the existing data
            directory is preserved across the reset and handed back to the
            re-initialized Glean instance; when True (the default) a fresh
            data directory is used.
    """
    from glean import Glean
    from glean._dispatcher import Dispatcher

    # NOTE(review): presumably makes the dispatcher run tasks synchronously
    # so tests behave deterministically — confirm against Dispatcher docs.
    Dispatcher._testing_mode = True

    data_dir = None  # type: Optional[Path]
    if not clear_stores:
        # Keep the on-disk stores alive through the reset so the new
        # instance picks them up.
        Glean._destroy_data_dir = False
        data_dir = Glean._data_dir

    Glean._reset()

    Glean.initialize(
        application_id=application_id,
        application_version=application_version,
        upload_enabled=True,
        configuration=configuration,
        data_dir=data_dir,
    )
def test_queued_recorded_metrics_correctly_during_init():
    """Metric operations queued before init are applied once init completes."""
    Glean._reset()
    # Enable queueing
    Dispatcher.set_task_queueing(True)

    counter_metric = CounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="counter_metric",
        send_in_pings=["store1"],
    )

    expected_count = 2
    for _ in range(expected_count):
        counter_metric.add()

    Glean.initialize(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
    )

    assert counter_metric.test_has_value()
    assert counter_metric.test_get_value() == expected_count
def test_set_application_id_and_version(safe_httpserver):
    """The id/version passed at init appear in metrics and in the ping URL."""
    safe_httpserver.serve_content(b"", code=200)

    Glean._reset()
    Glean._initialize_with_tempdir_for_testing(
        application_id="my-id",
        application_version="my-version",
        upload_enabled=True,
        configuration=Configuration(server_endpoint=safe_httpserver.url),
    )

    display_version = (
        _builtins.metrics.glean.internal.metrics.app_display_version.test_get_value()
    )
    assert display_version == "my-version"

    Glean._configuration.server_endpoint = safe_httpserver.url
    _builtins.pings.baseline.submit()

    assert len(safe_httpserver.requests) == 1
    request = safe_httpserver.requests[0]
    assert "baseline" in request.url
    assert "my-id" in request.url
def test_other_label_without_predefined_labels_before_glean_init():
    """Labels recorded over the limit before init still collapse into __other__."""
    labeled_counter_metric = metrics.LabeledCounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="labeled_counter_metric",
        send_in_pings=["metrics"],
    )

    Glean._reset()
    Dispatcher.set_task_queueing(True)

    for i in range(21):
        labeled_counter_metric["label_{}".format(i)].add(1)
    # label_0 receives one extra increment.
    labeled_counter_metric["label_0"].add(1)

    Glean.initialize(
        application_id="glean-python-test",
        application_version=glean_version,
        upload_enabled=True,
    )

    assert labeled_counter_metric["label_0"].test_get_value() == 2
    # Per the assertions: 16 distinct labels survive; the remaining 5 adds
    # (labels 16-20) are folded into __other__.
    for i in range(1, 16):
        assert labeled_counter_metric["label_{}".format(i)].test_get_value() == 1
    assert labeled_counter_metric["__other__"].test_get_value() == 5
def test_tempdir_is_cleared_multiprocess(safe_httpserver):
    """Resetting Glean must not delete the pings directory under a live uploader."""
    safe_httpserver.serve_content(b"", code=200)
    Glean._configuration.server_endpoint = safe_httpserver.url

    # This test requires us to write a few files in the pending pings
    # directory, to which language bindings have theoretically no access.
    # Manually create the path to that directory, at the risk of breaking
    # the test in the future, if that changes in the Rust code.
    pings_dir = Glean._data_dir / "pending_pings"
    pings_dir.mkdir()

    ping_count = 10
    for _ in range(ping_count):
        ping_path = pings_dir / str(uuid.uuid4())
        with ping_path.open("wb") as fd:
            fd.write(b"/data/path/\n")
            fd.write(b"{}\n")

    # Make sure that resetting while the PingUploadWorker is running doesn't
    # delete the directory out from under the PingUploadWorker.
    uploader = PingUploadWorker._process()
    Glean._reset()

    uploader.wait()
    assert uploader.returncode == 0

    assert len(safe_httpserver.requests) == ping_count
def test_500_error_submit_concurrent_writing(slow_httpserver, monkeypatch):
    # Tests that concurrently writing to the database from the main process
    # and the ping uploading subprocess does not lose any of the writes
    # (verified by the final times == counter value assertion).
    slow_httpserver.serve_content(b"", code=500)

    counter = metrics.CounterMetricType(
        disabled=False,
        category="test",
        name="counter",
        send_in_pings=["metrics"],
        lifetime=metrics.Lifetime.PING,
    )

    # Force the ping upload worker into a separate process
    monkeypatch.setattr(PingUploadWorker, "process", PingUploadWorker._process)

    Glean._configuration._server_endpoint = slow_httpserver.url
    Glean._submit_ping_by_name("baseline")

    # While the uploader is running, increment the counter as fast as we can
    times = 0
    last_process = ProcessDispatcher._last_process
    while last_process.poll() is None:
        counter.add()
        times += 1

    # This kind of recoverable error will be tried 3 times
    # The number of retries is defined on glean-core
    assert 3 == len(slow_httpserver.requests)

    metric = get_upload_failure_metric()
    assert not metric["status_code_4xx"].test_has_value()
    assert 3 == metric["status_code_5xx"].test_get_value()

    # Every increment made in the main process must be visible in the metric.
    assert times > 0
    assert times == counter.test_get_value()
def burnham(
    verbose: bool,
    test_run: str,
    test_name: str,
    enable_telemetry: bool,
    platform: str,
    spore_drive: str,
    missions: Tuple[Mission, ...],
) -> None:
    """Travel through space and complete missions with the Discovery crew.

    If telemetry is enabled, measure, collect, and submit non-personal
    information to the specified data platform with Glean.
    """
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    Glean.initialize(
        application_id=__title__,
        application_version=__version__,
        upload_enabled=enable_telemetry is True,
        # Glean data goes into a throwaway temporary directory for this run.
        data_dir=Path(TemporaryDirectory().name),
        configuration=Configuration(server_endpoint=platform),
    )

    # Identifiers that correlate the emitted pings with this test scenario.
    metrics.test.run.set(test_run)
    metrics.test.name.set(test_name)

    space_ship = Discovery(
        warp_drive=WarpDrive(),
        # NOTE(review): spore_drive is annotated `str` but compared to None —
        # presumably the CLI passes None when the option is absent; confirm.
        spore_drive=SporeDrive(branch=spore_drive, active=spore_drive is not None),
    )
    pings.space_ship_ready.submit()

    try:
        for mission in missions:
            complete_mission(space_ship=space_ship, mission=mission)

            # When mission "MISSION H: DISABLE GLEAN UPLOAD" disables the Glean
            # SDK ping upload all pending events, metrics and pings are
            # cleared, except for first_run_date. We need to restore values for
            # test.run and test.name after re-enabling ping upload, so that we
            # can properly correlate new pings with the test scenario.
            if mission.identifier == "MISSION I: ENABLE GLEAN UPLOAD":
                metrics.test.run.set(test_run)
                metrics.test.name.set(test_name)

        secs = 5
        logger.info("All missions completed.")
        logger.info(f" Waiting {secs}s for telemetry to be sent.")
        time.sleep(secs)

    except BurnhamError as err:
        click.echo(f"Error: {err}", err=True)
        sys.exit(1)
def complete(self, space_ship: Discovery) -> None:
    """Disable Glean upload after giving pending pings time to go out."""
    # Wait for 5 seconds to wait for the upload of pending pings to
    # complete. This is required for testing the deletion-request ping
    # functionality when we join the discovery table with the
    # deletion-request table using the client ID to then verify that the
    # deletion-request ping was submitted with the expected client ID.
    upload_grace_period_s = 5
    time.sleep(upload_grace_period_s)
    Glean.set_upload_enabled(False)
def fixture_initialize_glean(tmp_path_factory):
    """Initialize the Glean SDK for the test session."""
    glean_data_dir = tmp_path_factory.mktemp("glean")
    Glean.initialize(
        application_id="burnham_testing",
        application_version="0.1.0",
        upload_enabled=False,
        data_dir=glean_data_dir,
    )
def test_set_application_id_and_version():
    """The id/version passed to initialize land in the builtin core metrics."""
    Glean.reset()
    Glean.initialize(application_id="my-id", application_version="my-version")

    core = _builtins.metrics.glean.internal.metrics
    assert core.app_build.test_get_value() == "my-id"
    assert core.app_display_version.test_get_value() == "my-version"
def test_the_app_channel_must_be_correctly_set():
    """The channel from the Configuration must appear in the core metrics."""
    Glean.reset()
    Glean.initialize(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        configuration=Configuration(channel="my-test-channel"),
    )

    app_channel = _builtins.metrics.glean.internal.metrics.app_channel
    assert app_channel.test_get_value() == "my-test-channel"
def initialize_telemetry(upload_enabled):
    """Initialize Glean telemetry under ~/.mozilla/mozregression."""
    mozregression_path = Path.home() / ".mozilla" / "mozregression"
    telemetry_config = Configuration(allow_multiprocessing=False)
    Glean.initialize(
        application_id="org.mozilla.mozregression",
        application_version=__version__,
        upload_enabled=upload_enabled,
        configuration=telemetry_config,
        data_dir=mozregression_path / "data",
    )
def test_disabling_upload_sends_deletion_request(safe_httpserver):
    """Turning upload off must submit exactly one deletion-request ping."""
    safe_httpserver.serve_content(b"", code=200)
    Glean._configuration.server_endpoint = safe_httpserver.url

    # Ensure nothing was received yet
    assert not safe_httpserver.requests

    # Disabling upload will trigger a deletion-request ping
    Glean.set_upload_enabled(False)
    assert len(safe_httpserver.requests) == 1
def test_data_dir_is_required():
    """Calling initialize without a data_dir after a reset raises TypeError."""
    Glean._reset()

    init_kwargs = dict(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
        configuration=Glean._configuration,
    )
    with pytest.raises(TypeError):
        Glean.initialize(**init_kwargs)
def test_initializing_twice_is_a_no_op():
    """Re-initializing must keep the configuration object from the first init."""
    original_config = Glean._configuration
    Glean._initialize_with_tempdir_for_testing(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
    )
    assert Glean._configuration is original_config
def test_the_app_channel_must_be_correctly_set():
    """The channel from the Configuration must appear in the core metrics."""
    Glean._reset()
    Glean._initialize_with_tempdir_for_testing(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
        configuration=Configuration(channel="my-test-channel"),
    )

    app_channel = _builtins.metrics.glean.internal.metrics.app_channel
    assert app_channel.test_get_value() == "my-test-channel"
def test_disabling_upload_should_disable_metrics_recording():
    """Adds made while upload is disabled must not be recorded."""
    counter_metric = CounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="counter_metric",
        send_in_pings=["store1"],
    )

    Glean.set_upload_enabled(False)
    counter_metric.add(1)
    assert counter_metric.test_has_value() is False
def test_app_display_version_unknown():
    """A missing application_version is reported as the string "Unknown"."""
    from glean import _builtins

    Glean._reset()
    Glean._initialize_with_tempdir_for_testing(
        application_id=GLEAN_APP_ID,
        application_version=None,
        upload_enabled=True,
    )

    display_version = _builtins.metrics.glean.internal.metrics.app_display_version
    assert display_version.test_get_value() == "Unknown"
def test_set_application_build_id():
    """The application_build_id passed at init lands in the app_build metric."""
    Glean._reset()
    Glean._initialize_with_tempdir_for_testing(
        application_id="my-id",
        application_version="my-version",
        application_build_id="123ABC",
        upload_enabled=True,
    )

    app_build = _builtins.metrics.glean.internal.metrics.app_build
    assert app_build.test_get_value() == "123ABC"
def test_recording_upload_errors_doesnt_clobber_database(
    tmpdir, safe_httpserver, monkeypatch
):
    """
    Test that running the ping uploader subprocess doesn't clobber the database.

    If, under some bug, the subprocess had "upload_enabled" set to True, it
    could record upload errors in the database, clobbering any metrics that
    might have meanwhile been recorded in the main process. This test is
    known to fail if "upload_enabled" is set to `True` in the subprocess.
    """
    tmpdir = Path(tmpdir)

    Glean._reset()
    Glean.initialize(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
        data_dir=tmpdir,
    )

    # Record a metric in the main process before the upload runs.
    counter_metric = CounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.PING,
        name="counter_metric",
        send_in_pings=["baseline"],
    )
    counter_metric.add(10)

    # A 400 response is the kind of failure that could be recorded as an
    # upload error by a misbehaving subprocess.
    safe_httpserver.serve_content(b"", code=400)

    # Force the ping upload worker into a separate process
    monkeypatch.setattr(PingUploadWorker, "process", PingUploadWorker._process)

    Glean._configuration._server_endpoint = safe_httpserver.url
    Glean._submit_ping_by_name("baseline")

    ProcessDispatcher._wait_for_last_process()
    assert 1 == len(safe_httpserver.requests)

    # Force a reload of the database from disk
    Glean._reset()
    Glean.initialize(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
        data_dir=tmpdir,
    )

    # The subprocess must not have written an upload-error metric to disk.
    metric = get_upload_failure_metric()
    assert not metric["status_code_4xx"].test_has_value()
def test_400_error_submit(safe_httpserver, monkeypatch):
    """A 4xx response is not retried and is recorded as a 4xx failure."""
    safe_httpserver.serve_content(b"", code=400)

    # Force the ping upload worker into a separate process
    monkeypatch.setattr(PingUploadWorker, "process", PingUploadWorker._process)

    Glean._configuration._server_endpoint = safe_httpserver.url
    Glean._submit_ping_by_name("baseline")

    ProcessDispatcher._wait_for_last_process()

    assert len(safe_httpserver.requests) == 1

    metric = get_upload_failure_metric()
    assert metric["status_code_4xx"].test_get_value() == 1
    assert not metric["status_code_5xx"].test_has_value()
def test_events_should_not_record_when_upload_is_disabled():
    """Events recorded while upload is disabled must be dropped, not queued."""

    class EventKeys(enum.Enum):
        TEST_NAME = 0

    event_metric = metrics.EventMetricType(
        disabled=False,
        category="ui",
        lifetime=Lifetime.PING,
        name="click",
        send_in_pings=["store1"],
        allowed_extra_keys=["test_name"],
    )

    assert Glean.get_upload_enabled()
    Glean.set_upload_enabled(True)
    event_metric.record({EventKeys.TEST_NAME: "event1"})
    assert len(event_metric.test_get_value()) == 1

    Glean.set_upload_enabled(False)
    assert not Glean.get_upload_enabled()
    event_metric.record({EventKeys.TEST_NAME: "event2"})
    # With upload disabled there is no value to fetch.
    with pytest.raises(ValueError):
        event_metric.test_get_value()

    Glean.set_upload_enabled(True)
    event_metric.record({EventKeys.TEST_NAME: "event3"})
    # Only the post-re-enable event is present; "event2" was dropped.
    assert len(event_metric.test_get_value()) == 1
def __init__(self):
    """Initiate Glean, load pings and metrics."""
    logger.debug("Initializing Glean...")

    telemetry_data_dir = Path(environment.MOZBUILD_PATH) / "telemetry-data"
    Glean.initialize(
        application_id="MozPhab",
        application_version=MOZPHAB_VERSION,
        upload_enabled=config.telemetry_enabled,
        configuration=Configuration(),
        data_dir=telemetry_data_dir,
    )

    self.pings = load_pings(environment.MOZPHAB_MAIN_DIR / "pings.yaml")
    self.metrics = load_metrics(environment.MOZPHAB_MAIN_DIR / "metrics.yaml")
def test_no_sending_deletion_ping_if_unchanged_outside_of_run(safe_httpserver, tmpdir):
    """Re-initializing with upload already disabled sends no deletion-request."""
    safe_httpserver.serve_content(b"", code=200)

    Glean._reset()
    config = Configuration(server_endpoint=safe_httpserver.url)
    data_dir = Path(str(tmpdir))

    init_kwargs = dict(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=False,
        data_dir=data_dir,
        configuration=config,
    )

    # First run: upload disabled from the start, nothing may be sent.
    Glean.initialize(**init_kwargs)
    assert not safe_httpserver.requests

    # Second run over the same data dir: still nothing may be sent.
    Glean._reset()
    Glean.initialize(**init_kwargs)
    assert not safe_httpserver.requests
def test_presubmit_makes_a_valid_ping(tmpdir, ping_schema_url, monkeypatch):
    # Bug 1648140: Submitting a ping prior to initialize meant that the core
    # metrics wouldn't yet be set.

    # File the recording uploader writes the request URL and body to.
    info_path = Path(str(tmpdir)) / "info.txt"

    Glean._reset()

    ping_name = "preinit_ping"
    ping = PingType(
        name=ping_name, include_client_id=True, send_if_empty=True, reason_codes=[]
    )

    # This test relies on testing mode to be disabled, since we need to prove the
    # real-world async behaviour of this.
    Dispatcher._testing_mode = False
    Dispatcher._queue_initial_tasks = True

    # Submit a ping prior to calling initialize
    ping.submit()
    Glean._initialize_with_tempdir_for_testing(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
        configuration=Glean._configuration,
    )

    # Capture upload requests to info_path instead of hitting the network.
    monkeypatch.setattr(
        Glean._configuration, "ping_uploader", _RecordingUploader(info_path)
    )

    # Wait until the work is complete
    Dispatcher._task_worker._queue.join()

    # The uploader runs asynchronously; poll until its output file appears.
    while not info_path.exists():
        time.sleep(0.1)

    with info_path.open("r") as fd:
        url_path = fd.readline()
        serialized_ping = fd.readline()

    print(url_path)
    # The ping name is the fourth path component of the submission URL.
    assert ping_name == url_path.split("/")[3]

    assert 0 == validate_ping.validate_ping(
        io.StringIO(serialized_ping),
        sys.stdout,
        schema_url=ping_schema_url,
    )
def test_500_error_submit(safe_httpserver, monkeypatch): safe_httpserver.serve_content(b"", code=500) # Force the ping upload worker into a separate process monkeypatch.setattr(PingUploadWorker, "process", PingUploadWorker._process) Glean._configuration._server_endpoint = safe_httpserver.url Glean._submit_ping_by_name("baseline") ProcessDispatcher._wait_for_last_process() # This kind of recoverable error will be tried 10 times # The number of retries is defined on glean-core assert 3 == len(safe_httpserver.requests) metric = get_upload_failure_metric() assert not metric["status_code_4xx"].test_has_value() assert 3 == metric["status_code_5xx"].test_get_value()
def test_labeled_string_type(ping_schema_url):
    """Labeled strings round-trip through recording and ping collection."""
    labeled_string_metric = metrics.LabeledStringMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="labeled_string_metric",
        send_in_pings=["metrics"],
    )

    labeled_string_metric["label1"].set("foo")
    labeled_string_metric["label2"].set("bar")

    for label, expected in (("label1", "foo"), ("label2", "bar")):
        assert labeled_string_metric[label].test_has_value()
        assert labeled_string_metric[label].test_get_value() == expected

    json_content = Glean.test_collect(_builtins.pings.metrics)

    # The collected ping must validate against the pipeline schema.
    assert 0 == validate_ping.validate_ping(
        io.StringIO(json_content), sys.stdout, schema_url=ping_schema_url
    )

    tree = json.loads(json_content)
    recorded = tree["metrics"]["labeled_string"]["telemetry.labeled_string_metric"]
    assert recorded["label1"] == "foo"
    assert recorded["label2"] == "bar"
def test_other_label_without_predefined_labels(ping_schema_url):
    """Labels beyond the limit collapse into __other__ and still validate."""
    labeled_counter_metric = metrics.LabeledCounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="labeled_counter_metric",
        send_in_pings=["metrics"],
    )

    for i in range(21):
        labeled_counter_metric["label_{}".format(i)].add(1)
    # label_0 receives one extra increment.
    labeled_counter_metric["label_0"].add(1)

    assert labeled_counter_metric["label_0"].test_get_value() == 2
    # Per the assertions: 16 labels survive verbatim; the remaining 5 adds
    # land in __other__.
    for i in range(1, 16):
        assert labeled_counter_metric["label_{}".format(i)].test_get_value() == 1
    assert labeled_counter_metric["__other__"].test_get_value() == 5

    json_content = Glean.test_collect(_builtins.pings.metrics)
    assert 0 == validate_ping.validate_ping(
        io.StringIO(json_content), sys.stdout, schema_url=ping_schema_url
    )