def test_maximum_queue():
    """Launching more tasks than the cap must not grow the pre-init queue past it."""
    Dispatcher.set_task_queueing(True)

    attempted = Dispatcher.MAX_QUEUE_SIZE + 10
    for _ in range(attempted):
        Dispatcher.launch(lambda: 0)

    # Everything past the cap is dropped; the queue never exceeds MAX_QUEUE_SIZE.
    assert len(Dispatcher._preinit_task_queue) == Dispatcher.MAX_QUEUE_SIZE
def test_other_label_without_predefined_labels_before_glean_init():
    """Dynamic labels recorded pre-init collapse overflow labels into __other__."""
    labeled_counter_metric = metrics.LabeledCounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="labeled_counter_metric",
        send_in_pings=["metrics"],
    )

    Glean._reset()
    Dispatcher.set_task_queueing(True)

    # 21 distinct labels, then one repeat of the first.
    for idx in range(21):
        labeled_counter_metric["label_{}".format(idx)].add(1)
    labeled_counter_metric["label_0"].add(1)

    Glean.initialize(
        application_id="glean-python-test",
        application_version=glean_version,
        upload_enabled=True,
    )

    # label_0 was incremented twice; labels 1..15 once each; the 5 labels
    # beyond the 16-label limit were folded into __other__.
    assert 2 == labeled_counter_metric["label_0"].test_get_value()
    for idx in range(1, 16):
        assert 1 == labeled_counter_metric["label_{}".format(idx)].test_get_value()
    assert 5 == labeled_counter_metric["__other__"].test_get_value()
def test_queued_recorded_metrics_correctly_during_init():
    """Metric calls made while queueing are replayed by Glean.initialize()."""
    Glean._reset()
    # Enable queueing
    Dispatcher.set_task_queueing(True)

    counter_metric = CounterMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="counter_metric",
        send_in_pings=["store1"],
    )

    # Two increments recorded before initialization.
    for _ in range(2):
        counter_metric.add()

    Glean.initialize(
        application_id=GLEAN_APP_ID,
        application_version=glean_version,
        upload_enabled=True,
    )

    # Both queued increments must have been applied.
    assert counter_metric.test_has_value()
    assert 2 == counter_metric.test_get_value()
def test_maximum_tasks():
    """Decorated tasks are also subject to the pre-init queue size cap."""
    Dispatcher.set_task_queueing(True)

    attempted = Dispatcher.MAX_QUEUE_SIZE + 10
    for _ in range(attempted):
        Dispatcher.task(lambda: 0)()

    # Overflowing invocations are dropped at the cap.
    assert len(Dispatcher._preinit_task_queue) == Dispatcher.MAX_QUEUE_SIZE
def test_queue_tasks_are_flushed_off_the_main_thread():
    """Queued pre-init tasks must execute on the worker thread when flushed.

    Fix: the original assigned ``Dispatcher._testing_mode`` and
    ``Dispatcher._queue_initial_tasks`` twice; the first pair was dead code
    (overwritten before any task was launched) and is removed.
    """
    main_thread_id = threading.get_ident()
    thread_canary = [0]

    def test_task():
        # Must run off the main thread.
        assert main_thread_id != threading.get_ident()
        thread_canary[0] += 1

    Dispatcher._testing_mode = False
    Dispatcher._queue_initial_tasks = True

    for _ in range(3):
        Dispatcher.launch(test_task)

    # While queueing, nothing runs: tasks accumulate in the pre-init queue.
    assert 3 == len(Dispatcher._preinit_task_queue)
    assert 0 == thread_canary[0]

    Dispatcher.flush_queued_initial_tasks()
    # Wait for the worker to drain before asserting on the results.
    Dispatcher._task_worker._queue.join()

    assert 3 == thread_canary[0]
    assert 0 == len(Dispatcher._preinit_task_queue)
def task_runner():
    # NOTE(review): this module-level helper references ``test_task`` and
    # ``thread_canary``, neither of which is defined at module scope — it
    # looks like a leftover fragment of a threaded variant of the
    # flush-off-the-main-thread test. As written it would raise NameError if
    # called. Confirm whether it can be deleted.
    for i in range(3):
        Dispatcher.launch(test_task)

    assert 3 == len(Dispatcher._preinit_task_queue)
    assert 0 == thread_canary[0]

    Dispatcher.flush_queued_initial_tasks()
def test_tasks_run_off_the_main_thread():
    """A directly launched task runs on the dispatcher's worker thread."""
    main_thread_id = threading.get_ident()
    thread_canary = [False]

    def test_task():
        # The task must not run on the caller's thread, and only once.
        assert main_thread_id != threading.get_ident()
        assert False is thread_canary[0]
        thread_canary[0] = True

    Dispatcher.launch(test_task)
    # Block until the worker has processed the task.
    Dispatcher._task_worker._queue.join()

    assert True is thread_canary[0]
def test_flush_queued_events_on_startup_and_correctly_handle_preinit_events(
    safe_httpserver,
):
    """Events recorded before a restart are flushed; pre-init events are kept."""
    safe_httpserver.serve_content(b"", code=200)

    Glean._configuration.server_endpoint = safe_httpserver.url
    Glean._configuration.log_pings = True

    class EventKeys(enum.Enum):
        SOME_EXTRA = 0

    event = metrics.EventMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.PING,
        name="test_event",
        send_in_pings=["events"],
        allowed_extra_keys=["some_extra"],
    )

    # One event recorded while fully initialized.
    event.record(extra={EventKeys.SOME_EXTRA: "run1"})
    assert 1 == len(event.test_get_value())

    # One event recorded while queueing (pre-init).
    Dispatcher.set_task_queueing(True)
    event.record(extra={EventKeys.SOME_EXTRA: "pre-init"})

    testing.reset_glean(
        application_id="glean-python-test",
        application_version=glean_version,
        clear_stores=False,
        configuration=Configuration(
            server_endpoint=safe_httpserver.url, log_pings=True
        ),
    )

    # And one more after re-initialization.
    event.record(extra={EventKeys.SOME_EXTRA: "post-init"})

    # The restart produced exactly one "events" ping upload.
    assert 1 == len(safe_httpserver.requests)
    request = safe_httpserver.requests[0]
    assert "events" in request.url
    # One event remains stored locally after the flush.
    assert 1 == len(event.test_get_value())

    Glean._submit_ping_by_name("events")

    # Submitting explicitly sends a second "events" ping.
    assert 2 == len(safe_httpserver.requests)
    request = safe_httpserver.requests[1]
    assert "events" in request.url
def test_dispatched_tasks_throwing_exceptions_are_correctly_handled():
    """A raising task must not kill the worker; subsequent tasks still run."""
    Dispatcher._testing_mode = False
    Dispatcher._queue_initial_tasks = False
    thread_canary = [0]

    def exception_task():
        # Deliberate ZeroDivisionError.
        42 / 0

    Dispatcher.launch(exception_task)

    def working_task():
        thread_canary[0] += 1

    for _ in range(3):
        Dispatcher.launch(working_task)

    # Drain the worker queue, then verify the good tasks all executed.
    Dispatcher._task_worker._queue.join()
    assert 3 == thread_canary[0]
def test_launch_correctly_adds_tasks_to_queue_if_queue_tasks_is_true():
    """While queueing is on, decorated task calls accumulate instead of running."""
    thread_canary = [0]

    Dispatcher.set_task_queueing(True)

    @Dispatcher.task
    def update():
        thread_canary[0] += 1

    for _ in range(3):
        update()

    # Nothing ran yet; all three invocations are queued.
    assert 3 == len(Dispatcher._preinit_task_queue)
    assert 0 == thread_canary[0]

    Dispatcher.flush_queued_initial_tasks()

    # Flushing executes every queued invocation and empties the queue.
    assert 3 == thread_canary[0]
    assert 0 == len(Dispatcher._preinit_task_queue)
def test_queued_tasks_are_executed_in_the_order_they_are_received():
    """Ordering is preserved across the queued -> live dispatcher transition."""
    main_thread_id = threading.get_ident()

    Dispatcher._testing_mode = False
    Dispatcher._queue_initial_tasks = True

    class Job:
        # Class-level accumulators shared by every Job instance.
        thread_counter = [0]
        thread_list = []

        def __init__(self, num):
            self.num = num

        def __lt__(self, other):
            # Arbitrary identity-based ordering.
            return id(self) < id(other)

        def __call__(self):
            # Each job must run off the main thread and record its sequence number.
            assert main_thread_id != threading.get_ident()
            self.thread_counter[0] += 1
            self.thread_list.append(self.num)

    # First half queued pre-init…
    for num in range(50):
        Dispatcher.launch(Job(num))

    Dispatcher.flush_queued_initial_tasks()

    # …second half dispatched live.
    for num in range(50, 100):
        Dispatcher.launch(Job(num))

    Dispatcher._task_worker._queue.join()

    # All 100 jobs ran, exactly in submission order.
    assert Job.thread_list == list(range(100))
    assert Job.thread_counter[0] == 100
def task_runner():
    # NOTE(review): this module-level helper references ``Job``, which is not
    # defined at module scope (it is a class local to the ordering test) — it
    # looks like a leftover fragment of a threaded variant of that test. As
    # written it would raise NameError if called. Confirm whether it can be
    # deleted.
    for i in range(50):
        Dispatcher.launch(Job(i))

    Dispatcher.flush_queued_initial_tasks()

    for i in range(50, 100):
        Dispatcher.launch(Job(i))
def test_that_thread_joins_before_directory_is_deleted_in_reset():
    """Glean._reset() must join the worker before tearing down the data dir."""
    Dispatcher._testing_mode = False
    Dispatcher._queue_initial_tasks = False
    thread_canary = [0]

    boolean_metric = metrics.BooleanMetricType(
        disabled=False,
        category="telemetry",
        lifetime=Lifetime.APPLICATION,
        name="boolean_metric",
        send_in_pings=["store1"],
    )

    def slow_task():
        time.sleep(1)
        # This will cause a Rust panic if the data directory was deleted in
        # Glean._reset() before this has a chance to run.
        boolean_metric.set(True)
        thread_canary[0] = 1

    Dispatcher.launch(slow_task)
    Glean._reset()

    # _reset() must have waited for slow_task to finish.
    assert thread_canary[0] == 1
def test_overflowing_the_task_queue_records_telemetry():
    """Overflowing the pre-init queue is itself recorded as error telemetry.

    Fix: the queue size (100) and total (110) were hard-coded; derive them
    from ``Dispatcher.MAX_QUEUE_SIZE`` as the other queue tests do, so this
    test stays correct if the limit ever changes. Behavior is identical while
    ``MAX_QUEUE_SIZE == 100``.
    """
    Dispatcher.set_task_queueing(True)

    overflow = 10
    total = Dispatcher.MAX_QUEUE_SIZE + overflow
    for _ in range(total):
        Dispatcher.launch(lambda: None)

    # The queue is capped; the excess launches are counted as overflow.
    assert Dispatcher.MAX_QUEUE_SIZE == len(Dispatcher._preinit_task_queue)
    assert overflow == Dispatcher._overflow_count

    Dispatcher.flush_queued_initial_tasks()

    # The recorded value is the total number of attempted launches,
    # not just the overflow amount.
    assert (
        total
        == _builtins.metrics.glean.error.preinit_tasks_overflow.test_get_value()
    )

    json_content = Glean.test_collect(_builtins.pings.metrics)
    json_tree = json.loads(json_content)
    assert (
        total
        == json_tree["metrics"]["counter"]["glean.error.preinit_tasks_overflow"]
    )