def test_put(self):
    """put() persists data across storage instances; a failed rename yields None."""
    payload = (1, 2, 3)
    path = os.path.join(TEST_FOLDER, "bar")
    with LocalFileStorage(path) as storage:
        storage.put(payload)
        self.assertEqual(storage.get().get(), payload)
    # Re-open the same folder: previously stored data must still be readable.
    with LocalFileStorage(path) as storage:
        self.assertEqual(storage.get().get(), payload)
        # When os.rename fails, put() swallows the error and returns None.
        with mock.patch("os.rename", side_effect=throw(Exception)):
            self.assertIsNone(storage.put(payload))
def test_check_storage_size_links(self):
    """Symbolic links are excluded from the storage-size accounting."""
    payload = (1, 2, 3)
    with LocalFileStorage(os.path.join(TEST_FOLDER, "asd4"), 1000) as storage:
        storage.put(payload)
        # If every entry looks like a symlink, it contributes no size,
        # so the check must report that there is room left.
        with mock.patch("os.path.islink", return_value=True):
            self.assertTrue(storage._check_storage_size())
def __init__(self, **kwargs: Any) -> None:
    """Azure Monitor base exporter for OpenTelemetry.

    :keyword str api_version: The service API version used. Defaults to latest.
    :rtype: None
    """
    parsed_connection_string = ConnectionStringParser(
        kwargs.get('connection_string'))

    self._instrumentation_key = parsed_connection_string.instrumentation_key
    self._timeout = 10.0  # networking timeout in seconds
    self._api_version = kwargs.get('api_version') or _SERVICE_API_LATEST
    self._consecutive_redirects = 0  # To prevent circular redirects

    # Offline-storage folder is suffixed with the instrumentation key so that
    # exporters for different resources do not share the same temp directory.
    temp_suffix = self._instrumentation_key or ""
    default_storage_path = os.path.join(tempfile.gettempdir(),
                                        _TEMPDIR_PREFIX + temp_suffix)
    config = AzureMonitorClientConfiguration(
        parsed_connection_string.endpoint, **kwargs)
    # NOTE: policy order matters — it defines the request pipeline order.
    policies = [
        RequestIdPolicy(**kwargs),
        config.headers_policy,
        config.user_agent_policy,
        config.proxy_policy,
        ContentDecodePolicy(**kwargs),
        # Handle redirects in exporter, set new endpoint if redirected
        RedirectPolicy(permit_redirects=False),
        config.retry_policy,
        config.authentication_policy,
        config.custom_hook_policy,
        config.logging_policy,
        # Explicitly disabling to avoid infinite loop of Span creation when data is exported
        # DistributedTracingPolicy(**kwargs),
        config.http_logging_policy or HttpLoggingPolicy(**kwargs)
    ]
    self.client = AzureMonitorClient(
        host=parsed_connection_string.endpoint,
        connection_timeout=self._timeout,
        policies=policies,
        **kwargs)
    # Local persistence for telemetry that could not be sent immediately.
    self.storage = LocalFileStorage(
        path=default_storage_path,
        max_size=50 * 1024 * 1024,  # Maximum size in bytes.
        maintenance_period=60,  # Maintenance interval in seconds.
        retention_period=7 * 24 * 60 * 60,  # Retention period in seconds
    )
def test_check_storage_size_error(self):
    """An OSError from os.path.getsize must not make the size check fail."""
    payload = (1, 2, 3)
    with LocalFileStorage(os.path.join(TEST_FOLDER, "asd5"), 1) as storage:
        # getsize raising means the file's size is simply not counted.
        with mock.patch("os.path.getsize", side_effect=throw(OSError)):
            storage.put(payload)
            with mock.patch("os.path.islink", return_value=True):
                self.assertTrue(storage._check_storage_size())
def test_put_max_size(self):
    """put() drops the payload when the storage-size check reports no room."""
    payload = (1, 2, 3)
    with LocalFileStorage(os.path.join(TEST_FOLDER, "asd")) as storage:
        # Force the capacity check to report "full".
        storage._check_storage_size = mock.Mock(return_value=False)
        storage.put(payload)
        # Nothing was written, so nothing can be read back.
        self.assertIsNone(storage.get())
def test_get(self):
    """Exercise get() against expired, leased, and rename/remove-failure blobs."""
    now = _now()
    with LocalFileStorage(os.path.join(TEST_FOLDER, "foo")) as stor:
        # Freshly leased blob: not eligible for get() until the lease expires.
        stor.put((1, 2, 3), lease_period=10)
        with mock.patch("azure.monitor.opentelemetry.exporter._storage._now") as m:
            # Back-date subsequent writes by 30 days so they look expired
            # relative to the retention period.
            m.return_value = now - _seconds(30 * 24 * 60 * 60)
            stor.put((1, 2, 3))
            stor.put((1, 2, 3), lease_period=10)
            with mock.patch("os.rename"):
                # os.rename mocked out: the blob file is never actually moved.
                stor.put((1, 2, 3))
        with mock.patch("os.rename"):
            stor.put((1, 2, 3))
        # Both cleanup paths failing: get() should give up and return None
        # rather than raise.
        with mock.patch("os.remove", side_effect=throw(Exception)):
            with mock.patch("os.rename", side_effect=throw(Exception)):
                self.assertIsNone(stor.get())
        self.assertIsNone(stor.get())
class BaseExporter:
    """Azure Monitor base exporter for OpenTelemetry."""

    def __init__(self, **kwargs: Any) -> None:
        """Azure Monitor base exporter for OpenTelemetry.

        :keyword str api_version: The service API version used. Defaults to latest.
        :rtype: None
        """
        parsed_connection_string = ConnectionStringParser(kwargs.get('connection_string'))

        self._instrumentation_key = parsed_connection_string.instrumentation_key
        self._timeout = 10.0  # networking timeout in seconds
        self._api_version = kwargs.get('api_version') or _SERVICE_API_LATEST
        self._consecutive_redirects = 0  # To prevent circular redirects

        config = AzureMonitorClientConfiguration(
            parsed_connection_string.endpoint, **kwargs)
        # NOTE: policy order matters — it defines the request pipeline order.
        policies = [
            RequestIdPolicy(**kwargs),
            config.headers_policy,
            config.user_agent_policy,
            config.proxy_policy,
            ContentDecodePolicy(**kwargs),
            # Handle redirects in exporter, set new endpoint if redirected
            RedirectPolicy(permit_redirects=False),
            config.retry_policy,
            config.authentication_policy,
            config.custom_hook_policy,
            config.logging_policy,
            # Explicitly disabling to avoid infinite loop of Span creation when data is exported
            # DistributedTracingPolicy(**kwargs),
            config.http_logging_policy or HttpLoggingPolicy(**kwargs)
        ]
        self.client = AzureMonitorClient(
            host=parsed_connection_string.endpoint,
            connection_timeout=self._timeout,
            policies=policies,
            **kwargs)
        # Offline-storage folder is suffixed with the instrumentation key so
        # that exporters for different resources do not share a temp directory.
        temp_suffix = self._instrumentation_key or ""
        default_storage_path = os.path.join(
            tempfile.gettempdir(),
            _TEMPDIR_PREFIX + temp_suffix
        )
        self.storage = LocalFileStorage(
            path=default_storage_path,
            max_size=50 * 1024 * 1024,  # Maximum size in bytes.
            maintenance_period=60,  # Maintenance interval in seconds.
            retention_period=7 * 24 * 60 * 60,  # Retention period in seconds
        )

    def _transmit_from_storage(self) -> None:
        """Retry sending previously stored (failed) telemetry blobs."""
        for blob in self.storage.gets():
            # give a few more seconds for blob lease operation
            # to reduce the chance of race (for perf consideration)
            if blob.lease(self._timeout + 5):
                envelopes = [TelemetryItem(**x) for x in blob.get()]
                result = self._transmit(list(envelopes))
                if result == ExportResult.FAILED_RETRYABLE:
                    # Shorten the lease so the blob becomes retryable soon.
                    blob.lease(1)
                else:
                    # Success or non-retryable failure: drop the blob either way.
                    blob.delete()

    # pylint: disable=too-many-branches
    # pylint: disable=too-many-nested-blocks
    # pylint: disable=too-many-return-statements
    def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult:
        """
        Transmit the data envelopes to the ingestion service.

        Returns an ExportResult, this function should never throw an exception.
        """
        if len(envelopes) > 0:
            try:
                track_response = self.client.track(envelopes)
                if not track_response.errors:
                    self._consecutive_redirects = 0
                    logger.info("Transmission succeeded: Item received: %s. Items accepted: %s",
                                track_response.items_received, track_response.items_accepted)
                    return ExportResult.SUCCESS
                # Partial failure: keep only the envelopes whose per-item
                # status code is retryable; the rest are dropped and logged.
                resend_envelopes = []
                for error in track_response.errors:
                    if _is_retryable_code(error.status_code):
                        resend_envelopes.append(
                            envelopes[error.index]
                        )
                    else:
                        logger.error(
                            "Data drop %s: %s %s.",
                            error.status_code,
                            error.message,
                            envelopes[error.index] if error.index is not None else "",
                        )
                if resend_envelopes:
                    # Persist retryable envelopes for a later storage-driven retry.
                    envelopes_to_store = [x.as_dict() for x in resend_envelopes]
                    self.storage.put(envelopes_to_store)
                self._consecutive_redirects = 0
                return ExportResult.FAILED_RETRYABLE
            except HttpResponseError as response_error:
                if _is_retryable_code(response_error.status_code):
                    return ExportResult.FAILED_RETRYABLE
                if _is_redirect_code(response_error.status_code):
                    # RedirectPolicy(permit_redirects=False) surfaces redirects
                    # here so the exporter can rewrite its host and retry.
                    self._consecutive_redirects = self._consecutive_redirects + 1
                    if self._consecutive_redirects < self.client._config.redirect_policy.max_redirects:  # pylint: disable=W0212
                        if response_error.response and response_error.response.headers:
                            location = response_error.response.headers.get("location")
                            if location:
                                url = urlparse(location)
                                if url.scheme and url.netloc:
                                    # Change the host to the new redirected host
                                    self.client._config.host = "{}://{}".format(url.scheme, url.netloc)  # pylint: disable=W0212
                                    # Attempt to export again
                                    return self._transmit(envelopes)
                        logger.error(
                            "Error parsing redirect information."
                        )
                        return ExportResult.FAILED_NOT_RETRYABLE
                    logger.error(
                        "Error sending telemetry because of circular redirects." \
                        "Please check the integrity of your connection string."
                    )
                    return ExportResult.FAILED_NOT_RETRYABLE
                return ExportResult.FAILED_NOT_RETRYABLE
            except ServiceRequestError as request_error:
                # Errors when we're fairly sure that the server did not receive the
                # request, so it should be safe to retry.
                logger.warning(
                    "Retrying due to server request error: %s.", request_error
                )
                return ExportResult.FAILED_RETRYABLE
            except Exception as ex:
                logger.error(
                    "Envelopes could not be exported and are not retryable: %s.", ex
                )
                return ExportResult.FAILED_NOT_RETRYABLE
            # Safety net: the try/except above is expected to return in every path.
            return ExportResult.FAILED_NOT_RETRYABLE
        # No spans to export
        self._consecutive_redirects = 0
        return ExportResult.SUCCESS
def test_get_nothing(self):
    """get() returns None when the storage folder holds only a subdirectory."""
    # Create a nested storage folder so the parent contains a directory entry.
    with LocalFileStorage(os.path.join(TEST_FOLDER, "test", "a")):
        pass
    with LocalFileStorage(os.path.join(TEST_FOLDER, "test")) as storage:
        self.assertIsNone(storage.get())
def test_maintanence_routine(self):
    """Maintenance must tolerate failures in makedirs/listdir/isdir."""
    # NOTE(review): method name has a typo ("maintanence") — kept, since
    # renaming would change the public test identifier.
    with mock.patch("os.makedirs") as m:
        with LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) as stor:
            m.return_value = None
    # Construction must survive makedirs raising.
    with mock.patch("os.makedirs", side_effect=throw(Exception)):
        stor = LocalFileStorage(os.path.join(TEST_FOLDER, "baz"))
        stor.close()
    # Construction must survive listdir raising.
    with mock.patch("os.listdir", side_effect=throw(Exception)):
        stor = LocalFileStorage(os.path.join(TEST_FOLDER, "baz"))
        stor.close()
    with LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) as stor:
        # Explicit maintenance calls must not propagate OS errors.
        with mock.patch("os.listdir", side_effect=throw(Exception)):
            stor._maintenance_routine()
        with mock.patch("os.path.isdir", side_effect=throw(Exception)):
            stor._maintenance_routine()
def test_check_storage_size_no_files(self):
    """An empty storage folder is always under the size limit.

    Fix: use a folder name ("asd6") not shared with any other test.
    Previously this test used "asd3", the same folder as
    test_check_storage_size_not_full, so the "no files" premise depended
    on test execution order.
    """
    with LocalFileStorage(os.path.join(TEST_FOLDER, "asd6"), 1000) as stor:
        self.assertTrue(stor._check_storage_size())
def test_check_storage_size_not_full(self):
    """The size check passes while stored data stays below the limit."""
    payload = (1, 2, 3)
    # 1000-byte cap is comfortably larger than one small serialized tuple.
    with LocalFileStorage(os.path.join(TEST_FOLDER, "asd3"), 1000) as storage:
        storage.put(payload)
        self.assertTrue(storage._check_storage_size())