def test_self_monitoring_metrics_with_zero_values():
    self_monitoring = SelfMonitoring(execution_time=execution_time)
    self_monitoring.dynatrace_connectivities = [DynatraceConnectivity.Ok]
    self_monitoring.too_old_records = 0
    self_monitoring.parsing_errors = 0
    self_monitoring.all_requests = 1
    self_monitoring.processing_time = 0.0878758430480957
    self_monitoring.sending_time = 0.3609178066253662
    self_monitoring.too_long_content_size = []
    metric_data = self_monitoring.prepare_metric_data()
    assert metric_data == expected_metric_data_without_zeros_metrics
def test_all_self_monitoring_metrics():
    self_monitoring = SelfMonitoring(execution_time=execution_time)
    self_monitoring.dynatrace_connectivities = [
        DynatraceConnectivity.Other,
        DynatraceConnectivity.Other,
        DynatraceConnectivity.TooManyRequests
    ]
    self_monitoring.too_old_records = 6
    self_monitoring.parsing_errors = 3
    self_monitoring.all_requests = 3
    self_monitoring.processing_time = 0.0878758430480957
    self_monitoring.sending_time = 0.3609178066253662
    self_monitoring.too_long_content_size = [2000, 5000, 6000, 40000]
    metric_data = self_monitoring.prepare_metric_data()
    assert metric_data == all_expected_metric_data
def test_content_with_exact_len_not_trimmed():
    message = "WALTHAM, Mass.--(BUSINESS WIRE)-- Software intelligence company Dynatrace (NYSE: DT)"
    content_length_limit_backup = logs_ingest.main.content_length_limit

    # given
    log_entry = create_log_entry(message)
    logs_ingest.main.content_length_limit = len(json.dumps(log_entry))

    # when
    try:
        actual_output = parse_record(log_entry, SelfMonitoring(execution_time=datetime.utcnow()))
    finally:
        # restore original value
        logs_ingest.main.content_length_limit = content_length_limit_backup

    # then
    expected_output = {
        "cloud.provider": "Azure",
        "severity": "INFO",
        "content": '{"content": "WALTHAM, Mass.--(BUSINESS WIRE)-- Software intelligence company Dynatrace (NYSE: DT)"}'
    }
    assert actual_output == expected_output
def send_logs(dynatrace_url: str, dynatrace_token: str, logs: List[Dict], self_monitoring: SelfMonitoring):
    # pylint: disable=R0912
    start_time = time.perf_counter()
    log_ingest_url = urlparse(dynatrace_url.rstrip("/") + "/api/v2/logs/ingest").geturl()
    batches = prepare_serialized_batches(logs)

    number_of_http_errors = 0
    for batch in batches:
        batch_logs = batch[0]
        number_of_logs_in_batch = batch[1]
        encoded_body_bytes = batch_logs.encode("UTF-8")
        display_payload_size = round((len(encoded_body_bytes) / 1024), 3)
        logging.info(f'Log ingest payload size: {display_payload_size} kB')
        sent = False
        try:
            sent = _send_logs(dynatrace_token, encoded_body_bytes, log_ingest_url, self_monitoring, sent)
        except HTTPError as e:
            raise e
        except Exception as e:
            logging.exception("Failed to ingest logs", "ingesting-logs-exception")
            self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.Other)
            number_of_http_errors += 1
            # all http requests failed and this is the last batch, raise this exception to trigger retry
            if number_of_http_errors == len(batches):
                raise e
        finally:
            self_monitoring.sending_time = time.perf_counter() - start_time
        if sent:
            self_monitoring.log_ingest_payload_size += display_payload_size
            self_monitoring.sent_log_entries += number_of_logs_in_batch
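# _send_logs is called above but not shown in this section. A minimal sketch of
# what it might look like, assuming it performs a single POST through the
# _perform_http_request helper used by the older send_logs variant at the end
# of this section and maps response codes onto DynatraceConnectivity in the
# same way. This body is an illustration, not the actual implementation, and
# the status-code mapping is abbreviated (see the older variant for the
# complete one).
def _send_logs(dynatrace_token, encoded_body_bytes, log_ingest_url, self_monitoring, sent):
    self_monitoring.all_requests += 1
    status, reason, response = _perform_http_request(
        method="POST",
        url=log_ingest_url,
        encoded_body_bytes=encoded_body_bytes,
        headers={
            "Authorization": f"Api-Token {dynatrace_token}",
            "Content-Type": "application/json; charset=utf-8"
        }
    )
    if status > 299:
        logging.error(f'Log ingest error: {status}, reason: {reason}, url: {log_ingest_url}, body: "{response}"')
        if status in (413, 429):
            # throttled: surface as HTTPError so the caller re-raises it and a retry is triggered
            self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.TooManyRequests)
            raise HTTPError(log_ingest_url, 429, "Dynatrace throttling response", "", "")
        self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.Other)
    else:
        self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.Ok)
        logging.info("Log ingest payload pushed successfully")
        sent = True
    return sent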
def test_content_trimmed():
    content_length_limit = 100
    content_length_limit_backup = logs_ingest.main.content_length_limit

    # given
    log_entry = create_log_entry(log_message)
    logs_ingest.main.content_length_limit = content_length_limit

    # when
    try:
        actual_output = parse_record(log_entry, SelfMonitoring(execution_time=datetime.utcnow()))
    finally:
        # restore original value
        logs_ingest.main.content_length_limit = content_length_limit_backup

    # then
    expected_content = "{\"content\": \"WALTHAM, Mass.--(BUSINESS WIRE)-- Software intelligence company Dynatrace (N[TRUNCATED]"
    assert len(actual_output["content"]) == content_length_limit
    assert actual_output["content"] == expected_content
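# The content-trimming tests above imply that parse_record caps the serialized
# content at content_length_limit and ends a trimmed value with a
# "[TRUNCATED]" marker so the total length equals the limit. A minimal sketch
# of that trimming rule, using a hypothetical helper name (trim_content); the
# real logic lives in logs_ingest.main and may differ in detail.
TRUNCATED_MARKER = "[TRUNCATED]"

def trim_content(content: str, limit: int) -> str:
    if len(content) <= limit:
        # content exactly at the limit is left untouched,
        # as test_content_with_exact_len_not_trimmed expects
        return content
    return content[:limit - len(TRUNCATED_MARKER)] + TRUNCATED_MARKER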
def test_log_forwarder_setup():
    cloud_log_forwarder_backup = logs_ingest.main.cloud_log_forwarder
    logs_ingest.main.cloud_log_forwarder = "MyLogForwarderSetup"

    # given
    test_record = {
        "cloud.provider": "Azure",
        "severity": "INFO",
        "content": '{"content": "WALTHAM, Mass.--(BUSINESS WIRE)-- Software intelligence company Dynatrace (NYSE: DT)"}'
    }

    # when
    try:
        actual_output = parse_record(test_record, SelfMonitoring(execution_time=datetime.utcnow()))
    finally:
        logs_ingest.main.cloud_log_forwarder = cloud_log_forwarder_backup

    # then
    assert actual_output['cloud.log_forwarder'] == "MyLogForwarderSetup"
def self_monitoring():
    return SelfMonitoring(execution_time=datetime.utcnow())
def test_trimming_attribute_values(monkeypatch: MonkeyPatchFixture):
    monkeypatch.setattr(main, 'attribute_value_length_limit', 4)
    actual_output = main.parse_record(record, SelfMonitoring(execution_time=datetime.utcnow()))
    assert actual_output == expected_output_attribute_values_trimmed
def test_default():
    actual_output = main.parse_record(record, SelfMonitoring(execution_time=datetime.utcnow()))
    assert actual_output == expected_output
def test_not_known_category():
    actual_output = parse_record(not_known_category_record,
                                 SelfMonitoring(execution_time=datetime.utcnow()))
    assert actual_output == not_known_category_expected_output
def test_function_app_logs():
    actual_output = parse_record(function_app_logs_record,
                                 SelfMonitoring(execution_time=datetime.utcnow()))
    assert actual_output == function_app_logs_expected_output
def test_api_management_service():
    actual_output = parse_record(record, SelfMonitoring(execution_time=datetime.utcnow()))
    assert actual_output == expected_output
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# script for testing sending implementation and logs ingest endpoint responses,
# loads dynatrace URL and token from local.settings.json

import json
import os
import time
from datetime import datetime

from logs_ingest.dynatrace_client import send_logs
from logs_ingest.self_monitoring import SelfMonitoring

source_directory = os.path.dirname(os.path.realpath(__file__))
local_settings_json_path = os.path.join(source_directory, "../../local.settings.json")
with open(local_settings_json_path) as local_settings_json_file:
    local_settings_json = json.load(local_settings_json_file)

logs = [
    {
        "cloud.provider": "Azure",
        "timestamp": time.time(),
        "content": "TOO_LONG" * 8192
    }
    for i in range(1)
]

send_logs(local_settings_json["Values"]["DYNATRACE_URL"],
          local_settings_json["Values"]["DYNATRACE_ACCESS_KEY"],
          logs,
          SelfMonitoring(execution_time=datetime.utcnow()))
def test_event_hub_namespace():
    actual_output = parse_record(record, SelfMonitoring(execution_time=datetime.utcnow()))
    assert actual_output == expected_output
def test_kube_audit():
    output = parse_record(kube_audit_record, SelfMonitoring(execution_time=datetime.utcnow()))
    assert output == kube_audit_expected_output
def test_kube_controller_manager():
    output = parse_record(kube_controller_manager_record,
                          SelfMonitoring(execution_time=datetime.utcnow()))
    assert output == kube_controller_manager_expected_output
def send_logs(dynatrace_url: str, dynatrace_token: str, logs: List[Dict], self_monitoring: SelfMonitoring):
    # pylint: disable=R0912
    start_time = time.time()
    log_ingest_url = urlparse(dynatrace_url + "/api/v2/logs/ingest").geturl()
    batches = prepare_serialized_batches(logs)

    number_of_http_errors = 0
    for batch in batches:
        try:
            encoded_body_bytes = batch.encode("UTF-8")
            logging.info('Log ingest payload size: {} kB'.format(round((len(encoded_body_bytes) / 1024), 3)))
            self_monitoring.all_requests += 1
            status, reason, response = _perform_http_request(
                method="POST",
                url=log_ingest_url,
                encoded_body_bytes=encoded_body_bytes,
                headers={
                    "Authorization": f"Api-Token {dynatrace_token}",
                    "Content-Type": "application/json; charset=utf-8"
                }
            )
            if status > 299:
                logging.error(f'Log ingest error: {status}, reason: {reason}, url: {log_ingest_url}, body: "{response}"')
                if status == 400:
                    self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.InvalidInput)
                elif status == 401:
                    self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.ExpiredToken)
                elif status == 403:
                    self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.WrongToken)
                elif status in (404, 405):
                    self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.WrongURL)
                elif status in (413, 429):
                    self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.TooManyRequests)
                    raise HTTPError(log_ingest_url, 429, "Dynatrace throttling response", "", "")
                elif status == 500:
                    self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.Other)
                    raise HTTPError(log_ingest_url, 500, "Dynatrace server error", "", "")
            else:
                self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.Ok)
                logging.info("Log ingest payload pushed successfully")
        except HTTPError as e:
            raise e
        except Exception as e:
            logging.exception("Failed to ingest logs")
            self_monitoring.dynatrace_connectivities.append(DynatraceConnectivity.Other)
            number_of_http_errors += 1
            # all http requests failed and this is the last batch, raise this exception to trigger retry
            if number_of_http_errors == len(batches):
                raise e
        finally:
            self_monitoring.sending_time = time.time() - start_time
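# prepare_serialized_batches is used by both send_logs variants but is not
# defined in this section. A minimal sketch matching the older variant above,
# which expects a list of JSON-serialized batch strings; the parameter name
# request_body_max_size, its default value, and the exact splitting strategy
# are assumptions (and `import json` is assumed at module top).
def prepare_serialized_batches(logs: List[Dict], request_body_max_size: int = 1048576) -> List[str]:
    batches = []
    current_batch = []
    current_size = len("[]")  # account for the enclosing JSON array brackets
    for log_entry in logs:
        serialized = json.dumps(log_entry)
        entry_size = len(serialized.encode("UTF-8")) + len(",")
        if current_batch and current_size + entry_size > request_body_max_size:
            # adding this entry would exceed the request size limit,
            # so close the current batch and start a new one
            batches.append("[" + ",".join(current_batch) + "]")
            current_batch = []
            current_size = len("[]")
        current_batch.append(serialized)
        current_size += entry_size
    if current_batch:
        batches.append("[" + ",".join(current_batch) + "]")
    return batches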