Example #1
def _create_dt_log_payload(context: LogsContext, message_data: str) -> Optional[Dict]:
    record = json.loads(message_data)
    parsed_record = {}

    # Let the metadata engine map raw record fields onto Dynatrace attributes
    _metadata_engine.apply(context, record, parsed_record)

    # Skip records whose timestamp falls outside the accepted ingest window
    parsed_timestamp = parsed_record.get(ATTRIBUTE_TIMESTAMP, None)
    if _is_log_too_old(parsed_timestamp):
        context.log(f"Skipping message due to too old timestamp: {parsed_timestamp}")
        context.self_monitoring.too_old_records += 1
        return None

    # Stringify and truncate every attribute except content, severity and timestamp
    for attribute_key, attribute_value in parsed_record.items():
        if attribute_key not in ["content", "severity", "timestamp"] and attribute_value:
            string_attribute_value = attribute_value
            if not isinstance(attribute_value, str):
                string_attribute_value = str(attribute_value)
            parsed_record[attribute_key] = string_attribute_value[:ATTRIBUTE_VALUE_LENGTH_LIMIT]

    # Serialize non-string content to JSON, then enforce the content length limit
    content = parsed_record.get(ATTRIBUTE_CONTENT, None)
    if content:
        if not isinstance(content, str):
            parsed_record[ATTRIBUTE_CONTENT] = json.dumps(parsed_record[ATTRIBUTE_CONTENT])
        if len(parsed_record[ATTRIBUTE_CONTENT]) >= CONTENT_LENGTH_LIMIT:
            parsed_record[ATTRIBUTE_CONTENT] = parsed_record[ATTRIBUTE_CONTENT][:CONTENT_LENGTH_LIMIT]
            context.self_monitoring.records_with_too_long_content += 1

    return parsed_record
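
The _is_log_too_old helper is referenced above but not shown in this example. A minimal sketch of what such an age check could look like, assuming ISO-8601 timestamps and a made-up LOG_AGE_LIMIT_MINUTES cutoff (both are assumptions, not the project's actual implementation):

from datetime import datetime, timedelta, timezone
from typing import Optional

LOG_AGE_LIMIT_MINUTES = 15  # assumed cutoff, for illustration only


def _is_log_too_old(timestamp: Optional[str]) -> bool:
    # Records without a parseable timestamp are treated as "not too old" here;
    # the real implementation may decide differently.
    if not timestamp:
        return False
    try:
        # fromisoformat() rejects a literal "Z" suffix, so normalize it to "+00:00"
        parsed = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
    except ValueError:
        return False
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=timezone.utc)
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=LOG_AGE_LIMIT_MINUTES)
    return parsed < cutoff
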
def put_sfm_into_queue(context: LogsContext):
    try:
        context.sfm_queue.put_nowait(context.self_monitoring)
    except queue.Full:
        # put_nowait() raises queue.Full when the queue is at capacity;
        # the metric batch is dropped rather than blocking the caller
        context.error("Failed to add self-monitoring metric to queue due to full sfm queue, rejecting the sfm")
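
Since put_nowait never blocks, a full queue simply drops the batch. On the consuming side, a drain loop might look like the sketch below (hypothetical, not the project's actual worker):

import queue
from typing import List


def drain_sfm_queue(sfm_queue: queue.Queue, batch_size: int = 100) -> List:
    """Pull up to batch_size buffered self-monitoring entries without blocking."""
    metrics = []
    try:
        for _ in range(batch_size):
            metrics.append(sfm_queue.get_nowait())
    except queue.Empty:
        # Queue drained before batch_size entries were collected
        pass
    return metrics
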
def send_logs(context: LogsContext, logs: List[LogProcessingJob], batch: str):
    # pylint: disable=R0912
    context.self_monitoring = aggregate_self_monitoring_metrics(
        LogSelfMonitoring(), [log.self_monitoring for log in logs])
    context.self_monitoring.sending_time_start = time.perf_counter()
    log_ingest_url = urlparse(context.dynatrace_url + "/api/v2/logs/ingest").geturl()

    try:
        encoded_body_bytes = batch.encode("UTF-8")
        context.self_monitoring.all_requests += 1
        status, reason, response = _perform_http_request(
            method="POST",
            url=log_ingest_url,
            encoded_body_bytes=encoded_body_bytes,
            headers={
                "Authorization": f"Api-Token {context.dynatrace_api_key}",
                "Content-Type": "application/json; charset=utf-8"
            })
        if status > 299:
            context.t_error(
                f'Log ingest error: {status}, reason: {reason}, url: {log_ingest_url}, body: "{response}"')
            # Map HTTP status codes onto self-monitoring connectivity states
            if status == 400:
                context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.InvalidInput)
            elif status == 401:
                context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.ExpiredToken)
            elif status == 403:
                context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.WrongToken)
            elif status in (404, 405):
                context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.WrongURL)
            elif status in (413, 429):
                context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.TooManyRequests)
            elif status == 500:
                context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.Other)

            raise HTTPError(log_ingest_url, status, reason, "", "")
        else:
            context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.Ok)
    except Exception as e:
        # Any non-HTTP failure (network error, encoding, ...) also counts as "Other"
        if not isinstance(e, HTTPError):
            context.self_monitoring.dynatrace_connectivity.append(DynatraceConnectivity.Other)
        raise e
    finally:
        # Always record the sending time and push self-monitoring, even on failure
        context.self_monitoring.calculate_sending_time()
        put_sfm_into_queue(context)
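
The _perform_http_request helper is not shown in this example. One plausible shape for it, assuming plain urllib and the (status, reason, response body) return contract the caller relies on:

import urllib.error
import urllib.request
from typing import Dict, Tuple


def _perform_http_request(method: str, url: str, encoded_body_bytes: bytes,
                          headers: Dict[str, str]) -> Tuple[int, str, str]:
    request = urllib.request.Request(url, data=encoded_body_bytes,
                                     headers=headers, method=method)
    try:
        # The 30 s timeout is an arbitrary choice for this sketch
        with urllib.request.urlopen(request, timeout=30) as response:
            return response.status, response.reason, response.read().decode("utf-8")
    except urllib.error.HTTPError as error:
        # Return error statuses instead of raising, so the caller's
        # status-code mapping in send_logs can run
        return error.code, error.reason, error.read().decode("utf-8")
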
Example #4
def create_logs_context(sfm_queue: Queue):
    dynatrace_api_key = get_dynatrace_api_key_from_env()
    dynatrace_url = get_dynatrace_log_ingest_url_from_env()
    project_id_owner = get_project_id_from_environment()

    return LogsContext(project_id_owner=project_id_owner,
                       dynatrace_api_key=dynatrace_api_key,
                       dynatrace_url=dynatrace_url,
                       # last 8 digits of the current epoch second serve as the execution id
                       scheduled_execution_id=str(int(time.time()))[-8:],
                       sfm_queue=sfm_queue)
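
The get_*_from_env helpers come from elsewhere in the project; a minimal sketch of what they might do, with the environment variable names being assumptions rather than the project's actual configuration:

import os


def get_dynatrace_api_key_from_env() -> str:
    return os.environ["DYNATRACE_ACCESS_KEY"]  # assumed variable name


def get_dynatrace_log_ingest_url_from_env() -> str:
    # Strip any trailing slash so "/api/v2/logs/ingest" concatenates cleanly
    return os.environ["DYNATRACE_LOG_INGEST_URL"].rstrip("/")  # assumed variable name


def get_project_id_from_environment() -> str:
    return os.environ["GCP_PROJECT"]  # assumed variable name
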
# Name inferred from the assertion in test_extraction_debug_text below
debug_text_record_expected_output = {
    ATTRIBUTE_CLOUD_REGION: 'europe-central2',
    ATTRIBUTE_GCP_REGION: 'europe-central2',
    ATTRIBUTE_GCP_PROJECT_ID: 'dynatrace-gcp-extension',
    ATTRIBUTE_GCP_RESOURCE_TYPE: 'cloud_function',
    ATTRIBUTE_GCP_INSTANCE_NAME: 'dynatrace-gcp-function',
    ATTRIBUTE_TIMESTAMP: timestamp,
    ATTRIBUTE_CONTENT: "Build failed: build succeeded but did not produce the class "
                       "\"com.example.Example\" specified as the function target: "
                       "Error: class not found: com.example.Example; Error ID: 108a9950",
    ATTRIBUTE_DT_LOGPATH: 'projects/dynatrace-gcp-extension/logs/cloudaudit.googleapis.com%2Factivity',
    'faas.name': 'dynatrace-gcp-function'
}

logs_context = LogsContext(project_id_owner="",
                           dynatrace_api_key="",
                           dynatrace_url="",
                           scheduled_execution_id="",
                           sfm_queue=Queue())


def test_extraction_debug_text():
    actual_output = _create_dt_log_payload(logs_context,
                                           json.dumps(debug_text_record))
    assert actual_output == debug_text_record_expected_output


def test_extraction_notice_json():
    actual_output = _create_dt_log_payload(logs_context,
                                           json.dumps(notice_json_record))
    assert actual_output == notice_json_record_expected_output
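
The input fixtures (debug_text_record, notice_json_record) are not included in this excerpt. For illustration only, a hypothetical text-payload record in the GCP LogEntry shape that would be consistent with the expected output above:

# Hypothetical fixture, not the actual test data
debug_text_record_example = {
    "logName": "projects/dynatrace-gcp-extension/logs/cloudaudit.googleapis.com%2Factivity",
    "resource": {
        "type": "cloud_function",
        "labels": {
            "function_name": "dynatrace-gcp-function",
            "project_id": "dynatrace-gcp-extension",
            "region": "europe-central2",
        },
    },
    "severity": "DEBUG",
    "textPayload": "Build failed: build succeeded but did not produce the class "
                   "\"com.example.Example\" specified as the function target: "
                   "Error: class not found: com.example.Example; Error ID: 108a9950",
    "timestamp": "2021-04-12T10:00:00.000Z",  # placeholder; the real tests share a timestamp variable
}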