Example #1
async def async_dynatrace_gcp_extension(
        project_ids: Optional[List[str]] = None,
        services: Optional[List[GCPService]] = None):
    """
    Used in Docker or for tests.
    """
    timestamp_utc = datetime.utcnow()
    timestamp_utc_iso = timestamp_utc.isoformat()
    execution_identifier = hashlib.md5(
        timestamp_utc_iso.encode("UTF-8")).hexdigest()
    logging_context = LoggingContext(execution_identifier)
    logging_context.log(f'Starting execution for project(s): {project_ids}'
                        if project_ids else "Starting execution")
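    # Build a synthetic Pub/Sub-style payload so the shared handle_event() can run outside a real trigger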
    event_context = {
        'timestamp': timestamp_utc_iso,
        'event_id': timestamp_utc.timestamp(),
        'event_type': 'test',
        'execution_id': execution_identifier
    }
    data = {'data': '', 'publishTime': timestamp_utc_iso}

    start_time = time.time()
    await handle_event(data, event_context, project_ids, services)
    elapsed_time = time.time() - start_time
    logging_context.log(f"Execution took {elapsed_time}\n")
Example #2
def _load_configs(self):
    context = LoggingContext("ME startup")
    working_directory = os.path.dirname(os.path.realpath(__file__))
    config_directory = os.path.join(working_directory, "../../config_logs")
    config_files = [
        file for file in listdir(config_directory)
        if isfile(os.path.join(config_directory, file)) and _is_json_file(file)
    ]
    for file in config_files:
        config_file_path = os.path.join(config_directory, file)
        try:
            with open(config_file_path) as config_file:
                config_json = json.load(config_file)
                # Route each config file to the matching rule bucket based on its "name" field
                if config_json.get("name", "") == DEFAULT_RULE_NAME:
                    self.default_rule = _create_config_rules(context, config_json)[0]
                elif config_json.get("name", "") == COMMON_RULE_NAME:
                    self.common_rule = _create_config_rules(context, config_json)[0]
                elif config_json.get("name", "").startswith(AUDIT_LOGS_RULE):
                    self.audit_logs_rules = _create_config_rules(context, config_json)
                else:
                    self.rules.extend(_create_config_rules(context, config_json))
        except Exception:
            context.exception(f"Failed to load configuration file: '{config_file_path}'")
Example #3
def generate_metadata():
    toc = []
    units = set()
    unmapped_units = set()
    # some metrics are used by multiple services, so the script will encounter them multiple times
    visited_metric_keys = set()
    supported_services = load_supported_services(LoggingContext(None), [])

    prepare_metric_metadata_dir()

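    # Write one metadata file per unique metric and collect the filenames for the table of contents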
    for supported_service in supported_services:
        print(f"\n => {supported_service.name}")
        for metric in supported_service.metrics:
            print(f"{metric.dynatrace_name}")
            if metric.dynatrace_name in visited_metric_keys:
                print(" - Already mapped, skipping")
                continue
            else:
                visited_metric_keys.add(metric.dynatrace_name)

            metadata = create_metadata(metric, unmapped_units)

            if not metadata:
                continue

            filename = write_metadata(metadata, metric)

            units.add(metric.unit)
            toc.append(filename)

    write_toc(toc)

    print(f"\nFound units: {units}")
    print(f"\nFailed to map units: {unmapped_units}")
def generate_ddu_estimation():
    supported_services = load_supported_services(LoggingContext(None), [])
    print("|| name || data points rate (/min) || estimated DDU rate (/min) (1 data point = 0.001 DDU)||")
    for supported_service in supported_services:
        data_points_per_minute = 0

        for metric in supported_service.metrics:
            dimensions_multiplier = (ASSUMED_AVG_DIMENSION_VALUES ** len(metric.dimensions))
            rate_per_minute = (metric.sample_period_seconds.seconds / 60.0)
            data_points_per_minute += rate_per_minute * dimensions_multiplier

        ddu_estimation = round(data_points_per_minute * DATA_POINT_WEIGHT, DECIMAL_PLACES)
        data_points_rate_estimation = round(data_points_per_minute, 0)
        print(f"| {supported_service.name} | {data_points_rate_estimation} | {ddu_estimation} |")
def run_ack_logs(worker_name: str, sfm_queue: Queue):
    logging_context = LoggingContext(worker_name)
    subscriber_client = pubsub.SubscriberClient()
    subscription_path = subscriber_client.subscription_path(
        LOGS_SUBSCRIPTION_PROJECT, LOGS_SUBSCRIPTION_ID)
    logging_context.log("Starting processing")

    worker_state = WorkerState(worker_name)
    while True:
        try:
            perform_pull(worker_state, sfm_queue, subscriber_client,
                         subscription_path)
        except Exception as e:
            logging_context.exception("Failed to pull messages")
async def test_execution_expired_token():
    expected_cluster_response_code = 401
    expected_sent_requests = 3

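    # Make the mocked Dynatrace cluster reject every ingest request with 401 "Expired token"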
    response(expected_cluster_response_code, "Expired token")

    ack_queue = Queue()
    sfm_queue = Queue()
    mock_subscriber_client = MockSubscriberClient(ack_queue)

    expected_ack_ids = [f"ACK_ID_{i}" for i in range(0, 10)]

    message_data_json = json.loads(LOG_MESSAGE_DATA)
    message_data_json["timestamp"] = datetime.utcnow().isoformat() + "Z"
    fresh_message_data = json.dumps(message_data_json)

    for ack_id in expected_ack_ids:
        message = create_fake_message(ack_id=ack_id, message_data=fresh_message_data)
        mock_subscriber_client.add_message(message)

    worker_state = WorkerState("TEST")
    perform_pull(worker_state, sfm_queue, mock_subscriber_client, "")
    # Flush down rest of messages
    perform_flush(worker_state, sfm_queue, mock_subscriber_client, "")

    metadata = InstanceMetadata(
        project_id="",
        container_name="",
        token_scopes="",
        service_account="",
        audience="",
        hostname="local deployment 2",
        zone="us-east1"
    )

    self_monitoring = LogSelfMonitoring()
    await log_self_monitoring._loop_single_period(self_monitoring, sfm_queue, LoggingContext("TEST"), metadata)
    sfm_queue.join()

    assert ack_queue.qsize() == 0

    verify_requests(expected_cluster_response_code, expected_sent_requests)

    assert self_monitoring.too_old_records == 0
    assert self_monitoring.parsing_errors == 0
    assert self_monitoring.records_with_too_long_content == 0
    assert Counter(self_monitoring.dynatrace_connectivity) == {DynatraceConnectivity.ExpiredToken: 3}
    assert self_monitoring.processing_time > 0
    assert self_monitoring.sending_time > 0
def create_dimension(
    name: str, value: Any,
    context: LoggingContext = LoggingContext(None)) -> DimensionValue:
    string_value = str(value)

    if len(name) > MAX_DIMENSION_NAME_LENGTH:
        context.log(
            f'MINT rejects dimension names longer than {MAX_DIMENSION_NAME_LENGTH} chars. Dimension name "{name}" has been truncated'
        )
        name = name[:MAX_DIMENSION_NAME_LENGTH]
    if len(string_value) > MAX_DIMENSION_VALUE_LENGTH:
        context.log(
            f'MINT rejects dimension values longer than {MAX_DIMENSION_VALUE_LENGTH} chars. Dimension value "{string_value}" has been truncated'
        )
        string_value = string_value[:MAX_DIMENSION_VALUE_LENGTH]

    return DimensionValue(name, string_value)
async def async_dynatrace_gcp_extension():
    timestamp_utc = datetime.utcnow()
    timestamp_utc_iso = timestamp_utc.isoformat()
    execution_identifier = hashlib.md5(timestamp_utc_iso.encode("UTF-8")).hexdigest()
    logging_context = LoggingContext(execution_identifier)
    logging_context.log("Starting execution")
    event_context = {
        'timestamp': timestamp_utc_iso,
        'event_id': timestamp_utc.timestamp(),
        'event_type': 'test',
        'execution_id': execution_identifier
    }
    data = {'data': '', 'publishTime': timestamp_utc_iso}

    start_time = time.time()
    await handle_event(data, event_context, "dynatrace-gcp-extension")
    elapsed_time = time.time() - start_time
    logging_context.log(f"Execution took {elapsed_time}\n")
async def test_empty_activation_config(mocker: MockerFixture,
                                       monkeypatch: MonkeyPatchFixture):
    # NO filestore/default configured
    monkeypatch.setenv("ACTIVATION_CONFIG", "{services: []}")

    dt_session = ClientSession()
    mocker.patch.object(dt_session, 'get', side_effect=mocked_get)

    extensions_fetcher = ExtensionsFetcher(dt_session, "", "",
                                           LoggingContext("TEST"))
    result = await extensions_fetcher.execute()
    assert_that(result).is_not_none()
    feature_sets_to_filter_conditions = {
        f"{gcp_service_config.name}/{gcp_service_config.feature_set}":
        gcp_service_config.monitoring_filter
        for gcp_service_config in result.services
    }
    assert_that(feature_sets_to_filter_conditions).is_equal_to({})
Example #10
def run_ack_logs(worker_name: str, sfm_queue: Queue):
    logging_context = LoggingContext(worker_name)
    subscriber_client = pubsub.SubscriberClient()
    subscription_path = subscriber_client.subscription_path(
        LOGS_SUBSCRIPTION_PROJECT, LOGS_SUBSCRIPTION_ID)
    logging_context.log("Starting processing")

    worker_state = WorkerState(worker_name)
    while True:
        try:
            perform_pull(worker_state, sfm_queue, subscriber_client,
                         subscription_path)
        except Exception as e:
            if isinstance(e, Forbidden):
                logging_context.error(
                    f"{e} Please check whether assigned service account has permission to fetch Pub/Sub messages."
                )
            else:
                logging_context.exception("Failed to pull messages")
            # Backoff for 1 minute to avoid spamming requests and logs
            time.sleep(60)
def generate_ddu_estimation():
    supported_services = load_supported_services(LoggingContext(None), [])

    print("|| Service name || Configuration || DDU per minute per instance ||")

    for supported_service in supported_services:
        data_points_per_minute = 0

        for metric in supported_service.metrics:
            dimensions_multiplier = (ASSUMED_AVG_DIMENSION_VALUES**len(
                metric.dimensions))
            rate_per_minute = (metric.sample_period_seconds.seconds / 60.0)
            data_points_per_minute += rate_per_minute * dimensions_multiplier

        ddu_estimation = round(data_points_per_minute * DATA_POINT_WEIGHT,
                               DECIMAL_PLACES)
        data_points_rate_estimation = round(data_points_per_minute, 0)
        feature_set = "/" + supported_service.feature_set

        print(
            f"| {supported_service.technology_name} | {supported_service.name}{feature_set} | {ddu_estimation} |"
        )
async def test_execute(mocker: MockerFixture, monkeypatch: MonkeyPatchFixture):
    # NO filestore/default configured
    monkeypatch.setenv("ACTIVATION_CONFIG", ACTIVATION_CONFIG)
    dt_session = ClientSession()
    mocker.patch.object(dt_session, 'get', side_effect=mocked_get)

    extensions_fetcher = ExtensionsFetcher(dt_session, "", "",
                                           LoggingContext("TEST"))
    result = await extensions_fetcher.execute()
    assert_that(result).is_not_none()
    feature_sets_to_filter_conditions = {
        f"{gcp_service_config.name}/{gcp_service_config.feature_set}":
        gcp_service_config.monitoring_filter
        for gcp_service_config in result.services
    }
    assert_that(feature_sets_to_filter_conditions).is_equal_to({
        "cloudsql_database/default_metrics":
        "",
        "gce_instance/default_metrics":
        "resource.labels.instance_name=starts_with(\"test\")",
        "gce_instance/agent":
        "resource.labels.instance_name=starts_with(\"test\")"
    })
Example #13
async def run_worker_with_messages(
    messages: List[ReceivedMessage],
    expected_ack_ids: List[str],
) -> LogSelfMonitoring:
    ack_queue = Queue()
    sfm_queue = Queue()
    mock_subscriber_client = MockSubscriberClient(ack_queue, messages)

    test_worker_state = WorkerState("TEST")
    perform_pull(test_worker_state, sfm_queue, mock_subscriber_client, "")
    # Flush down rest of messages
    perform_flush(test_worker_state, sfm_queue, mock_subscriber_client, "")

    metadata = InstanceMetadata(project_id="",
                                container_name="",
                                token_scopes="",
                                service_account="",
                                audience="",
                                hostname="local deployment 1",
                                zone="us-east1")

    self_monitoring = LogSelfMonitoring()
    await log_self_monitoring._loop_single_period(self_monitoring, sfm_queue,
                                                  LoggingContext("TEST"),
                                                  metadata)
    sfm_queue.join()

    assert ack_queue.qsize() == len(expected_ack_ids)
    while ack_queue.qsize() > 0:
        ack_id = ack_queue.get_nowait()
        assert ack_id in expected_ack_ids
        expected_ack_ids.remove(ack_id)

    assert len(expected_ack_ids) == 0

    return self_monitoring
print(",,                                        ,,,,,,,,,,,,,,,,")
print(",,,,,,,,,,,,,,,,,                        .,,,,,,,,,,,,,,,,")
print(",,,,,,,,,,,,,,,,,                        .,,,,,,,,,,,,,,,,.")
print(",,,,,,,,,,,,,,,,,       Dynatrace        .,,,,,,,,,,,,,,,,.")
print(",,,,,,,,,,,,,,,,, dynatrace-gcp-function .,,,,,,,,,,,,,,,,,")
print(",,,,,,,,,,,,,,,,,                        .,,,,,,,,,,,,,,,,,")
print(",,,,,,,,,,,,,,,,,                        ,,,,,,,,,,,,,,,,,,")
print(",,,,,,,,,,,,,,,,,                        ,,,,,,,,,,,,,,,,,,")
print(".,,,,,,,,,,,,,,,                         ,,,,,,,,,,,,,,,,,")
print(".,,,,,,,,,,,,,    .,,,,,,,,,,,,,,,,,,.   ,,,,,,,,,,,,,,,")
print(" ,,,,,,,,,,     ,,,,,,,,,,,,,,,,,,,,,,  .,,,,,,,,,,,,.")
print(" ,,,,,,,     ,,,,,,,,,,,,,,,,,,,,,,,,,  ,,,,,,,,,,,")
print(" ,,,,,    .,,,,,,,,,,,,,,,,,,,,,,,,,,.  ,,,,,,,,")
print("  ,     ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,  ,,,,,,,")
print("     ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,  ,,,,")
print("")

logging_context = LoggingContext(None)

logging_context.log(
    "Dynatrace function for Google Cloud Platform monitoring\n")

if "GCP_SERVICES" in os.environ:
    services = os.environ.get("GCP_SERVICES", "")
    print(f"Running with configured services: {services}")

logging_context.log("Setting up... \n")
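# Schedule the recurring polling task and block on the asyncio event loop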
loop = asyncio.get_event_loop()
loop.create_task(scheduling_loop())
loop.run_forever()
async def handle_event(event: Dict,
                       event_context,
                       project_id_owner: Optional[str],
                       projects_ids: Optional[List[str]] = None):
    if isinstance(event_context, Dict):
        context = LoggingContext(event_context.get("execution_id", None))
    else:
        context = LoggingContext(None)

    selected_services = None
    if "GCP_SERVICES" in os.environ:
        selected_services_string = os.environ.get("GCP_SERVICES", "")
        selected_services = selected_services_string.split(
            ",") if selected_services_string else []
        # set the default feature set when it is not present in the env variable
        for i, service in enumerate(selected_services):
            if "/" not in service:
                selected_services[i] = f"{service}/default"

    services = load_supported_services(context, selected_services)

    async with init_gcp_client_session(
    ) as gcp_session, init_dt_client_session() as dt_session:
        setup_start_time = time.time()
        token = await create_token(context, gcp_session)

        if token is None:
            context.log(
                "Cannot proceed without authorization token, stopping the execution"
            )
            return
        if not isinstance(token, str):
            raise Exception(
                f"Failed to fetch access token, got non string value: {token}")

        context.log("Successfully obtained access token")

        if not project_id_owner:
            project_id_owner = get_project_id_from_environment()

        dynatrace_api_key = await fetch_dynatrace_api_key(
            gcp_session=gcp_session, project_id=project_id_owner, token=token)
        dynatrace_url = await fetch_dynatrace_url(gcp_session=gcp_session,
                                                  project_id=project_id_owner,
                                                  token=token)

        print_metric_ingest_input = \
            "PRINT_METRIC_INGEST_INPUT" in os.environ and os.environ["PRINT_METRIC_INGEST_INPUT"].upper() == "TRUE"

        self_monitoring_enabled = os.environ.get('SELF_MONITORING_ENABLED',
                                                 "False").upper() == "TRUE"

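        # Replace the plain LoggingContext with a MetricsContext carrying sessions, tokens and settings for this run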
        context = MetricsContext(
            gcp_session=gcp_session,
            dt_session=dt_session,
            project_id_owner=project_id_owner,
            token=token,
            execution_time=datetime.utcnow(),
            execution_interval_seconds=60 * 1,
            dynatrace_api_key=dynatrace_api_key,
            dynatrace_url=dynatrace_url,
            print_metric_ingest_input=print_metric_ingest_input,
            self_monitoring_enabled=self_monitoring_enabled,
            scheduled_execution_id=context.scheduled_execution_id)

        if not projects_ids:
            projects_ids = await get_all_accessible_projects(
                context, gcp_session, token)

        setup_time = (time.time() - setup_start_time)
        context.setup_execution_time = {
            project_id: setup_time
            for project_id in projects_ids
        }

        context.start_processing_timestamp = time.time()

        process_project_metrics_tasks = [
            process_project_metrics(context, project_id, services)
            for project_id in projects_ids
        ]
        await asyncio.gather(*process_project_metrics_tasks,
                             return_exceptions=True)
        context.log(
            f"Fetched and pushed GCP data in {time.time() - context.start_processing_timestamp} s"
        )

        log_self_monitoring_data(context)
        if context.self_monitoring_enabled:
            await push_self_monitoring(context)

        await gcp_session.close()
        await dt_session.close()
async def test_execution_successful():
    expected_cluster_response_code = 200

    response(expected_cluster_response_code, "Success")

    ack_queue = Queue()
    sfm_queue = Queue()
    mock_subscriber_client = MockSubscriberClient(ack_queue)

    expected_ack_ids = [f"ACK_ID_{i}" for i in range(0, 13)]
    expected_ack_ids_of_valid_messages = [f"ACK_ID_{i}" for i in range(0, 10)]

    message_data_json = json.loads(LOG_MESSAGE_DATA)
    message_data_json["timestamp"] = datetime.utcnow().isoformat() + "Z"
    fresh_message_data = json.dumps(message_data_json)

    for ack_id in expected_ack_ids_of_valid_messages:
        message = create_fake_message(ack_id=ack_id, message_data=fresh_message_data)
        mock_subscriber_client.add_message(message)

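    # Add three invalid messages: one with too-long content, one too old, and one that cannot be parsed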
    message_data_json["content"] = "CONTENT"*100
    too_long_content_message_data = json.dumps(message_data_json)
    message = create_fake_message(ack_id='ACK_ID_10', message_data=too_long_content_message_data)
    mock_subscriber_client.add_message(message)

    message_data_json = json.loads(LOG_MESSAGE_DATA)
    too_old_message_data = json.dumps(message_data_json)
    message = create_fake_message(ack_id='ACK_ID_11', message_data=too_old_message_data)
    mock_subscriber_client.add_message(message)

    invalid_message_data_json = json.loads(INVALID_LOG_MESSAGE_DATA)
    invalid_message_data = json.dumps(invalid_message_data_json)
    message = create_fake_message(ack_id='ACK_ID_12', message_data=invalid_message_data)
    mock_subscriber_client.add_message(message)

    worker_state = WorkerState("TEST")
    perform_pull(worker_state, sfm_queue, mock_subscriber_client, "")
    # Flush down rest of messages
    perform_flush(worker_state, sfm_queue, mock_subscriber_client, "")

    metadata = InstanceMetadata(
        project_id="",
        container_name="",
        token_scopes="",
        service_account="",
        audience="",
        hostname="local deployment 1",
        zone="us-east1"
    )

    self_monitoring = LogSelfMonitoring()
    await log_self_monitoring._loop_single_period(self_monitoring, sfm_queue, LoggingContext("TEST"), metadata)
    sfm_queue.join()

    assert ack_queue.qsize() == len(expected_ack_ids)
    while ack_queue.qsize() > 0:
        ack_id = ack_queue.get_nowait()
        assert ack_id in expected_ack_ids
        expected_ack_ids.remove(ack_id)

    verify_requests(expected_cluster_response_code, 3)

    assert self_monitoring.too_old_records == 1
    assert self_monitoring.parsing_errors == 1
    assert self_monitoring.records_with_too_long_content == 1
    assert Counter(self_monitoring.dynatrace_connectivity) == {DynatraceConnectivity.Ok: 3}
    assert self_monitoring.processing_time > 0
    assert self_monitoring.sending_time > 0
    assert self_monitoring.sent_logs_entries == 11
Example #17
async def handle_event(event: Dict,
                       event_context,
                       projects_ids: Optional[List[str]] = None,
                       services: Optional[List[GCPService]] = None):
    if isinstance(event_context, Dict):
        # for k8s installation
        context = LoggingContext(event_context.get("execution_id", None))
    else:
        context = LoggingContext(None)

    if not services:
        # load services for GCP Function
        services = load_supported_services(context)

    async with init_gcp_client_session(
    ) as gcp_session, init_dt_client_session() as dt_session:
        setup_start_time = time.time()
        token = await create_token(context, gcp_session)

        if token is None:
            context.log(
                "Cannot proceed without authorization token, stopping the execution"
            )
            return
        if not isinstance(token, str):
            raise Exception(
                f"Failed to fetch access token, got non string value: {token}")

        context.log("Successfully obtained access token")

        project_id_owner = get_project_id_from_environment()

        dynatrace_api_key = await fetch_dynatrace_api_key(
            gcp_session=gcp_session, project_id=project_id_owner, token=token)
        dynatrace_url = await fetch_dynatrace_url(gcp_session=gcp_session,
                                                  project_id=project_id_owner,
                                                  token=token)
        check_version(logging_context=context)
        await check_dynatrace(logging_context=context,
                              project_id=project_id_owner,
                              dt_session=dt_session,
                              dynatrace_url=dynatrace_url,
                              dynatrace_access_key=dynatrace_api_key)
        query_interval_min = get_query_interval_minutes()

        print_metric_ingest_input = os.environ.get(
            "PRINT_METRIC_INGEST_INPUT", "FALSE").upper() in ["TRUE", "YES"]
        self_monitoring_enabled = os.environ.get(
            'SELF_MONITORING_ENABLED', "FALSE").upper() in ["TRUE", "YES"]

        context = MetricsContext(
            gcp_session=gcp_session,
            dt_session=dt_session,
            project_id_owner=project_id_owner,
            token=token,
            execution_time=datetime.utcnow(),
            execution_interval_seconds=60 * query_interval_min,
            dynatrace_api_key=dynatrace_api_key,
            dynatrace_url=dynatrace_url,
            print_metric_ingest_input=print_metric_ingest_input,
            self_monitoring_enabled=self_monitoring_enabled,
            scheduled_execution_id=context.scheduled_execution_id)

        if not projects_ids:
            projects_ids = await get_all_accessible_projects(
                context, gcp_session, token)

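        # Check which APIs are disabled in each project and skip projects without the Monitoring API enabled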
        disabled_apis = {}
        disabled_projects = []
        for project_id in projects_ids:
            await check_x_goog_user_project_header_permissions(
                context, project_id)
            disabled_apis[project_id] = await get_all_disabled_apis(context, project_id)
            if 'monitoring.googleapis.com' in disabled_apis[project_id]:
                disabled_projects.append(project_id)

        if disabled_projects:
            context.log(
                "monitoring.googleapis.com API is disabled in the following projects: " +
                ", ".join(disabled_projects) +
                "; these projects will not be monitored")
            for disabled_project in disabled_projects:
                projects_ids.remove(disabled_project)

        setup_time = (time.time() - setup_start_time)
        context.setup_execution_time = {
            project_id: setup_time
            for project_id in projects_ids
        }

        context.start_processing_timestamp = time.time()

        process_project_metrics_tasks = [
            process_project_metrics(context, project_id, services,
                                    disabled_apis.get(project_id, set()))
            for project_id in projects_ids
        ]
        await asyncio.gather(*process_project_metrics_tasks,
                             return_exceptions=True)
        context.log(
            f"Fetched and pushed GCP data in {time.time() - context.start_processing_timestamp} s"
        )

        log_self_monitoring_data(context)
        if context.self_monitoring_enabled:
            await push_self_monitoring(context)

        await gcp_session.close()
        await dt_session.close()
async def handle_event(event: Dict, event_context, project_id_owner: Optional[str]):
    if isinstance(event_context, Dict):
        context = LoggingContext(event_context.get("execution_id", None))
    else:
        context = LoggingContext(None)

    selected_services = None
    if "GCP_SERVICES" in os.environ:
        selected_services_string = os.environ.get("GCP_SERVICES", "")
        selected_services = selected_services_string.split(",") if selected_services_string else []
    services = load_supported_services(context, selected_services)

    async with aiohttp.ClientSession() as session:
        setup_start_time = time.time()
        token = await create_token(context, session)

        if token is None:
            context.log("Cannot proceed without authorization token, stopping the execution")
            return
        if not isinstance(token, str):
            raise Exception(f"Failed to fetch access token, got non string value: {token}")

        context.log("Successfully obtained access token")

        if not project_id_owner:
            project_id_owner = get_project_id_from_environment()

        dynatrace_api_key = await fetch_dynatrace_api_key(session=session, project_id=project_id_owner, token=token)
        dynatrace_url = await fetch_dynatrace_url(session=session, project_id=project_id_owner, token=token)

        print_metric_ingest_input = \
            "PRINT_METRIC_INGEST_INPUT" in os.environ and os.environ["PRINT_METRIC_INGEST_INPUT"].upper() == "TRUE"

        context = Context(
            session=session,
            project_id_owner=project_id_owner,
            token=token,
            execution_time=datetime.utcnow(),
            execution_interval_seconds=60 * 1,
            dynatrace_api_key=dynatrace_api_key,
            dynatrace_url=dynatrace_url,
            print_metric_ingest_input=print_metric_ingest_input,
            scheduled_execution_id=context.scheduled_execution_id
        )

        projects_ids = await get_all_accessible_projects(context, session)

        context.setup_execution_time = (time.time() - setup_start_time)

        fetch_gcp_data_start_time = time.time()

        fetch_ingest_lines_tasks = [fetch_ingest_lines_task(context, project_id, services) for project_id in projects_ids]
        ingest_lines_per_project = await asyncio.gather(*fetch_ingest_lines_tasks, return_exceptions=True)
        ingest_lines = [ingest_line for sublist in ingest_lines_per_project for ingest_line in sublist]

        context.fetch_gcp_data_execution_time = time.time() - fetch_gcp_data_start_time

        context.log(f"Fetched GCP data in {context.fetch_gcp_data_execution_time} s")

        await push_ingest_lines(context, ingest_lines)
        await push_self_monitoring_time_series(context)

        await session.close()
Example #19
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
from lib.context import LoggingContext
from lib.logs.metadata_engine import SourceMatcher, _create_config_rule

context = LoggingContext("TEST")


def test_resource_type_eq_source_matcher():
    matcher = SourceMatcher(context, "resourceType", "$eq('TEST')")
    assert matcher.match({}, {"gcp.resource.type": "TEST"})
    assert not matcher.match({}, {"gcp.resource.type": "NOT_TEST"})
    assert not matcher.match({}, {})


def test_resource_type_contains_source_matcher():
    matcher = SourceMatcher(context, "resourceType", "$contains('TEST')")
    assert matcher.match({}, {"gcp.resource.type": "GCP_TEST"})
    assert not matcher.match({}, {"gcp.resource.type": "BEST"})
    assert not matcher.match({}, {})