async def get_dynatrace_token_metadata(dt_session: ClientSession, context: LoggingContext, dynatrace_url: str,
                                        dynatrace_api_key: str, timeout: Optional[int] = 2) -> dict:
    try:
        response = await dt_session.post(
            url=f"{dynatrace_url.rstrip('/')}/api/v1/tokens/lookup",
            headers={
                "Authorization": f"Api-Token {dynatrace_api_key}",
                "Content-Type": "application/json; charset=utf-8"
            },
            json={"token": dynatrace_api_key},
            verify_ssl=get_should_require_valid_certificate(),
            timeout=timeout)
        if response.status != 200:
            context.log(
                f'Unable to get Dynatrace token metadata: {response.status}, url: {response.url}, reason: {response.reason}'
            )
            return {}
        return await response.json()
    except Exception as e:
        context.log(f'Unable to get Dynatrace token metadata. Error details: {e}')
        return {}

async def async_dynatrace_gcp_extension(project_ids: Optional[List[str]] = None,
                                        services: Optional[List[GCPService]] = None):
    """Used in docker or for tests"""
    timestamp_utc = datetime.utcnow()
    timestamp_utc_iso = timestamp_utc.isoformat()
    execution_identifier = hashlib.md5(timestamp_utc_iso.encode("UTF-8")).hexdigest()
    logging_context = LoggingContext(execution_identifier)
    logging_context.log(f'Starting execution for project(s): {project_ids}' if project_ids else "Starting execution")
    event_context = {
        'timestamp': timestamp_utc_iso,
        'event_id': timestamp_utc.timestamp(),
        'event_type': 'test',
        'execution_id': execution_identifier
    }
    data = {'data': '', 'publishTime': timestamp_utc_iso}
    start_time = time.time()
    await handle_event(data, event_context, project_ids, services)
    elapsed_time = time.time() - start_time
    logging_context.log(f"Execution took {elapsed_time}\n")

async def _loop_single_period(self_monitoring: LogSelfMonitoring, sfm_queue: Queue, context: LoggingContext,
                              instance_metadata: InstanceMetadata):
    try:
        sfm_list = _pull_sfm(sfm_queue)
        if sfm_list:
            async with init_gcp_client_session() as gcp_session:
                context = await _create_sfm_logs_context(sfm_queue, context, gcp_session, instance_metadata)
                self_monitoring = aggregate_self_monitoring_metrics(self_monitoring, sfm_list)
                _log_self_monitoring_data(self_monitoring, context)
                if context.self_monitoring_enabled:
                    if context.token is None:
                        context.log("Cannot proceed without authorization token, failed to send log self monitoring")
                        return
                    if not isinstance(context.token, str):
                        context.log(f"Failed to fetch access token, got non string value: {context.token}")
                        return
                    time_series = create_self_monitoring_time_series(self_monitoring, context)
                    await push_self_monitoring_time_series(context, time_series)
                for _ in sfm_list:
                    sfm_queue.task_done()
    except Exception:
        context.exception("Log SFM Loop Exception:")

def load_supported_services(context: LoggingContext, selected_services: List[str]) -> List[GCPService]:
    working_directory = os.path.dirname(os.path.realpath(__file__))
    config_directory = os.path.join(working_directory, "config")
    config_files = [
        file for file in listdir(config_directory)
        if isfile(os.path.join(config_directory, file)) and is_yaml_file(file)
    ]

    services = []
    for file in config_files:
        config_file_path = os.path.join(config_directory, file)
        try:
            with open(config_file_path, encoding="utf-8") as config_file:
                config_yaml = yaml.safe_load(config_file)
                technology_name = extract_technology_name(config_yaml)

                for service_yaml in config_yaml.get("gcp", {}):
                    # If whitelist of services exists and current service is not present in it, skip
                    should_skip = selected_services and \
                        (service_yaml.get("service", "None") not in selected_services)
                    if should_skip:
                        continue
                    services.append(GCPService(tech_name=technology_name, **service_yaml))
        except Exception as error:
            context.log(f"Failed to load configuration file: '{config_file_path}'. Error details: {error}")
            continue

    services_names = [service.name for service in services]
    context.log("Selected services: " + ",".join(services_names))
    return services

def _create_config_rule(context: LoggingContext, entity_name: str, rule_json: Dict) -> Optional[ConfigRule]:
    sources_json = rule_json.get("sources", [])
    if entity_name not in SPECIAL_RULE_NAMES and not sources_json:
        context.log(f"Encountered invalid rule with missing sources for config entry named {entity_name}")
        return None
    sources = _create_sources(context, sources_json)
    if entity_name not in SPECIAL_RULE_NAMES and not sources:
        context.log(f"Encountered invalid rule with invalid sources for config entry named {entity_name}: {sources_json}")
        return None
    attributes = _create_attributes(context, rule_json.get("attributes", []))
    return ConfigRule(entity_type_name=entity_name, source_matchers=sources, attributes=attributes)

async def get_all_accessible_projects(context: LoggingContext, session: ClientSession, token: str):
    url = "https://cloudresourcemanager.googleapis.com/v1/projects"
    headers = {"Authorization": "Bearer {token}".format(token=token)}
    response = await session.get(url, headers=headers)
    response_json = await response.json()
    all_projects = [project["projectId"] for project in response_json.get("projects", [])]
    context.log("Access to following projects: " + ", ".join(all_projects))
    return all_projects

def _check_configuration_flags(logging_context: LoggingContext, flags_to_check: List[str]):
    configuration_flag_values = []
    for key in flags_to_check:
        value = os.environ.get(key, None)
        if value is None:
            configuration_flag_values.append(f"{key} is None")
        else:
            configuration_flag_values.append(f"{key} = '{value}'")
    logging_context.log(f"Found configuration flags: {', '.join(configuration_flag_values)}")

def _create_attributes(context: LoggingContext, attributes_json: List[Dict]) -> List[Attribute]:
    result = []
    for source_json in attributes_json:
        key = source_json.get("key", None)
        pattern = source_json.get("pattern", None)

        if key and pattern:
            result.append(Attribute(key, pattern))
        else:
            context.log(f"Encountered invalid rule attribute with missing parameter, parameters were: key = {key}, pattern = {pattern}")

    return result

def run_ack_logs(worker_name: str, sfm_queue: Queue):
    logging_context = LoggingContext(worker_name)
    subscriber_client = pubsub.SubscriberClient()
    subscription_path = subscriber_client.subscription_path(LOGS_SUBSCRIPTION_PROJECT, LOGS_SUBSCRIPTION_ID)
    logging_context.log("Starting processing")

    worker_state = WorkerState(worker_name)
    while True:
        try:
            perform_pull(worker_state, sfm_queue, subscriber_client, subscription_path)
        except Exception:
            logging_context.exception("Failed to pull messages")

async def get_all_accessible_projects(context: LoggingContext, session: ClientSession, token: str):
    url = _CLOUD_RESOURCE_MANAGER_ROOT + "/projects?filter=lifecycleState%3AACTIVE"
    headers = {"Authorization": "Bearer {token}".format(token=token)}
    response = await session.get(url, headers=headers)
    response_json = await response.json()
    all_projects = [project["projectId"] for project in response_json.get("projects", [])]
    if all_projects:
        context.log("Access to following projects: " + ", ".join(all_projects))
    else:
        context.log("There is no access to any projects. Check service account configuration.")
    return all_projects

def create_dimension(name: str, value: Any, context: LoggingContext = LoggingContext(None)) -> DimensionValue:
    string_value = str(value)

    if len(name) > MAX_DIMENSION_NAME_LENGTH:
        context.log(
            f'MINT rejects dimension names longer than {MAX_DIMENSION_NAME_LENGTH} chars. '
            f'Dimension name "{name}" has been truncated'
        )
        name = name[:MAX_DIMENSION_NAME_LENGTH]

    if len(string_value) > MAX_DIMENSION_VALUE_LENGTH:
        context.log(
            f'MINT rejects dimension values longer than {MAX_DIMENSION_VALUE_LENGTH} chars. '
            f'Dimension value "{string_value}" has been truncated'
        )
        string_value = string_value[:MAX_DIMENSION_VALUE_LENGTH]

    return DimensionValue(name, string_value)

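# Minimal standalone sketch of the truncation rule used by create_dimension above.
# The limits below are illustrative assumptions for this example only; the real values
# come from the module constants MAX_DIMENSION_NAME_LENGTH and MAX_DIMENSION_VALUE_LENGTH.
def demo_truncate_dimension(name: str, value, name_limit: int = 100, value_limit: int = 250):
    string_value = str(value)
    return name[:name_limit], string_value[:value_limit]

# demo_truncate_dimension("x" * 150, "y" * 300) -> ("x" * 100, "y" * 250)
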
def __init__(self, context: LoggingContext, source: str, condition: str):
    self.source = source
    self.condition = condition
    # Start out valid and with no evaluator; both are revised below if parsing fails.
    self.valid = True
    self._evaluator = None

    for key in _CONDITION_COMPARATOR_MAP:
        if condition.startswith(key):
            self._evaluator = _CONDITION_COMPARATOR_MAP[key]
            break

    operands = re.findall(r"'(.*?)'", condition, re.DOTALL)
    self._operand = operands[0] if operands else None
    self._source_value_extractor = _SOURCE_VALUE_EXTRACTOR_MAP.get(source.casefold(), None)

    if not self._source_value_extractor:
        context.log(f"Unsupported source type: '{source}'")
        self.valid = False

    if not self._evaluator or not self._operand:
        context.log(f"Failed to parse condition macro for expression: '{condition}'")
        self.valid = False

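# Illustrative, self-contained sketch of the condition-macro parsing performed above:
# pick the comparator whose macro name prefixes the expression, then extract the quoted
# operand with the same regex. The "$eq"/"$prefix" keys are stand-in assumptions; the
# real module defines its own keys in _CONDITION_COMPARATOR_MAP.
import re

_DEMO_COMPARATOR_MAP = {
    "$eq": lambda value, operand: value == operand,
    "$prefix": lambda value, operand: value.startswith(operand),
}


def demo_parse_condition(condition: str):
    evaluator = next((fn for key, fn in _DEMO_COMPARATOR_MAP.items() if condition.startswith(key)), None)
    operands = re.findall(r"'(.*?)'", condition, re.DOTALL)
    return evaluator, (operands[0] if operands else None)

# demo_parse_condition("$eq('gce_instance')") -> (<eq comparator>, 'gce_instance')
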
async def create_token(context: LoggingContext, session: ClientSession):
    credentials_path = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', "")

    if credentials_path:
        context.log(f"Using credentials from {credentials_path}")
        with open(credentials_path) as key_file:
            credentials_data = json.load(key_file)

        return await get_token(key=credentials_data['private_key'],
                               service=credentials_data['client_email'],
                               uri=credentials_data['token_uri'],
                               session=session)
    else:
        context.log("Trying to use default service account")
        return await create_default_service_account_token(context, session)

async def async_dynatrace_gcp_extension():
    timestamp_utc = datetime.utcnow()
    timestamp_utc_iso = timestamp_utc.isoformat()
    execution_identifier = hashlib.md5(timestamp_utc_iso.encode("UTF-8")).hexdigest()
    logging_context = LoggingContext(execution_identifier)
    logging_context.log("Starting execution")
    event_context = {
        'timestamp': timestamp_utc_iso,
        'event_id': timestamp_utc.timestamp(),
        'event_type': 'test',
        'execution_id': execution_identifier
    }
    data = {'data': '', 'publishTime': timestamp_utc_iso}
    start_time = time.time()
    await handle_event(data, event_context, "dynatrace-gcp-extension")
    elapsed_time = time.time() - start_time
    logging_context.log(f"Execution took {elapsed_time}\n")

def _create_sources(context: LoggingContext, sources_json: List[Dict]) -> List[SourceMatcher]:
    result = []
    for source_json in sources_json:
        source = source_json.get("source", None)
        condition = source_json.get("condition", None)
        source_matcher = None

        if source and condition:
            source_matcher = SourceMatcher(context, source, condition)

        if source_matcher and source_matcher.valid:
            result.append(source_matcher)
        else:
            context.log(f"Encountered invalid rule source, parameters were: source= {source}, condition = {condition}")
            return []

    return result

def run_ack_logs(worker_name: str, sfm_queue: Queue):
    logging_context = LoggingContext(worker_name)
    subscriber_client = pubsub.SubscriberClient()
    subscription_path = subscriber_client.subscription_path(LOGS_SUBSCRIPTION_PROJECT, LOGS_SUBSCRIPTION_ID)
    logging_context.log("Starting processing")

    worker_state = WorkerState(worker_name)
    while True:
        try:
            perform_pull(worker_state, sfm_queue, subscriber_client, subscription_path)
        except Exception as e:
            if isinstance(e, Forbidden):
                logging_context.error(
                    f"{e} Please check whether assigned service account has permission to fetch Pub/Sub messages."
                )
            else:
                logging_context.exception("Failed to pull messages")
            # Backoff for 1 minute to avoid spamming requests and logs
            time.sleep(60)

async def create_default_service_account_token(context: LoggingContext, session: ClientSession):
    """
    For reference check out https://github.com/googleapis/google-auth-library-python/tree/master/google/auth/compute_engine
    :param context: the logging context
    :param session: the aiohttp client session
    :return: the access token, or None if authorization fails
    """
    url = _METADATA_ROOT + "/instance/service-accounts/{0}/token".format("default")
    try:
        response = await session.get(url, headers=_METADATA_HEADERS)
        if response.status >= 300:
            body = await response.text()
            context.log(f"Failed to authorize with Service Account from Metadata Service, response is {response.status} => {body}")
            return None
        response_json = await response.json()
        return response_json["access_token"]
    except Exception as e:
        context.log(f"Failed to authorize with Service Account from Metadata Service due to '{e}'")
        return None

def load_supported_services(context: LoggingContext) -> List[GCPService]:
    activation_yaml = read_activation_yaml()
    activation_config_per_service = get_activation_config_per_service(activation_yaml)
    feature_sets_from_activation_config = load_activated_feature_sets(context, activation_yaml)

    working_directory = os.path.dirname(os.path.realpath(__file__))
    config_directory = os.path.join(working_directory, "config")
    config_files = [
        file for file in listdir(config_directory)
        if isfile(os.path.join(config_directory, file)) and is_yaml_file(file)
    ]

    services = []
    for file in config_files:
        config_file_path = os.path.join(config_directory, file)
        try:
            with open(config_file_path, encoding="utf-8") as config_file:
                config_yaml = yaml.safe_load(config_file)
                technology_name = extract_technology_name(config_yaml)

                for service_yaml in config_yaml.get("gcp", {}):
                    service_name = service_yaml.get("service", "None")
                    feature_set = service_yaml.get("featureSet", "default_metrics")
                    # If whitelist of services exists and current service is not present in it, skip
                    # If whitelist is empty - no services explicitly selected - load all available
                    whitelist_exists = len(feature_sets_from_activation_config) > 0
                    if f'{service_name}/{feature_set}' in feature_sets_from_activation_config or not whitelist_exists:
                        activation = activation_config_per_service.get(service_name, {})
                        services.append(GCPService(tech_name=technology_name, **service_yaml, activation=activation))
        except Exception as error:
            context.log(f"Failed to load configuration file: '{config_file_path}'. Error details: {error}")
            continue

    feature_sets = [f"{service.name}/{service.feature_set}" for service in services]
    if feature_sets:
        context.log("Selected feature sets: " + ", ".join(feature_sets))
    else:
        context.log("Empty feature sets. GCP services not monitored.")
    return services

async def handle_event(event: Dict, event_context, project_id_owner: Optional[str],
                       projects_ids: Optional[List[str]] = None):
    if isinstance(event_context, Dict):
        context = LoggingContext(event_context.get("execution_id", None))
    else:
        context = LoggingContext(None)

    selected_services = None
    if "GCP_SERVICES" in os.environ:
        selected_services_string = os.environ.get("GCP_SERVICES", "")
        selected_services = selected_services_string.split(",") if selected_services_string else []
        # set default featureset if featureset not present in env variable
        for i, service in enumerate(selected_services):
            if "/" not in service:
                selected_services[i] = f"{service}/default"
    services = load_supported_services(context, selected_services)

    async with init_gcp_client_session() as gcp_session, init_dt_client_session() as dt_session:
        setup_start_time = time.time()
        token = await create_token(context, gcp_session)

        if token is None:
            context.log("Cannot proceed without authorization token, stopping the execution")
            return
        if not isinstance(token, str):
            raise Exception(f"Failed to fetch access token, got non string value: {token}")

        context.log("Successfully obtained access token")

        if not project_id_owner:
            project_id_owner = get_project_id_from_environment()

        dynatrace_api_key = await fetch_dynatrace_api_key(gcp_session=gcp_session,
                                                          project_id=project_id_owner, token=token)
        dynatrace_url = await fetch_dynatrace_url(gcp_session=gcp_session,
                                                  project_id=project_id_owner, token=token)

        print_metric_ingest_input = \
            "PRINT_METRIC_INGEST_INPUT" in os.environ and os.environ["PRINT_METRIC_INGEST_INPUT"].upper() == "TRUE"
        self_monitoring_enabled = os.environ.get('SELF_MONITORING_ENABLED', "False").upper() == "TRUE"

        context = MetricsContext(
            gcp_session=gcp_session,
            dt_session=dt_session,
            project_id_owner=project_id_owner,
            token=token,
            execution_time=datetime.utcnow(),
            execution_interval_seconds=60 * 1,
            dynatrace_api_key=dynatrace_api_key,
            dynatrace_url=dynatrace_url,
            print_metric_ingest_input=print_metric_ingest_input,
            self_monitoring_enabled=self_monitoring_enabled,
            scheduled_execution_id=context.scheduled_execution_id
        )

        if not projects_ids:
            projects_ids = await get_all_accessible_projects(context, gcp_session, token)

        setup_time = (time.time() - setup_start_time)
        context.setup_execution_time = {project_id: setup_time for project_id in projects_ids}

        context.start_processing_timestamp = time.time()

        process_project_metrics_tasks = [
            process_project_metrics(context, project_id, services)
            for project_id in projects_ids
        ]
        await asyncio.gather(*process_project_metrics_tasks, return_exceptions=True)

        context.log(f"Fetched and pushed GCP data in {time.time() - context.start_processing_timestamp} s")

        log_self_monitoring_data(context)
        if context.self_monitoring_enabled:
            await push_self_monitoring(context)

        await gcp_session.close()
        await dt_session.close()

def check_version(logging_context: LoggingContext):
    script_directory = os.path.dirname(os.path.realpath(__file__))
    version_file_path = os.path.join(script_directory, "./../version.txt")
    with open(version_file_path) as version_file:
        _version = version_file.readline()
        logging_context.log(f"Found version: {_version}")

def _log_self_monitoring_data(self_monitoring: LogSelfMonitoring, logging_context: LoggingContext):
    dynatrace_connectivity = Counter(self_monitoring.dynatrace_connectivity)
    dynatrace_connectivity = [f"{connectivity.name}:{count}" for connectivity, count in dynatrace_connectivity.items()]
    dynatrace_connectivity = ", ".join(dynatrace_connectivity)
    logging_context.log("SFM", f"Number of all log ingest requests sent to Dynatrace: {self_monitoring.all_requests}")
    logging_context.log("SFM", f"Dynatrace connectivity: {dynatrace_connectivity}")
    logging_context.log("SFM", f"Number of log records with missing/invalid timestamp (used publish time): {self_monitoring.publish_time_fallback_records}")
    logging_context.log("SFM", f"Number of invalid log records due to too old timestamp: {self_monitoring.too_old_records}")
    logging_context.log("SFM", f"Number of errors occurred during parsing logs: {self_monitoring.parsing_errors}")
    logging_context.log("SFM", f"Number of records with too long content: {self_monitoring.records_with_too_long_content}")
    logging_context.log("SFM", f"Total logs processing time [s]: {self_monitoring.processing_time}")
    logging_context.log("SFM", f"Total logs sending time [s]: {self_monitoring.sending_time}")
    logging_context.log("SFM", f"Log ingest payload size [kB]: {self_monitoring.log_ingest_payload_size}")
    logging_context.log("SFM", f"Number of sent logs entries: {self_monitoring.sent_logs_entries}")

print(",,,,,,,,,,,,,,,,, dynatrace-gcp-function .,,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, .,,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,") print(".,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,") print(".,,,,,,,,,,,,, .,,,,,,,,,,,,,,,,,,. ,,,,,,,,,,,,,,,") print(" ,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,,,, .,,,,,,,,,,,,.") print(" ,,,,,,, ,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,") print(" ,,,,, .,,,,,,,,,,,,,,,,,,,,,,,,,,. ,,,,,,,,") print(" , ,,,,,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,,,,") print(" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,") print("") logging_context = LoggingContext(None) logging_context.log( "Dynatrace function for Google Cloud Platform monitoring\n") logging_context.log("Setting up... \n") app = web.Application() app.add_routes([web.get('/health', health)]) # setup webapp runner = web.AppRunner(app) loop.run_until_complete(runner.setup()) site = web.TCPSite(runner, '0.0.0.0', HEALTH_CHECK_PORT) loop.run_until_complete(site.start()) instance_metadata = loop.run_until_complete(run_instance_metadata_check()) logging_context.log(f"Operation mode: {OPERATION_MODE.name}")
async def handle_event(event: Dict, event_context, projects_ids: Optional[List[str]] = None,
                       services: Optional[List[GCPService]] = None):
    if isinstance(event_context, Dict):  # for k8s installation
        context = LoggingContext(event_context.get("execution_id", None))
    else:
        context = LoggingContext(None)

    if not services:  # load services for GCP Function
        services = load_supported_services(context)

    async with init_gcp_client_session() as gcp_session, init_dt_client_session() as dt_session:
        setup_start_time = time.time()
        token = await create_token(context, gcp_session)

        if token is None:
            context.log("Cannot proceed without authorization token, stopping the execution")
            return
        if not isinstance(token, str):
            raise Exception(f"Failed to fetch access token, got non string value: {token}")

        context.log("Successfully obtained access token")

        project_id_owner = get_project_id_from_environment()

        dynatrace_api_key = await fetch_dynatrace_api_key(gcp_session=gcp_session,
                                                          project_id=project_id_owner, token=token)
        dynatrace_url = await fetch_dynatrace_url(gcp_session=gcp_session,
                                                  project_id=project_id_owner, token=token)

        check_version(logging_context=context)
        await check_dynatrace(logging_context=context,
                              project_id=project_id_owner,
                              dt_session=dt_session,
                              dynatrace_url=dynatrace_url,
                              dynatrace_access_key=dynatrace_api_key)

        query_interval_min = get_query_interval_minutes()

        print_metric_ingest_input = os.environ.get("PRINT_METRIC_INGEST_INPUT", "FALSE").upper() in ["TRUE", "YES"]
        self_monitoring_enabled = os.environ.get('SELF_MONITORING_ENABLED', "FALSE").upper() in ["TRUE", "YES"]

        context = MetricsContext(
            gcp_session=gcp_session,
            dt_session=dt_session,
            project_id_owner=project_id_owner,
            token=token,
            execution_time=datetime.utcnow(),
            execution_interval_seconds=60 * query_interval_min,
            dynatrace_api_key=dynatrace_api_key,
            dynatrace_url=dynatrace_url,
            print_metric_ingest_input=print_metric_ingest_input,
            self_monitoring_enabled=self_monitoring_enabled,
            scheduled_execution_id=context.scheduled_execution_id
        )

        if not projects_ids:
            projects_ids = await get_all_accessible_projects(context, gcp_session, token)

        disabled_apis = {}
        disabled_projects = []
        for project_id in projects_ids:
            await check_x_goog_user_project_header_permissions(context, project_id)
            # Collect the disabled APIs for each project
            disabled_apis[project_id] = await get_all_disabled_apis(context, project_id)
            if 'monitoring.googleapis.com' in disabled_apis[project_id]:
                disabled_projects.append(project_id)

        if disabled_projects:
            context.log("monitoring.googleapis.com API disabled in the projects: " + ", ".join(disabled_projects) +
                        ", those projects will not be monitored")
            for disabled_project in disabled_projects:
                projects_ids.remove(disabled_project)

        setup_time = (time.time() - setup_start_time)
        context.setup_execution_time = {project_id: setup_time for project_id in projects_ids}

        context.start_processing_timestamp = time.time()

        process_project_metrics_tasks = [
            process_project_metrics(context, project_id, services, disabled_apis.get(project_id, set()))
            for project_id in projects_ids
        ]
        await asyncio.gather(*process_project_metrics_tasks, return_exceptions=True)

        context.log(f"Fetched and pushed GCP data in {time.time() - context.start_processing_timestamp} s")

        log_self_monitoring_data(context)
        if context.self_monitoring_enabled:
            await push_self_monitoring(context)

        await gcp_session.close()
        await dt_session.close()

async def check_dynatrace(logging_context: LoggingContext, project_id, dt_session: ClientSession,
                          dynatrace_url, dynatrace_access_key):
    try:
        if not dynatrace_url or not dynatrace_access_key:
            logging_context.log(
                f'ERROR No Dynatrace secrets: DYNATRACE_URL, DYNATRACE_ACCESS_KEY for project: {project_id}. '
                f'Add required secrets to Secret Manager.')
            return None

        logging_context.log(f"Using [DYNATRACE_URL] Dynatrace endpoint: {dynatrace_url}")
        logging_context.log(f'Using [DYNATRACE_ACCESS_KEY]: {obfuscate_dynatrace_access_key(dynatrace_access_key)}.')

        token_metadata = await get_dynatrace_token_metadata(dt_session, logging_context, dynatrace_url,
                                                            dynatrace_access_key)
        if token_metadata.get('name', None):
            logging_context.log(f"Token name: {token_metadata.get('name')}.")
        if token_metadata.get('revoked', None) or not valid_dynatrace_scopes(token_metadata):
            logging_context.log(
                f'Dynatrace API Token for project: \'{project_id}\' is not valid. '
                f'Check expiration time and required token scopes: {DYNATRACE_REQUIRED_TOKEN_SCOPES}')
    except Exception as e:
        logging_context.log(f'Unable to get Dynatrace Secrets for project: {project_id}. Error details: {e}')

async def handle_event(event: Dict, event_context, project_id_owner: Optional[str]):
    if isinstance(event_context, Dict):
        context = LoggingContext(event_context.get("execution_id", None))
    else:
        context = LoggingContext(None)

    selected_services = None
    if "GCP_SERVICES" in os.environ:
        selected_services_string = os.environ.get("GCP_SERVICES", "")
        selected_services = selected_services_string.split(",") if selected_services_string else []
    services = load_supported_services(context, selected_services)

    async with aiohttp.ClientSession() as session:
        setup_start_time = time.time()
        token = await create_token(context, session)

        if token is None:
            context.log("Cannot proceed without authorization token, stopping the execution")
            return
        if not isinstance(token, str):
            raise Exception(f"Failed to fetch access token, got non string value: {token}")

        context.log("Successfully obtained access token")

        if not project_id_owner:
            project_id_owner = get_project_id_from_environment()

        dynatrace_api_key = await fetch_dynatrace_api_key(session=session, project_id=project_id_owner, token=token)
        dynatrace_url = await fetch_dynatrace_url(session=session, project_id=project_id_owner, token=token)

        print_metric_ingest_input = \
            "PRINT_METRIC_INGEST_INPUT" in os.environ and os.environ["PRINT_METRIC_INGEST_INPUT"].upper() == "TRUE"

        context = Context(
            session=session,
            project_id_owner=project_id_owner,
            token=token,
            execution_time=datetime.utcnow(),
            execution_interval_seconds=60 * 1,
            dynatrace_api_key=dynatrace_api_key,
            dynatrace_url=dynatrace_url,
            print_metric_ingest_input=print_metric_ingest_input,
            scheduled_execution_id=context.scheduled_execution_id
        )

        projects_ids = await get_all_accessible_projects(context, session)

        context.setup_execution_time = (time.time() - setup_start_time)

        fetch_gcp_data_start_time = time.time()
        fetch_ingest_lines_tasks = [fetch_ingest_lines_task(context, project_id, services)
                                    for project_id in projects_ids]
        ingest_lines_per_project = await asyncio.gather(*fetch_ingest_lines_tasks, return_exceptions=True)
        ingest_lines = [ingest_line for sublist in ingest_lines_per_project for ingest_line in sublist]
        context.fetch_gcp_data_execution_time = time.time() - fetch_gcp_data_start_time

        context.log(f"Fetched GCP data in {context.fetch_gcp_data_execution_time} s")

        await push_ingest_lines(context, ingest_lines)
        await push_self_monitoring_time_series(context)

        await session.close()

print(",, ,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, .,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, .,,,,,,,,,,,,,,,,.") print(",,,,,,,,,,,,,,,,, Dynatrace .,,,,,,,,,,,,,,,,.") print(",,,,,,,,,,,,,,,,, dynatrace-gcp-function .,,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, .,,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,") print(",,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,") print(".,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,") print(".,,,,,,,,,,,,, .,,,,,,,,,,,,,,,,,,. ,,,,,,,,,,,,,,,") print(" ,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,,,, .,,,,,,,,,,,,.") print(" ,,,,,,, ,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,") print(" ,,,,, .,,,,,,,,,,,,,,,,,,,,,,,,,,. ,,,,,,,,") print(" , ,,,,,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,,,,") print(" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,") print("") logging_context = LoggingContext(None) logging_context.log( "Dynatrace function for Google Cloud Platform monitoring\n") if "GCP_SERVICES" in os.environ: services = os.environ.get("GCP_SERVICES", "") print(f"Running with configured services: {services}") logging_context.log("Setting up... \n") loop = asyncio.get_event_loop() loop.create_task(scheduling_loop()) loop.run_forever()