def _cost_type_handler(self, settings):
    """Update the stored cost_type setting for this tenant schema.

    Args:
        settings: The new cost_type value, or None for a no-op.

    Returns:
        (Bool) True if the cost_type was changed, False otherwise.
    """
    if settings is None:
        return False
    cost_type = settings
    try:
        stored_cost_type = get_selected_cost_type_or_setup(self.schema)
    except Exception as exp:
        LOG.warning(
            f"Failed to retrieve cost_type for schema {self.schema}. Reason: {exp}"
        )
        return False
    # No-op when the requested value is already stored.
    if stored_cost_type == cost_type:
        return False
    try:
        # BUG FIX: interpolate instead of concatenating — the original
        # `f"..." + settings` raises TypeError when settings is not a str.
        LOG.info(f"Updating cost_type to: {cost_type}")
        set_cost_type(self.schema, cost_type)
    except Exception as exp:
        LOG.warning(
            f"Failed to store new cost_type settings for schema {self.schema}. Reason: {exp}"
        )
        return False
    invalidate_view_cache_for_tenant_and_source_type(self.schema, Provider.PROVIDER_AWS)
    return True
def refresh_materialized_views(schema_name, provider_type, manifest_id=None):
    """Refresh the database's materialized views for reporting.

    Args:
        schema_name (str): Tenant schema whose views are refreshed.
        provider_type (str): Provider type selecting which view set to refresh.
        manifest_id (int): Optional manifest to mark complete afterwards.

    Returns:
        None
    """
    materialized_views = ()
    if provider_type in (Provider.PROVIDER_AWS, Provider.PROVIDER_AWS_LOCAL):
        materialized_views = (
            AWS_MATERIALIZED_VIEWS + OCP_ON_AWS_MATERIALIZED_VIEWS + OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS
        )
    # BUG FIX: `(Provider.PROVIDER_OCP)` is just the string itself, so `in`
    # performed a substring test; the one-element tuple makes this a proper
    # membership test.
    elif provider_type in (Provider.PROVIDER_OCP,):
        materialized_views = (
            OCP_MATERIALIZED_VIEWS
            + OCP_ON_AWS_MATERIALIZED_VIEWS
            + OCP_ON_AZURE_MATERIALIZED_VIEWS
            + OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS
        )
    elif provider_type in (Provider.PROVIDER_AZURE, Provider.PROVIDER_AZURE_LOCAL):
        materialized_views = (
            AZURE_MATERIALIZED_VIEWS + OCP_ON_AZURE_MATERIALIZED_VIEWS + OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS
        )
    with schema_context(schema_name):
        for view in materialized_views:
            table_name = view._meta.db_table
            with connection.cursor() as cursor:
                cursor.execute(f"REFRESH MATERIALIZED VIEW CONCURRENTLY {table_name}")
                LOG.info(f"Refreshed {table_name}.")
    invalidate_view_cache_for_tenant_and_source_type(schema_name, provider_type)
    if manifest_id:
        # Processing for this manifest should be complete after this step
        with ReportManifestDBAccessor() as manifest_accessor:
            manifest = manifest_accessor.get_manifest_by_id(manifest_id)
            manifest_accessor.mark_manifest_as_completed(manifest)
def update_summary_tables(self, start_date, end_date):
    """
    Update report summary tables.

    Args:
        start_date (str, datetime): When to start.
        end_date (str, datetime): When to end.

    Returns:
        None

    Raises:
        ReportSummaryUpdaterCloudError: when the OCP-on-cloud updater fails.

    """
    start_date, end_date = self._format_dates(start_date, end_date)
    LOG.info("Using start date: %s", start_date)
    LOG.info("Using end date: %s", end_date)
    start_date, end_date = self._updater.update_summary_tables(start_date, end_date)
    try:
        self._ocp_cloud_updater.update_summary_tables(start_date, end_date)
    except Exception as ex:
        # Chain the original exception so the full traceback is preserved.
        raise ReportSummaryUpdaterCloudError(str(ex)) from ex
    invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
def update_summary_tables(self, start_date, end_date, tracing_id):
    """
    Update report summary tables.

    Args:
        start_date (str, datetime): When to start.
        end_date (str, datetime): When to end.
        tracing_id (str): The tracing_id.

    Returns:
        (str, str): The start and end dates used for summarization.

    """
    LOG.info(log_json(self._tracing_id, f"Summary processing starting for source {self._provider_uuid}"))
    start_date, end_date = self._format_dates(start_date, end_date)
    # Date-range log lines use the caller-supplied tracing_id.
    for label, value in (("start", start_date), ("end", end_date)):
        LOG.info(log_json(tracing_id, f"Using {label} date: {value}"))
    start_date, end_date = self._updater.update_summary_tables(start_date, end_date)
    LOG.info(log_json(self._tracing_id, f"Summary processing completed for source {self._provider_uuid}"))
    invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
    return start_date, end_date
def refresh_materialized_views(  # noqa: C901
    schema_name,
    provider_type,
    manifest_id=None,
    provider_uuid="",
    synchronous=False,
    queue_name=None,
    tracing_id=None,
):
    """Refresh the database's materialized views for reporting.

    Args:
        schema_name (str): Tenant schema to refresh.
        provider_type (str): Provider type used for cache invalidation.
        manifest_id (int): Optional manifest to mark complete afterwards.
        provider_uuid (str): Optional provider whose data timestamp is updated.
        synchronous (bool): When False, serialize runs via the worker cache.
        queue_name (str): Optional celery queue for requeued tasks.
        tracing_id (str): Correlates log lines across the pipeline.

    Returns:
        None
    """
    task_name = "masu.processor.tasks.refresh_materialized_views"
    cache_args = [schema_name, provider_type, provider_uuid]
    if not synchronous:
        worker_cache = WorkerCache()
        if worker_cache.single_task_is_running(task_name, cache_args):
            # Another instance holds the lock; requeue instead of blocking.
            msg = f"Task {task_name} already running for {cache_args}. Requeuing."
            LOG.info(log_json(tracing_id, msg))
            refresh_materialized_views.s(
                schema_name,
                provider_type,
                manifest_id=manifest_id,
                provider_uuid=provider_uuid,
                synchronous=synchronous,
                queue_name=queue_name,
                tracing_id=tracing_id,
            ).apply_async(queue=queue_name or REFRESH_MATERIALIZED_VIEWS_QUEUE)
            return
        worker_cache.lock_single_task(task_name, cache_args, timeout=settings.WORKER_CACHE_TIMEOUT)
    # NOTE(review): this tuple is intentionally empty, making the refresh loop
    # below a no-op — presumably the views were retired; confirm before removing.
    materialized_views = ()
    try:
        with schema_context(schema_name):
            for view in materialized_views:
                table_name = view._meta.db_table
                with connection.cursor() as cursor:
                    cursor.execute(f"REFRESH MATERIALIZED VIEW CONCURRENTLY {table_name}")
                    LOG.info(log_json(tracing_id, f"Refreshed {table_name}."))
        invalidate_view_cache_for_tenant_and_source_type(schema_name, provider_type)
        if provider_uuid:
            ProviderDBAccessor(provider_uuid).set_data_updated_timestamp()
        if manifest_id:
            # Processing for this manifest should be complete after this step
            with ReportManifestDBAccessor() as manifest_accessor:
                manifest = manifest_accessor.get_manifest_by_id(manifest_id)
                manifest_accessor.mark_manifest_as_completed(manifest)
    except Exception:
        if not synchronous:
            worker_cache.release_single_task(task_name, cache_args)
        # FIX: bare `raise` re-raises without adding a new frame to the
        # traceback, unlike the original `raise ex`.
        raise
    if not synchronous:
        worker_cache.release_single_task(task_name, cache_args)
def _tag_key_handler(self, settings):
    """
    Apply per-provider tag-key enablement from a settings payload.

    Args:
        settings (dict): Per-provider settings, each with an "enabled" list.

    Returns:
        (Bool) - True, if a setting had an effect, False otherwise

    Raises:
        ValidationError: if any requested tag key is not available.
    """
    changed = [False] * len(obtainTagKeysProvidersParams)
    for idx, name in enumerate(obtainTagKeysProvidersParams):
        provider_settings = settings.get(name)
        if provider_settings is None:
            continue
        requested = provider_settings.get("enabled", [])
        params = obtainTagKeysProvidersParams[name]
        key_model = params["enabled_tag_keys"]
        available, _ = self._obtain_tag_keys(params["tag_view"], params["query_handler"], key_model)
        bad_keys = [tag for tag in requested if tag not in available]
        if bad_keys:
            raise ValidationError(error_obj("settings", f"Invalid tag keys provided: {', '.join(bad_keys)}."))
        if "aws" in name:
            changed[idx] = update_enabled_keys(self.schema, key_model, requested)
        else:
            stale_rows = []
            with schema_context(self.schema):
                for row in key_model.objects.all():
                    if row.key in requested:
                        # Already enabled; drop from the pending-create list.
                        requested.remove(row.key)
                    else:
                        stale_rows.append(row)
                        changed[idx] = True
                for row in stale_rows:
                    row.delete()
                for tag in requested:
                    key_model.objects.create(key=tag)
                    changed[idx] = True
        if changed[idx]:
            invalidate_view_cache_for_tenant_and_source_type(self.schema, params["provider"])
    return any(changed)
def refresh_materialized_views(schema_name, provider_type, manifest_id=None, provider_uuid=None, synchronous=False):
    """Refresh the database's materialized views for reporting.

    Args:
        schema_name (str): Tenant schema whose views are refreshed.
        provider_type (str): Provider type selecting which view set to refresh.
        manifest_id (int): Optional manifest to mark complete afterwards.
        provider_uuid (str): Optional provider whose data timestamp is updated.
        synchronous (bool): When False, serialize runs via the worker cache.

    Returns:
        None
    """
    task_name = "masu.processor.tasks.refresh_materialized_views"
    cache_args = [schema_name]
    if not synchronous:
        worker_cache = WorkerCache()
        # Poll until any concurrent run finishes, then take the lock.
        while worker_cache.single_task_is_running(task_name, cache_args):
            time.sleep(5)
        worker_cache.lock_single_task(task_name, cache_args)
    materialized_views = ()
    if provider_type in (Provider.PROVIDER_AWS, Provider.PROVIDER_AWS_LOCAL):
        materialized_views = (
            AWS_MATERIALIZED_VIEWS + OCP_ON_AWS_MATERIALIZED_VIEWS + OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS
        )
    # BUG FIX: `(Provider.PROVIDER_OCP)` is just the string, so `in` did a
    # substring test; the one-element tuple makes it a membership test.
    elif provider_type in (Provider.PROVIDER_OCP,):
        materialized_views = (
            OCP_MATERIALIZED_VIEWS
            + OCP_ON_AWS_MATERIALIZED_VIEWS
            + OCP_ON_AZURE_MATERIALIZED_VIEWS
            + OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS
        )
    elif provider_type in (Provider.PROVIDER_AZURE, Provider.PROVIDER_AZURE_LOCAL):
        materialized_views = (
            AZURE_MATERIALIZED_VIEWS + OCP_ON_AZURE_MATERIALIZED_VIEWS + OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS
        )
    # FIX: try/finally guarantees the single-task lock is released even when
    # the refresh raises; previously a failure left the lock held until the
    # cache timeout, blocking subsequent runs.
    try:
        with schema_context(schema_name):
            for view in materialized_views:
                table_name = view._meta.db_table
                with connection.cursor() as cursor:
                    cursor.execute(f"REFRESH MATERIALIZED VIEW CONCURRENTLY {table_name}")
                    LOG.info(f"Refreshed {table_name}.")
        invalidate_view_cache_for_tenant_and_source_type(schema_name, provider_type)
        if provider_uuid:
            ProviderDBAccessor(provider_uuid).set_data_updated_timestamp()
        if manifest_id:
            # Processing for this manifest should be complete after this step
            with ReportManifestDBAccessor() as manifest_accessor:
                manifest = manifest_accessor.get_manifest_by_id(manifest_id)
                manifest_accessor.mark_manifest_as_completed(manifest)
    finally:
        if not synchronous:
            worker_cache.release_single_task(task_name, cache_args)
def update_cost_model_costs(self, start_date=None, end_date=None):
    """
    Update usage charge information.

    Args:
        start_date (String) - Start date of range to update derived cost.
        end_date (String) - End date of range to update derived cost.

    Returns:
        None

    """
    updater = self._updater
    if not updater:
        return
    updater.update_summary_cost_model_costs(start_date, end_date)
    # Cached report views are stale once costs change; drop them.
    invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
def update_cost_summary_table(self, start_date, end_date):
    """
    Update cost summary tables.

    Args:
        start_date (str, datetime): When to start.
        end_date (str, datetime): When to end.

    Returns:
        None

    """
    formatted = self._format_dates(start_date, end_date)
    self._ocp_cloud_updater.update_cost_summary_table(*formatted)
    # Summaries changed; invalidate any cached views for this tenant/source.
    invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
def update_daily_tables(self, start_date, end_date):
    """
    Update report daily rollup tables.

    Args:
        start_date (str, datetime): When to start.
        end_date (str, datetime): When to end.

    Returns:
        (str, str): The start and end date strings used in the daily SQL.

    """
    # DOC FIX: removed the stale `manifest_id` entry from the docstring —
    # this method takes no such parameter.
    start_date, end_date = self._format_dates(start_date, end_date)
    start_date, end_date = self._updater.update_daily_tables(start_date, end_date)
    invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
    return start_date, end_date
def _tag_key_handler(self, settings):
    """
    Apply OCP tag-key enablement from a settings payload.

    Args:
        settings (dict): payload with a "tag-management" -> "enabled" list.

    Returns:
        (Bool) - True, if a setting had an effect, False otherwise

    Raises:
        ValidationError: if any requested tag key is not available.
    """
    changed = False
    requested = settings.get("tag-management", {}).get("enabled", [])
    available, _ = self._obtain_tag_keys()
    bad_keys = [tag for tag in requested if tag not in available]
    if bad_keys:
        raise ValidationError(error_obj("settings", f"Invalid tag keys provided: {', '.join(bad_keys)}."))
    stale_rows = []
    with schema_context(self.schema):
        for row in OCPEnabledTagKeys.objects.all():
            if row.key in requested:
                # Already enabled; drop from the pending-create list.
                requested.remove(row.key)
            else:
                stale_rows.append(row)
                changed = True
        for row in stale_rows:
            row.delete()
        for tag in requested:
            OCPEnabledTagKeys.objects.create(key=tag)
            changed = True
    if changed:
        invalidate_view_cache_for_tenant_and_source_type(self.schema, Provider.PROVIDER_OCP)
    return changed
def update_openshift_on_cloud_summary_tables(
    self, start_date, end_date, ocp_provider_uuid, infra_provider_uuid, infra_provider_type, tracing_id
):
    """
    Update report summary tables.

    Args:
        start_date (str, datetime): When to start.
        end_date (str, datetime): When to end.
        ocp_provider_uuid (str): The OpenShift source UUID.
        infra_provider_uuid (str): The infrastructure source UUID.
        infra_provider_type (str): The infrastructure provider type.
        tracing_id (str): The tracing_id.

    Returns:
        None

    Raises:
        ReportSummaryUpdaterCloudError: when the OCP-on-cloud updater fails.

    """
    if self._provider.type in Provider.OPENSHIFT_ON_CLOUD_PROVIDER_LIST:
        msg = f"OpenShift on {infra_provider_type} summary processing starting for source {self._provider_uuid}"
        LOG.info(log_json(self._tracing_id, msg))
        start_date, end_date = self._format_dates(start_date, end_date)
        LOG.info(log_json(tracing_id, f"Using start date: {start_date}"))
        LOG.info(log_json(tracing_id, f"Using end date: {end_date}"))
        try:
            self._ocp_cloud_updater.update_summary_tables(
                start_date, end_date, ocp_provider_uuid, infra_provider_uuid, infra_provider_type
            )
            # BUG FIX: the parenthesized comma-separated f-strings built a
            # TUPLE, so the tuple repr was logged; adjacent string literals
            # concatenate into one message instead.
            msg = (
                f"OpenShift on {infra_provider_type} summary processing completed"
                f" for source {self._provider_uuid}"
            )
            LOG.info(log_json(self._tracing_id, msg))
            invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
        except Exception as ex:
            # Chain the original exception so the full traceback is preserved.
            raise ReportSummaryUpdaterCloudError(str(ex)) from ex
    else:
        # BUG FIX: same tuple-instead-of-string defect as above.
        msg = (
            f"{infra_provider_type} is not in {Provider.OPENSHIFT_ON_CLOUD_PROVIDER_LIST}."
            " Not running OpenShift on Cloud summary."
        )
        LOG.info(log_json(self._tracing_id, msg))
def update_daily_tables(self, start_date, end_date):
    """
    Update report daily rollup tables.

    Args:
        start_date (str, datetime): When to start.
        end_date (str, datetime): When to end.

    Returns:
        (str, str): The start and end date strings used in the daily SQL.

    """
    LOG.info(log_json(self._tracing_id, f"Daily summary starting for source {self._provider_uuid}"))
    start_date, end_date = self._format_dates(start_date, end_date)
    start_date, end_date = self._updater.update_daily_tables(start_date, end_date)
    # Daily rollups changed; cached views for this tenant/source are stale.
    invalidate_view_cache_for_tenant_and_source_type(self._schema, self._provider.type)
    LOG.info(log_json(self._tracing_id, f"Daily summary completed for source {self._provider_uuid}"))
    return start_date, end_date
def test_invalidate_view_cache_for_tenant_and_source_type(self):
    """Test that all views for a source type and tenant are invalidated."""
    cases = (
        ("AWS", (AWS_CACHE_PREFIX, OPENSHIFT_AWS_CACHE_PREFIX, OPENSHIFT_ALL_CACHE_PREFIX)),
        (
            "OCP",
            (
                OPENSHIFT_CACHE_PREFIX,
                OPENSHIFT_AWS_CACHE_PREFIX,
                OPENSHIFT_AZURE_CACHE_PREFIX,
                OPENSHIFT_ALL_CACHE_PREFIX,
            ),
        ),
        ("Azure", (AZURE_CACHE_PREFIX, OPENSHIFT_AZURE_CACHE_PREFIX, OPENSHIFT_ALL_CACHE_PREFIX)),
    )
    for source_type, prefixes in cases:
        # Seed one cache entry per view prefix for this tenant, invalidate,
        # then verify every seeded key is gone.
        cache_data = {f"{self.schema_name}:{prefix}": "value" for prefix in prefixes}
        self.cache.set_many(cache_data)
        invalidate_view_cache_for_tenant_and_source_type(self.schema_name, source_type)
        for key in cache_data:
            self.assertIsNone(self.cache.get(key))
def _tag_key_handler(self, settings):
    """
    Enable/disable tag keys per provider from a prefixed settings payload.

    Args:
        settings (dict): payload with an "enabled" list whose entries are
            "<provider>-<key>" strings.

    Returns:
        (Bool) - True, if a setting had an effect, False otherwise

    Raises:
        ValidationError: if any requested tag key is not available.
    """
    tag_delimiter = "-"
    updated = [False] * len(obtainTagKeysProvidersParams)
    for ix, provider_name in enumerate(obtainTagKeysProvidersParams):
        enabled_tags_no_abbr = []
        tag_view = obtainTagKeysProvidersParams[provider_name]["tag_view"]
        query_handler = obtainTagKeysProvidersParams[provider_name]["query_handler"]
        enabled_tag_keys = obtainTagKeysProvidersParams[provider_name]["enabled_tag_keys"]
        provider = obtainTagKeysProvidersParams[provider_name]["provider"]
        available, _ = self._obtain_tag_keys(tag_view, query_handler, enabled_tag_keys)
        # build a list of enabled tags for a given provider, removing the provider name prefix
        for enabled_tag in settings.get("enabled", []):
            if enabled_tag.startswith(provider_name + tag_delimiter):
                enabled_tags_no_abbr.append(enabled_tag.split(tag_delimiter, 1)[1])
        invalid_keys = [tag_key for tag_key in enabled_tags_no_abbr if tag_key not in available]
        if invalid_keys:
            key = "settings"
            message = f"Invalid tag keys provided: {', '.join(invalid_keys)}."
            raise ValidationError(error_obj(key, message))
        if "aws" in provider_name:
            existing_enabled_tags = list(
                enabled_tag_keys.objects.filter(enabled=True).values_list("key", flat=True)
            )
            # NOTE(review): list comparison is order-sensitive; presumably both
            # sides share a stable ordering — confirm, else compare as sets.
            if enabled_tags_no_abbr != existing_enabled_tags:
                updated[ix] = update_enabled_keys(self.schema, enabled_tag_keys, enabled_tags_no_abbr)
        else:
            remove_tags = []
            with schema_context(self.schema):
                existing_enabled_tags = enabled_tag_keys.objects.all()
                for existing_tag in existing_enabled_tags:
                    if existing_tag.key in enabled_tags_no_abbr:
                        enabled_tags_no_abbr.remove(existing_tag.key)
                    else:
                        remove_tags.append(existing_tag)
                        updated[ix] = True
                if len(remove_tags):
                    # FIX: dropped the spurious `f` prefix — these calls use
                    # lazy %-style logging arguments, so the f-string was
                    # misleading (no interpolation ever happened).
                    LOG.info("Updating %d %s key(s) to DISABLED", len(remove_tags), provider_name)
                for rm_tag in remove_tags:
                    rm_tag.delete()
                    updated[ix] = True
                if len(enabled_tags_no_abbr):
                    LOG.info("Updating %d %s key(s) to ENABLED", len(enabled_tags_no_abbr), provider_name)
                for new_tag in enabled_tags_no_abbr:
                    enabled_tag_keys.objects.create(key=new_tag)
                    updated[ix] = True
        if updated[ix]:
            invalidate_view_cache_for_tenant_and_source_type(self.schema, provider)
    return any(updated)