def get_resources(self):
    """Collect ElastiCache resources (Memcached clusters and Redis
    replication groups) across all configured regions.

    Returns:
        list: cloud service type declarations first, then one response
        object per Memcached cluster and per Redis replication group.
    """
    print("** ElastiCache START **")
    resources = []
    start_time = time.time()

    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)

    for region_name in self.region_names:
        self.reset_region(region_name)

        # Materialize once: the same cluster list feeds both the
        # Memcached and the Redis collectors below.
        cache_clusters = list(self.describe_clusters())

        for memcached_vo in self.get_memcached_data(region_name,
                                                    cache_clusters):
            # Attach CloudWatch metadata only when the model supports it.
            if getattr(memcached_vo, 'set_cloudwatch', None):
                memcached_vo.cloudwatch = CloudWatchModel(
                    memcached_vo.set_cloudwatch(region_name))
            resources.append(
                MemcachedResponse({
                    'resource': MemcachedResource({
                        'data': memcached_vo,
                        'tags': [{'key': tag.key, 'value': tag.value}
                                 for tag in memcached_vo.tags],
                        'region_code': region_name,
                        'reference':
                            ReferenceModel(memcached_vo.reference(region_name))
                    })
                }))

        for redis_vo in self.get_redis_data(region_name, cache_clusters):
            if getattr(redis_vo, 'set_cloudwatch', None):
                redis_vo.cloudwatch = CloudWatchModel(
                    redis_vo.set_cloudwatch(region_name))
            resources.append(
                RedisResponse({
                    'resource': RedisResource({
                        'data': redis_vo,
                        'region_code': region_name,
                        'reference':
                            ReferenceModel(redis_vo.reference(region_name))
                    })
                }))

    print(f' ElastiCache Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect all CloudFront distributions as resource responses.

    Emits the cloud service type declarations first; any failure during
    collection is reported to stdout and the partial result is returned.
    """
    print("** Cloud Front START **")
    start_time = time.time()

    # Cloud service type declarations lead the result list.
    resources = list(CLOUD_SERVICE_TYPES)

    try:
        for data in self.request_data():
            # Attach CloudWatch metadata when the model provides it.
            if getattr(data, 'set_cloudwatch', None):
                data.cloudwatch = CloudWatchModel(data.set_cloudwatch())

            distribution = DistributionResource({
                'name': data.domain_name,
                'data': data,
                'reference': ReferenceModel(data.reference()),
                'region_code': 'global'
            })
            resources.append(self.response_schema({'resource': distribution}))
    except Exception as e:
        print(f'[ERROR {self.service_name}] {e}')

    print(f' Cloud Front Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect CloudFront distributions (single global pass)."""
    print("** Cloud Front START **")
    start_time = time.time()

    # Cloud service type declarations come first.
    resources = list(CLOUD_SERVICE_TYPES)

    try:
        for item in self.request_data():
            wrapped = self.response_schema({
                'resource': DistributionResource({
                    'data': item,
                    'reference': ReferenceModel(item.reference)
                })
            })
            resources.append(wrapped)
    except Exception as e:
        print(f'[ERROR {self.service_name}] {e}')

    print(f' Cloud Front Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self) -> List[BucketResource]:
    """Collect S3 bucket resources.

    Emits the cloud service type declarations first, then one response per
    (data, name) pair yielded by ``request_data``. Any exception aborts the
    remaining collection, is printed, and the partial list is returned.
    """
    print("** S3 START **")
    resources = []
    start_time = time.time()
    try:
        # init cloud service type
        for cst in CLOUD_SERVICE_TYPES:
            resources.append(cst)
        # merge data
        for data, name in self.request_data():
            # This is Global API, yet set up its region for bucket
            if getattr(data, 'set_cloudwatch', None):
                data.cloudwatch = CloudWatchModel(data.set_cloudwatch())
            bucket_resource = {
                'name': name,
                'data': data,
                'reference': ReferenceModel(data.reference())
            }
            # NOTE(review): `data` is accessed both as an object
            # (data.reference()) and dict-like (data.get(...)) — presumably
            # a schematics Model exposing .get(); confirm against the model.
            if data.get('region_name'):
                bucket_resource.update({
                    'region_code': data.get('region_name'),
                })
            resources.append(
                self.response_schema({'resource': BucketResource(bucket_resource)}))
    except Exception as e:
        print(f'[ERROR {self.service_name}] {e}')
    print(f' S3 Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self) -> List[HostedZoneResource]:
    """Collect Route53 hosted zones (global service)."""
    print("** Route53 START **")
    start_time = time.time()
    resources = []
    try:
        # init cloud service type
        resources.extend(CLOUD_SERVICE_TYPES)
        # merge data
        for zone in self.request_data():
            # Attach CloudWatch metadata when the model supports it.
            if getattr(zone, 'set_cloudwatch', None):
                zone.cloudwatch = CloudWatchModel(zone.set_cloudwatch())
            hosted_zone = HostedZoneResource({
                'data': zone,
                'reference': ReferenceModel(zone.reference()),
                'region_code': 'global'
            })
            resources.append(self.response_schema({'resource': hosted_zone}))
    except Exception as e:
        print(f'[ERROR {self.service_name}] {e}')
    print(f' Route53 Finished {time.time() - start_time} Seconds')
    return resources
def collect_data_by_region(self, service_name, region_name, collect_resource_info):
    '''
    collect_resource_info = {
        'request_method': self.request_something_like_data,
        'resource': ResourceClass,
        'response_schema': ResponseClass,
        'kwargs': {}
    }
    '''
    resources = []
    try:
        # Bind the descriptor pieces once (inside the try so a malformed
        # descriptor is handled the same way as a request failure).
        request_method = collect_resource_info['request_method']
        resource_class = collect_resource_info['resource']
        response_schema = collect_resource_info['response_schema']
        extra_kwargs = collect_resource_info.get('kwargs', {})

        for data in request_method(region_name, **extra_kwargs):
            resource = resource_class({
                'data': data,
                'reference': ReferenceModel(data.reference)
            })
            resources.append(response_schema({'resource': resource}))
    except Exception as e:
        # Best-effort collection: report the failing region and move on.
        print(f'[ERROR {service_name}] REGION : {region_name} {e}')
    return resources
def get_resources(self) -> List[BucketResource]:
    """Collect S3 buckets (global service)."""
    print("** S3 START **")
    start_time = time.time()
    resources = []
    try:
        # init cloud service type
        resources.extend(CLOUD_SERVICE_TYPES)
        # merge data
        for bucket in self.request_data():
            bucket_resource = BucketResource({
                'data': bucket,
                'reference': ReferenceModel(bucket.reference)
            })
            resources.append(self.response_schema({'resource': bucket_resource}))
    except Exception as e:
        print(f'[ERROR {self.service_name}] {e}')
    print(f' S3 Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect CloudFront distributions (global service).

    Error resources yielded by ``request_data`` are passed through
    unchanged; everything else is wrapped in a DistributionResource.
    An unexpected failure is converted to a generated error resource.
    """
    _LOGGER.debug("[get_resources] START: Cloudfront")
    resources = []
    start_time = time.time()
    resources.extend(self.set_service_code_in_cloud_service_type())
    try:
        for data in self.request_data():
            if getattr(data, 'resource_type', None) and data.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(data)
            else:
                # Attach CloudWatch metadata only when the model supports it.
                if getattr(data, 'set_cloudwatch', None):
                    data.cloudwatch = CloudWatchModel(data.set_cloudwatch())
                resources.append(self.response_schema(
                    {'resource': DistributionResource({
                        'name': data.domain_name,
                        'data': data,
                        'account': self.account_id,
                        'reference': ReferenceModel(data.reference()),
                        # CloudFront is a global service
                        'region_code': 'global'})
                     }))
    except Exception as e:
        resource_id = ''
        resources.append(self.generate_error('global', resource_id, e))
    _LOGGER.debug(f'[get_resources] Cloud Front Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect all RDS resources across regions.

    Clusters/instances ("Database") are collected directly here; every
    other sub-resource (snapshots, subnet/parameter/option groups) is
    delegated to ``collect_data_by_region`` via descriptor dicts.
    """
    print("** RDS START **")
    resources = []
    start_time = time.time()
    # Descriptors consumed by collect_data_by_region for each region.
    collect_resources = [
        {
            'request_method': self.instance_data,
            'resource': InstanceResource,
            'response_schema': InstanceResponse
        },
        {
            'request_method': self.snapshot_request_data,
            'resource': SnapshotResource,
            'response_schema': SnapshotResponse
        },
        {
            'request_method': self.subnet_group_request_data,
            'resource': SubnetGroupResource,
            'response_schema': SubnetGroupResponse
        },
        {
            'request_method': self.parameter_group_request_data,
            'resource': ParameterGroupResource,
            'response_schema': ParameterGroupResponse
        },
        {
            'request_method': self.option_group_request_data,
            'resource': OptionGroupResource,
            'response_schema': OptionGroupResponse
        }
    ]
    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)
    for region_name in self.region_names:
        self.reset_region(region_name)
        # For Database
        for database_vo, resource in self.db_cluster_data(region_name):
            # Attach CloudWatch metadata only when the model supports it.
            if getattr(database_vo, 'set_cloudwatch', None):
                database_vo.cloudwatch = CloudWatchModel(
                    database_vo.set_cloudwatch(region_name))
            resources.append(DatabaseResponse(
                {'resource': resource(
                    {'data': database_vo,
                     'tags': [{'key': tag.key, 'value': tag.value}
                              for tag in database_vo.tags],
                     'region_code': region_name,
                     'reference': ReferenceModel(database_vo.reference(region_name))})}
            ))
        # For All except Database
        for collect_resource in collect_resources:
            resources.extend(
                self.collect_data_by_region(self.service_name, region_name,
                                            collect_resource))
    print(f' RDS Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect Direct Connect resources.

    Per-region resources (connections, virtual private gateways, LAGs)
    go through ``collect_data_by_region``; Direct Connect gateways are
    collected once afterwards (not per region).
    """
    print("** Direct Connect START **")
    resources = []
    start_time = time.time()
    # Descriptors consumed by collect_data_by_region for each region.
    collect_resources = [
        {
            'request_method': self.connection_request_data,
            'resource': ConnectionResource,
            'response_schema': ConnectionResponse
        },
        {
            'request_method': self.virtual_private_gateway_request_data,
            'resource': VirtualPrivateGatewayResource,
            'response_schema': VirtualPrivateGatewayResponse
        },
        {
            'request_method': self.lag_request_data,
            'resource': LAGResource,
            'response_schema': LAGResponse
        },
    ]
    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)
    # merge data
    for region_name in self.region_names:
        self.reset_region(region_name)
        for collect_resource in collect_resources:
            resources.extend(
                self.collect_data_by_region(self.service_name, region_name,
                                            collect_resource))
    try:
        # Direct Connect gateways are collected once, outside the region loop.
        for data in self.direct_connect_gateway_request_data():
            resources.append(
                self.dcgw_response_schema({
                    'resource': DirectConnectGatewayResource({
                        'name': data.direct_connect_gateway_name,
                        'data': data,
                        'reference': ReferenceModel(data.reference)
                    })
                }))
    except Exception as e:
        print(f'[ERROR {self.service_name}] {e}')
    print(f' Direct Connect Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self) -> List[RedisResource]:
    """Collect ElastiCache Redis resources.

    Returns:
        list: cloud service type declarations first, then exactly one
        response object per item yielded by ``request_data``.
    """
    print("** ElastiCache START **")
    resources = []
    start_time = time.time()

    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)

    # merge data — one response per collected item. The previous version
    # appended the identical response twice per item, so every Redis
    # resource appeared duplicated in the output.
    for func in self.request_data():
        resources.append(self.redis_response_schema(
            {'resource': RedisResource({
                'data': func,
                'reference': ReferenceModel(func.reference)})}))

    print(f' ElastiCache Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self) -> List[DistributionResource]:
    """Collect CloudFront distributions; service type declarations first."""
    # init cloud service type
    resources = list(CLOUD_SERVICE_TYPES)
    # merge data
    resources.extend(
        self.response_schema({
            'resource': DistributionResource({
                'data': item,
                'reference': ReferenceModel(item.reference)
            })
        })
        for item in self.request_data()
    )
    return resources
def get_resources(self):
    """Collect EKS clusters per region, then their node groups.

    NOTE(review): ``self.node_groups`` is read after cluster collection —
    presumably populated as a side effect of ``request_data`` during
    ``collect_data_by_region``; confirm. It is reset after each region.
    """
    _LOGGER.debug("[get_resources] START: EKS")
    resources = []
    self.node_groups = []
    start_time = time.time()
    resources.extend(self.set_service_code_in_cloud_service_type())
    # Descriptor consumed by collect_data_by_region for each region.
    collect_resource = {
        'request_method': self.request_data,
        'resource': ClusterResource,
        'response_schema': ClusterResponse
    }
    for region_name in self.region_names:
        self.reset_region(region_name)
        resources.extend(
            self.collect_data_by_region(self.service_name, region_name,
                                        collect_resource))
        # For Node Group
        for node_group_vo in self.node_groups:
            resources.append(
                NodeGroupResponse({
                    'resource': NodeGroupResource({
                        'name': node_group_vo.nodegroup_name,
                        'account': self.account_id,
                        'launched_at': self.datetime_to_iso8601(node_group_vo.created_at),
                        'data': node_group_vo,
                        'tags': [{
                            'key': tag.key,
                            'value': tag.value
                        } for tag in node_group_vo.tags],
                        'region_code': region_name,
                        'reference': ReferenceModel(
                            node_group_vo.reference(region_name))
                    })
                }))
        # Clear the per-region accumulator before the next region.
        self.node_groups = []
    _LOGGER.debug(
        f'[get_resources] FINISHED: EKS ({time.time() - start_time} sec)')
    return resources
def get_resources(self):
    """Collect EKS clusters per region, then their node groups.

    NOTE(review): ``self.node_groups`` is read after cluster collection —
    presumably populated as a side effect of ``request_data`` during
    ``collect_data_by_region``; confirm. It is reset after each region.
    """
    print("** EKS START **")
    resources = []
    self.node_groups = []
    start_time = time.time()
    # Descriptor consumed by collect_data_by_region for each region.
    collect_resource = {
        'request_method': self.request_data,
        'resource': ClusterResource,
        'response_schema': ClusterResponse
    }
    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)
    for region_name in self.region_names:
        self.reset_region(region_name)
        resources.extend(
            self.collect_data_by_region(self.service_name, region_name,
                                        collect_resource))
        try:
            # For Node Group
            for node_group_vo in self.node_groups:
                resources.append(
                    NodeGroupResponse({
                        'resource': NodeGroupResource({
                            'name': node_group_vo.nodegroup_name,
                            'data': node_group_vo,
                            'tags': [{
                                'key': tag.key,
                                'value': tag.value
                            } for tag in node_group_vo.tags],
                            'region_code': region_name,
                            'reference': ReferenceModel(
                                node_group_vo.reference(region_name))
                        })
                    }))
        except Exception as e:
            print(f'[ERROR EKS] REGION : {region_name} {e}')
        # Clear the per-region accumulator before the next region.
        self.node_groups = []
    print(f' EKS Finished {time.time() - start_time} Seconds')
    return resources
def get_memcached_data(self, region_name, cache_clusters):
    """Yield a MemcachedResource for each memcached-engine cluster.

    Clusters with a different engine are skipped. A failure on one
    cluster yields ``{'data': <error resource>}`` and collection
    continues with the next cluster.
    """
    self.cloud_service_type = 'Memcached'
    for cluster in cache_clusters:
        try:
            if cluster.get('Engine') == 'memcached':
                # Enrich the raw API dict before binding it to the model.
                cluster.update({
                    'configuration_endpoint_display':
                        self.set_configuration_endpoint_display(
                            cluster.get('ConfigurationEndpoint')),
                    'nodes': self.get_memcached_nodes(cluster),
                    'tags': self.list_tags(cluster['ARN']),
                    'account_id': self.account_id,
                })
                # strict=False: ignore raw keys the model does not declare.
                memcached_vo = Memcached(cluster, strict=False)
                yield MemcachedResource({
                    'data': memcached_vo,
                    'name': memcached_vo.cache_cluster_id,
                    'instance_type': memcached_vo.cache_node_type,
                    'launched_at': self.datetime_to_iso8601(
                        memcached_vo.cache_cluster_create_time),
                    'account': self.account_id,
                    'tags': [{
                        'key': tag.key,
                        'value': tag.value
                    } for tag in memcached_vo.tags],
                    'region_code': region_name,
                    'reference': ReferenceModel(memcached_vo.reference(region_name))
                })
        except Exception as e:
            resource_id = cluster.get('ARN', '')
            error_resource_response = self.generate_error(
                region_name, resource_id, e)
            yield {'data': error_resource_response}
def collect_data_by_region(self, service_name, region_name, collect_resource_info):
    '''
    Collect one resource kind for one region, driven by a descriptor:

    collect_resource_info = {
        'request_method': self.request_something_like_data,
        'resource': ResourceClass,
        'response_schema': ResponseClass,
        'kwargs': {}
    }

    Error resources yielded by the request method are passed through
    unchanged; an unexpected failure is converted to a generated error
    resource. Returns the list of response objects.
    '''
    resources = []
    # Optional per-item fields copied through when the request method
    # supplies them.
    additional_data = ['name', 'type', 'size', 'launched_at']
    try:
        for collected_dict in collect_resource_info['request_method'](
                region_name, **collect_resource_info.get('kwargs', {})):
            data = collected_dict['data']
            if getattr(data, 'resource_type', None) and data.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(data)
            else:
                # Cloud Service Resource
                # Attach CloudWatch metadata only when the model supports it.
                if getattr(data, 'set_cloudwatch', None):
                    data.cloudwatch = CloudWatchModel(data.set_cloudwatch(region_name))
                resource_dict = {
                    'data': data,
                    'account': collected_dict.get('account'),
                    'instance_size': float(collected_dict.get('instance_size', 0)),
                    'instance_type': collected_dict.get('instance_type', ''),
                    'launched_at': str(collected_dict.get('launched_at', '')),
                    'tags': self.get_resource_tags(getattr(data, 'tags', [])),
                    'region_code': region_name,
                    'reference': ReferenceModel(data.reference(region_name))
                }
                # Copy optional fields, overriding the defaults above
                # (e.g. 'launched_at') when explicitly provided.
                for add_field in additional_data:
                    if add_field in collected_dict:
                        resource_dict.update({add_field: collected_dict[add_field]})
                resources.append(collect_resource_info['response_schema'](
                    {'resource': collect_resource_info['resource'](resource_dict)}))
    except Exception as e:
        resource_id = ''
        error_resource_response = self.generate_error(region_name, resource_id, e)
        resources.append(error_resource_response)
    return resources
def collect_data_by_region(self, service_name, region_name, collect_resource_info):
    '''
    collect_resource_info = {
        'request_method': self.request_something_like_data,
        'resource': ResourceClass,
        'response_schema': ResponseClass,
        'kwargs': {}
    }
    '''
    resources = []
    try:
        # Bind the descriptor pieces once (inside the try so a malformed
        # descriptor is handled like any other collection failure).
        request_method = collect_resource_info['request_method']
        resource_class = collect_resource_info['resource']
        response_schema = collect_resource_info['response_schema']
        extra_kwargs = collect_resource_info.get('kwargs', {})

        # request_method yields (data, name) pairs for this region.
        for data, name in request_method(region_name, **extra_kwargs):
            # Attach CloudWatch metadata when the model supports it.
            if getattr(data, 'set_cloudwatch', None):
                data.cloudwatch = CloudWatchModel(data.set_cloudwatch(region_name))
            resource = resource_class({
                'name': name,
                'data': data,
                'tags': self.get_resource_tags(getattr(data, 'tags', [])),
                'region_code': region_name,
                'reference': ReferenceModel(data.reference(region_name))
            })
            resources.append(response_schema({'resource': resource}))
    except Exception as e:
        # Best-effort collection: report the failing region and move on.
        print(f'[ERROR {service_name}] REGION : {region_name} {e}')
    return resources
def get_resources(self) -> List[BucketResource]:
    """Collect S3 bucket resources.

    Error resources yielded by ``request_data`` are passed through
    unchanged; an unexpected failure is converted to a generated error
    resource tagged with region 'global'.
    """
    _LOGGER.debug("[get_resources] START: S3")
    resources = []
    start_time = time.time()
    try:
        resources.extend(self.set_service_code_in_cloud_service_type())
        # merge data
        for data in self.request_data():
            if getattr(data, 'resource_type', None) and data.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(data)
            else:
                # This is Global API, yet set up its region for bucket
                if getattr(data, 'set_cloudwatch', None):
                    data.cloudwatch = CloudWatchModel(data.set_cloudwatch())
                bucket_resource = {
                    'name': data.name,
                    'data': data,
                    'account': self.account_id,
                    'instance_size': float(data.size),
                    'reference': ReferenceModel(data.reference())
                }
                # NOTE(review): `data` is used both as an object and
                # dict-like (.get) — presumably a schematics Model; confirm.
                if data.get('region_name'):
                    bucket_resource.update({
                        'region_code': data.get('region_name'),
                    })
                resources.append(
                    self.response_schema({'resource': BucketResource(bucket_resource)}))
    except Exception as e:
        resource_id = ''
        resources.append(self.generate_error('global', resource_id, e))
    _LOGGER.debug(f'[get_resources] FINISHED: S3 ({time.time() - start_time} sec)')
    return resources
def get_resources(self):
    """Collect IAM resources: roles, users, groups, policies, and
    identity providers (IAM is global).

    Policies and users are fetched first because groups and roles are
    resolved against them. Error resources from any sub-collector are
    passed through unchanged; per-collection error lists
    (``policy_errors``/``user_errors``) are appended at the end.
    """
    _LOGGER.debug("[get_resources] START: IAM")
    start_time = time.time()
    resources = []
    policy_errors = []
    user_errors = []
    resources.extend(self.set_service_code_in_cloud_service_type())
    try:
        # Fetch policies and users up front; later collectors depend on them.
        policies, policy_errors = self.list_local_managed_policies()
        users, user_errors = self.request_user_data(policies)
        for role in self.request_role_data(policies):
            if getattr(role, 'resource_type', None) and role.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(role)
            else:
                resources.append(self.role_response_schema(
                    {'resource': RoleResource({
                        'name': role.role_name,
                        'data': role,
                        'account': self.account_id,
                        'reference': ReferenceModel(role.reference()),
                        'region_code': 'global'})}))
        for user in users:
            if getattr(user, 'resource_type', None) and user.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(user)
            else:
                resources.append(self.user_response_schema(
                    {'resource': UserResource({
                        'name': user.user_name,
                        'data': user,
                        'account': self.account_id,
                        'reference': ReferenceModel(user.reference()),
                        'region_code': 'global'})}))
        for group in self.request_group_data(users, policies):
            if getattr(group, 'resource_type', None) and group.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(group)
            else:
                resources.append(self.group_response_schema(
                    {'resource': GroupResource({
                        'name': group.group_name,
                        'data': group,
                        'account': self.account_id,
                        'reference': ReferenceModel(group.reference()),
                        'region_code': 'global'})}))
        for policy in policies:
            if getattr(policy, 'resource_type', None) and policy.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(policy)
            else:
                resources.append(self.policy_response_schema(
                    {'resource': PolicyResource({
                        'name': policy.policy_name,
                        'data': policy,
                        'account': self.account_id,
                        'reference': ReferenceModel(policy.reference()),
                        'region_code': 'global'})}))
        for identity_provider in self.request_identity_provider_data():
            if getattr(identity_provider, 'resource_type', None) and identity_provider.resource_type == 'inventory.ErrorResource':
                # Error Resource
                resources.append(identity_provider)
            else:
                resources.append(self.identity_provider_response_schema(
                    {'resource': IdentityProviderResource({
                        'name': identity_provider.url,
                        'data': identity_provider,
                        'account': self.account_id,
                        'reference': ReferenceModel(identity_provider.reference()),
                        'region_code': 'global'})}))
    except Exception as e:
        resource_id = ''
        resources.append(self.generate_error('global', resource_id, e))
    # Append errors accumulated by the policy/user sub-collectors.
    resources.extend(policy_errors)
    resources.extend(user_errors)
    _LOGGER.debug(f'[get_resources] FINISHED: IAM ({time.time() - start_time} sec)')
    return resources
def get_redis_data(self, region_name, cache_clusters):
    """Yield a RedisResource for each replication group.

    The raw replication-group dict is enriched with derived fields
    (mode, shard/node counts, endpoints, ...) before binding it to the
    Redis model. A failure on one group yields
    ``{'data': <error resource>}`` and collection continues.
    """
    self.cloud_service_type = 'Redis'
    for replication_group in self.describe_replication_groups():
        try:
            # Derive display/summary fields from the raw API response.
            replication_group.update({
                'mode': self.set_redis_mode(
                    replication_group.get('ClusterEnabled')),
                'engine': 'redis',
                'engine_version': self.get_engine_version(replication_group, cache_clusters),
                'shard_count': self.get_shard_count(
                    replication_group.get('NodeGroups', [])),
                'availability_zones': self.get_redis_availability_zones(
                    replication_group.get('NodeGroups', [])),
                'subnet_group_name':
                    self.get_redis_subnet_group_name(replication_group, cache_clusters),
                'parameter_group_name': self.get_redis_parameter_group_name(
                    replication_group, cache_clusters),
                'node_count': self.get_node_count(
                    replication_group.get('MemberClusters', [])),
                'nodes': self.get_redis_nodes_info(replication_group, cache_clusters),
                'account_id': self.account_id
            })
            # Mode-specific fields: endpoints for plain Redis,
            # shard details for cluster-mode-enabled groups.
            if replication_group.get('mode') == 'Redis':
                replication_group.update({
                    'primary_endpoint': self.get_redis_primary_endpoint(replication_group),
                    'reader_endpoint': self.get_redis_reader_endpoint(replication_group)
                })
            elif replication_group.get('mode') == 'Clustered Redis':
                replication_group.update({
                    'shards': self.get_redis_shards_info(replication_group, cache_clusters)
                })
            # strict=False: ignore raw keys the model does not declare.
            redis_vo = Redis(replication_group, strict=False)
            yield RedisResource({
                'data': redis_vo,
                'name': redis_vo.replication_group_id,
                'instance_type': redis_vo.cache_node_type,
                'account': self.account_id,
                'region_code': region_name,
                'reference': ReferenceModel(redis_vo.reference(region_name))
            })
        except Exception as e:
            resource_id = replication_group.get('ARN', '')
            error_resource_response = self.generate_error(
                region_name, resource_id, e)
            yield {'data': error_resource_response}
def get_resources(self):
    """Collect IAM resources: roles, users, groups, policies, and
    identity providers (IAM is global).

    Policies and users are fetched first because the group and role
    collectors resolve against them. Any failure aborts the remaining
    collection, prints a traceback, and returns the partial list.
    """
    print("** IAM START **")
    start_time = time.time()
    resources = []
    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)
    try:
        # Fetch policies and users up front; later collectors depend on them.
        policies = self.list_local_managed_policies()
        users = self.request_user_data(policies)
        for data in self.request_role_data(policies):
            resources.append(
                self.role_response_schema({
                    'resource': RoleResource({
                        'data': data,
                        'reference': ReferenceModel(data.reference)
                    })
                }))
        for data in users:
            resources.append(
                self.user_response_schema({
                    'resource': UserResource({
                        'data': data,
                        'reference': ReferenceModel(data.reference)
                    })
                }))
        for data in self.request_group_data(users, policies):
            resources.append(
                self.group_response_schema({
                    'resource': GroupResource({
                        'data': data,
                        'reference': ReferenceModel(data.reference)
                    })
                }))
        for data in policies:
            resources.append(
                self.policy_response_schema({
                    'resource': PolicyResource({
                        'data': data,
                        'reference': ReferenceModel(data.reference)
                    })
                }))
        for data in self.request_identity_provider_data():
            resources.append(
                self.identity_provider_response_schema({
                    'resource': IdentityProviderResource({
                        'data': data,
                        'reference': ReferenceModel(data.reference)
                    })
                }))
    except Exception as e:
        print(traceback.format_exc())
        print(f'[ERROR {self.service_name}] {e}')
    print(f' IAM Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect ELB resources per region: target groups, then load
    balancers (each load balancer enriched with its listeners).

    A failure in one region is printed and collection continues with
    the next region.
    """
    print("** ELB START **")
    resources = []
    start_time = time.time()
    # init cloud service type
    for cst in CLOUD_SERVICE_TYPES:
        resources.append(cst)
    for region_name in self.region_names:
        self.reset_region(region_name)
        try:
            # Target Groups
            raw_tgs = self.request_target_group(region_name)
            tg_arns = [
                raw_tg.get('TargetGroupArn') for raw_tg in raw_tgs
                if raw_tg.get('TargetGroupArn')
            ]
            # Tags are fetched in one batched call for all ARNs.
            # NOTE(review): all_tags is only assigned when tg_arns is
            # non-empty — presumably every target group carries an ARN,
            # otherwise search_tags below would hit an unbound name that
            # the broad except masks; confirm.
            if len(tg_arns) > 0:
                all_tags = self.request_tags(tg_arns)
            for raw_tg in raw_tgs:
                match_tags = self.search_tags(all_tags, raw_tg.get('TargetGroupArn'))
                raw_tg.update({
                    'region_name': region_name,
                    'account_id': self.account_id,
                    'tags': list(
                        map(lambda match_tag: Tags(match_tag, strict=False),
                            match_tags))
                })
                target_group = TargetGroup(raw_tg, strict=False)
                resources.append(
                    self.tg_response_schema({
                        'resource': TargetGroupResource({
                            'data': target_group,
                            'reference': ReferenceModel(target_group.reference)
                        })
                    }))
            # Load Balancers
            all_tags = []
            raw_lbs = self.request_loadbalancer(region_name)
            lb_arns = [
                raw_lb.get('LoadBalancerArn') for raw_lb in raw_lbs
                if raw_lb.get('LoadBalancerArn')
            ]
            if len(lb_arns) > 0:
                all_tags = self.request_tags(lb_arns)
            for raw_lb in raw_lbs:
                match_tags = self.search_tags(
                    all_tags, raw_lb.get('LoadBalancerArn'))
                raw_listeners = self.request_listeners(
                    raw_lb.get('LoadBalancerArn'))
                raw_lb.update({
                    'region_name': region_name,
                    'account_id': self.account_id,
                    'listeners': list(
                        map(lambda _listener: Listener(_listener, strict=False),
                            raw_listeners)),
                    'tags': list(
                        map(lambda match_tag: Tags(match_tag, strict=False),
                            match_tags))
                })
                load_balancer = LoadBalancer(raw_lb, strict=False)
                resources.append(
                    self.lb_response_schema({
                        'resource': LoadBalancerResource({
                            'data': load_balancer,
                            'reference': ReferenceModel(load_balancer.reference)
                        })
                    }))
        except Exception as e:
            print(f'[ERROR {self.service_name}] {e}')
    print(f' ELB Finished {time.time() - start_time} Seconds')
    return resources
def get_resources(self):
    """Collect all RDS resources across regions.

    Databases (clusters/instances) are collected directly per region;
    other sub-resources (instances, snapshots, subnet/parameter/option
    groups) are delegated to ``collect_data_by_region`` via descriptor
    dicts. Error resources are passed through; an unexpected failure in
    the database pass becomes a generated error resource for the region.
    """
    _LOGGER.debug("[get_resources] START: RDS")
    resources = []
    start_time = time.time()
    # Descriptors consumed by collect_data_by_region for each region.
    collect_resources = [{
        'request_method': self.instance_request_data,
        'resource': InstanceResource,
        'response_schema': InstanceResponse
    }, {
        'request_method': self.snapshot_request_data,
        'resource': SnapshotResource,
        'response_schema': SnapshotResponse
    }, {
        'request_method': self.subnet_group_request_data,
        'resource': SubnetGroupResource,
        'response_schema': SubnetGroupResponse
    }, {
        'request_method': self.parameter_group_request_data,
        'resource': ParameterGroupResource,
        'response_schema': ParameterGroupResponse
    }, {
        'request_method': self.option_group_request_data,
        'resource': OptionGroupResource,
        'response_schema': OptionGroupResponse
    }]
    resources.extend(self.set_service_code_in_cloud_service_type())
    for region_name in self.region_names:
        self.reset_region(region_name)
        try:
            # For Database
            for database_vo, resource, identifier in self.db_cluster_data(
                    region_name):
                if getattr(
                        database_vo, 'resource_type', None
                ) and database_vo.resource_type == 'inventory.ErrorResource':
                    # Error Resource
                    resources.append(database_vo)
                else:
                    # Attach CloudWatch metadata when the model supports it.
                    if getattr(database_vo, 'set_cloudwatch', None):
                        database_vo.cloudwatch = CloudWatchModel(
                            database_vo.set_cloudwatch(region_name))
                    resources.append(
                        DatabaseResponse({
                            'resource': resource({
                                'name': identifier,
                                'data': database_vo,
                                'instance_type': database_vo.engine,
                                'tags': [{
                                    'key': tag.key,
                                    'value': tag.value
                                } for tag in database_vo.tags],
                                'region_code': region_name,
                                'account': self.account_id,
                                'reference': ReferenceModel(
                                    database_vo.reference(region_name))
                            })
                        }))
        except Exception as e:
            resource_id = ''
            resources.append(
                self.generate_error(region_name, resource_id, e))
        # For All except Database
        for collect_resource in collect_resources:
            resources.extend(
                self.collect_data_by_region(self.service_name, region_name,
                                            collect_resource))
    _LOGGER.debug(
        f'[get_resources] FINISHED: RDS ({time.time() - start_time} sec)')
    return resources