def __init__(self, region_name):
    self.region_name = region_name
    # Maps logGroupName -> LogGroup.
    self.groups = {}
    self.filters = MetricFilters()
    self.queries = {}
    self.resource_policies = {}
class LogsBackend(BaseBackend):
    """In-memory emulation of the CloudWatch Logs service for one region.

    State:
      groups            -- maps logGroupName -> LogGroup
      filters           -- metric filters (MetricFilters)
      queries           -- maps query id -> LogQuery (Insights queries)
      resource_policies -- maps policyName -> policy dict
    """

    def __init__(self, region_name):
        self.region_name = region_name
        self.groups = dict()  # { logGroupName: LogGroup}
        self.filters = MetricFilters()
        self.queries = dict()
        self.resource_policies = dict()

    def reset(self):
        """Drop all state, keeping only the region name."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_log_group(self, log_group_name, tags, **kwargs):
        """Create a log group; the name must be new and at most 512 chars."""
        if log_group_name in self.groups:
            raise ResourceAlreadyExistsException()
        if len(log_group_name) > 512:
            raise InvalidParameterException(
                constraint="Member must have length less than or equal to 512",
                parameter="logGroupName",
                value=log_group_name,
            )
        self.groups[log_group_name] = LogGroup(
            self.region_name, log_group_name, tags, **kwargs
        )
        return self.groups[log_group_name]

    def ensure_log_group(self, log_group_name, tags):
        """Create the log group only if it does not already exist."""
        if log_group_name in self.groups:
            return
        self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)

    def delete_log_group(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        del self.groups[log_group_name]

    def describe_log_groups(self, limit, log_group_name_prefix, next_token):
        """Return one page of log-group descriptions, sorted by name.

        next_token is the logGroupName of the last item of the previous
        page; returns (page, next_token) with next_token None on the
        final page.
        """
        if limit > 50:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 50",
                parameter="limit",
                value=limit,
            )
        if log_group_name_prefix is None:
            log_group_name_prefix = ""
        groups = [
            group.to_describe_dict()
            for name, group in self.groups.items()
            if name.startswith(log_group_name_prefix)
        ]
        groups = sorted(groups, key=lambda x: x["logGroupName"])
        index_start = 0
        if next_token:
            try:
                index_start = (
                    next(
                        index
                        for (index, d) in enumerate(groups)
                        if d["logGroupName"] == next_token
                    )
                    + 1
                )
            except StopIteration:
                index_start = 0
                # AWS returns an empty list if it receives an invalid token.
                groups = []
        index_end = index_start + limit
        if index_end > len(groups):
            index_end = len(groups)
        groups_page = groups[index_start:index_end]
        next_token = None
        if groups_page and index_end < len(groups):
            next_token = groups_page[-1]["logGroupName"]
        return groups_page, next_token

    def create_log_stream(self, log_group_name, log_stream_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.create_log_stream(log_stream_name)

    def delete_log_stream(self, log_group_name, log_stream_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.delete_log_stream(log_stream_name)

    def describe_log_streams(
        self,
        descending,
        limit,
        log_group_name,
        log_stream_name_prefix,
        next_token,
        order_by,
    ):
        """Validate paging/ordering arguments and delegate to the group."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        if limit > 50:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 50",
                parameter="limit",
                value=limit,
            )
        if order_by not in ["LogStreamName", "LastEventTime"]:
            raise InvalidParameterException(
                constraint="Member must satisfy enum value set: [LogStreamName, LastEventTime]",
                parameter="orderBy",
                value=order_by,
            )
        if order_by == "LastEventTime" and log_stream_name_prefix:
            raise InvalidParameterException(
                msg="Cannot order by LastEventTime with a logStreamNamePrefix."
            )
        log_group = self.groups[log_group_name]
        return log_group.describe_log_streams(
            descending,
            limit,
            log_group_name,
            log_stream_name_prefix,
            next_token,
            order_by,
        )

    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        """Append events to a stream; returns the group's result."""
        # TODO: add support for sequence_tokens
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.put_log_events(
            log_group_name, log_stream_name, log_events, sequence_token
        )

    def get_log_events(
        self,
        log_group_name,
        log_stream_name,
        start_time,
        end_time,
        limit,
        next_token,
        start_from_head,
    ):
        """Fetch events from a single stream, validating the page size."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        # Bug fix: the check previously used 1000 although the raised
        # constraint (and the AWS API maximum) is 10000.
        if limit and limit > 10000:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 10000",
                parameter="limit",
                value=limit,
            )
        log_group = self.groups[log_group_name]
        return log_group.get_log_events(
            log_group_name,
            log_stream_name,
            start_time,
            end_time,
            limit,
            next_token,
            start_from_head,
        )

    def filter_log_events(
        self,
        log_group_name,
        log_stream_names,
        start_time,
        end_time,
        limit,
        next_token,
        filter_pattern,
        interleaved,
    ):
        """Filter events across streams of a group, validating the page size."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        # Bug fix: the check previously used 1000 although the raised
        # constraint (and the AWS API maximum) is 10000.
        if limit and limit > 10000:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 10000",
                parameter="limit",
                value=limit,
            )
        log_group = self.groups[log_group_name]
        return log_group.filter_log_events(
            log_group_name,
            log_stream_names,
            start_time,
            end_time,
            limit,
            next_token,
            filter_pattern,
            interleaved,
        )

    def put_retention_policy(self, log_group_name, retention_in_days):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.set_retention_policy(retention_in_days)

    def delete_retention_policy(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.set_retention_policy(None)

    def describe_resource_policies(self, next_token, limit):  # pylint: disable=unused-argument
        """Return list of resource policies.

        The next_token and limit arguments are ignored.  The maximum
        number of resource policies per region is a small number (less
        than 50), so pagination isn't needed.
        """
        limit = limit or MAX_RESOURCE_POLICIES_PER_REGION
        policies = []
        for policy_name, policy_info in self.resource_policies.items():
            policies.append(
                {
                    "policyName": policy_name,
                    "policyDocument": policy_info["policyDocument"],
                    "lastUpdatedTime": policy_info["lastUpdatedTime"],
                }
            )
        return policies

    def put_resource_policy(self, policy_name, policy_doc):
        """Create resource policy and return dict of policy name and doc."""
        if len(self.resource_policies) == MAX_RESOURCE_POLICIES_PER_REGION:
            raise LimitExceededException()
        policy = {
            "policyName": policy_name,
            "policyDocument": policy_doc,
            "lastUpdatedTime": int(unix_time_millis()),
        }
        self.resource_policies[policy_name] = policy
        return {"resourcePolicy": policy}

    def delete_resource_policy(self, policy_name):
        """Remove resource policy with a policy name matching given name."""
        if policy_name not in self.resource_policies:
            raise ResourceNotFoundException(
                msg=f"Policy with name [{policy_name}] does not exist"
            )
        del self.resource_policies[policy_name]
        return ""

    def list_tags_log_group(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.list_tags()

    def tag_log_group(self, log_group_name, tags):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        log_group.tag(tags)

    def untag_log_group(self, log_group_name, tags):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        log_group.untag(tags)

    def put_metric_filter(self, filter_name, filter_pattern, log_group_name, metric_transformations):
        self.filters.add_filter(
            filter_name, filter_pattern, log_group_name, metric_transformations
        )

    def describe_metric_filters(
        self, prefix=None, log_group_name=None, metric_name=None, metric_namespace=None
    ):
        filters = self.filters.get_matching_filters(
            prefix, log_group_name, metric_name, metric_namespace
        )
        return filters

    def delete_metric_filter(self, filter_name=None, log_group_name=None):
        self.filters.delete_filter(filter_name, log_group_name)

    def describe_subscription_filters(self, log_group_name):
        log_group = self.groups.get(log_group_name)
        if not log_group:
            raise ResourceNotFoundException()
        return log_group.describe_subscription_filters()

    def put_subscription_filter(
        self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn
    ):
        """Attach a subscription filter after validating the destination ARN."""
        log_group = self.groups.get(log_group_name)
        if not log_group:
            raise ResourceNotFoundException()
        # The service name is the third colon-separated ARN component.
        service = destination_arn.split(":")[2]
        if service == "lambda":
            from moto.awslambda import (  # pylint: disable=import-outside-toplevel
                lambda_backends,
            )

            lambda_func = lambda_backends[self.region_name].get_function(
                destination_arn
            )
            # no specific permission check implemented
            if not lambda_func:
                raise InvalidParameterException(
                    "Could not execute the lambda function. Make sure you "
                    "have given CloudWatch Logs permission to execute your "
                    "function."
                )
        elif service == "firehose":
            from moto.firehose import (  # pylint: disable=import-outside-toplevel
                firehose_backends,
            )

            firehose = firehose_backends[self.region_name].lookup_name_from_arn(
                destination_arn
            )
            if not firehose:
                raise InvalidParameterException(
                    "Could not deliver test message to specified Firehose "
                    "stream. Check if the given Firehose stream is in ACTIVE "
                    "state."
                )
        else:
            # TODO: support Kinesis stream destinations
            raise InvalidParameterException(
                f"Service '{service}' has not implemented for "
                f"put_subscription_filter()"
            )
        log_group.put_subscription_filter(
            filter_name, filter_pattern, destination_arn, role_arn
        )

    def delete_subscription_filter(self, log_group_name, filter_name):
        log_group = self.groups.get(log_group_name)
        if not log_group:
            raise ResourceNotFoundException()
        log_group.delete_subscription_filter(filter_name)

    def start_query(self, log_group_names, start_time, end_time, query_string):
        """Start an Insights query; every named group must exist."""
        for log_group_name in log_group_names:
            if log_group_name not in self.groups:
                raise ResourceNotFoundException()
        query_id = uuid.uuid1()
        self.queries[query_id] = LogQuery(query_id, start_time, end_time, query_string)
        return query_id
class LogsBackend(BaseBackend):
    """In-memory emulation of the CloudWatch Logs service for one region.

    State:
      groups            -- maps logGroupName -> LogGroup
      filters           -- metric filters (MetricFilters)
      queries           -- maps query id -> LogQuery (Insights queries)
      resource_policies -- maps policyName -> LogResourcePolicy
    """

    def __init__(self, region_name):
        self.region_name = region_name
        self.groups = dict()  # { logGroupName: LogGroup}
        self.filters = MetricFilters()
        self.queries = dict()
        self.resource_policies = dict()

    def reset(self):
        """Drop all state, keeping only the region name."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    @staticmethod
    def default_vpc_endpoint_service(service_region, zones):
        """Default VPC endpoint service."""
        return BaseBackend.default_vpc_endpoint_service_factory(
            service_region, zones, "logs"
        )

    def create_log_group(self, log_group_name, tags, **kwargs):
        """Create a log group; the name must be new and at most 512 chars."""
        if log_group_name in self.groups:
            raise ResourceAlreadyExistsException()
        if len(log_group_name) > 512:
            raise InvalidParameterException(
                constraint="Member must have length less than or equal to 512",
                parameter="logGroupName",
                value=log_group_name,
            )
        self.groups[log_group_name] = LogGroup(
            self.region_name, log_group_name, tags, **kwargs
        )
        return self.groups[log_group_name]

    def ensure_log_group(self, log_group_name, tags):
        """Create the log group only if it does not already exist."""
        if log_group_name in self.groups:
            return
        self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)

    def delete_log_group(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        del self.groups[log_group_name]

    @paginate(pagination_model=PAGINATION_MODEL)
    def describe_log_groups(self, log_group_name_prefix=None, limit=None, next_token=None):
        """Return matching log-group descriptions sorted by name.

        limit/next_token are consumed by the @paginate decorator.
        """
        if log_group_name_prefix is None:
            log_group_name_prefix = ""
        groups = [
            group.to_describe_dict()
            for name, group in self.groups.items()
            if name.startswith(log_group_name_prefix)
        ]
        groups = sorted(groups, key=lambda x: x["logGroupName"])
        return groups

    def create_log_stream(self, log_group_name, log_stream_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.create_log_stream(log_stream_name)

    def delete_log_stream(self, log_group_name, log_stream_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.delete_log_stream(log_stream_name)

    def describe_log_streams(
        self,
        descending,
        limit,
        log_group_name,
        log_stream_name_prefix,
        next_token,
        order_by,
    ):
        """Validate paging/ordering arguments and delegate to the group."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        if limit > 50:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 50",
                parameter="limit",
                value=limit,
            )
        if order_by not in ["LogStreamName", "LastEventTime"]:
            raise InvalidParameterException(
                constraint="Member must satisfy enum value set: [LogStreamName, LastEventTime]",
                parameter="orderBy",
                value=order_by,
            )
        if order_by == "LastEventTime" and log_stream_name_prefix:
            raise InvalidParameterException(
                msg="Cannot order by LastEventTime with a logStreamNamePrefix."
            )
        log_group = self.groups[log_group_name]
        return log_group.describe_log_streams(
            descending=descending,
            limit=limit,
            log_group_name=log_group_name,
            log_stream_name_prefix=log_stream_name_prefix,
            next_token=next_token,
            order_by=order_by,
        )

    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        """Append events to a stream, rejecting out-of-window timestamps.

        Returns (next_sequence_token, rejected_info) where rejected_info
        records indexes of events that were too old or too new.
        """
        # TODO: add support for sequence_tokens
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]

        # Only events from the last 14 days or 2 hours in the future are accepted
        rejected_info = {}
        allowed_events = []
        last_timestamp = None
        oldest = int(unix_time_millis(datetime.utcnow() - timedelta(days=14)))
        newest = int(unix_time_millis(datetime.utcnow() + timedelta(hours=2)))
        for idx, event in enumerate(log_events):
            if last_timestamp and last_timestamp > event["timestamp"]:
                raise InvalidParameterException(
                    "Log events in a single PutLogEvents request must be in chronological order."
                )
            if event["timestamp"] < oldest:
                rejected_info["tooOldLogEventEndIndex"] = idx
            elif event["timestamp"] > newest:
                rejected_info["tooNewLogEventStartIndex"] = idx
            else:
                allowed_events.append(event)
            last_timestamp = event["timestamp"]

        token = log_group.put_log_events(
            log_group_name, log_stream_name, allowed_events, sequence_token
        )
        return token, rejected_info

    def get_log_events(
        self,
        log_group_name,
        log_stream_name,
        start_time,
        end_time,
        limit,
        next_token,
        start_from_head,
    ):
        """Fetch events from a single stream, validating the page size."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        # Bug fix: the check previously used 1000 although the raised
        # constraint (and the AWS API maximum) is 10000.
        if limit and limit > 10000:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 10000",
                parameter="limit",
                value=limit,
            )
        log_group = self.groups[log_group_name]
        return log_group.get_log_events(
            log_group_name,
            log_stream_name,
            start_time,
            end_time,
            limit,
            next_token,
            start_from_head,
        )

    def filter_log_events(
        self,
        log_group_name,
        log_stream_names,
        start_time,
        end_time,
        limit,
        next_token,
        filter_pattern,
        interleaved,
    ):
        """Filter events across streams of a group, validating the page size."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        # Bug fix: the check previously used 1000 although the raised
        # constraint (and the AWS API maximum) is 10000.
        if limit and limit > 10000:
            raise InvalidParameterException(
                constraint="Member must have value less than or equal to 10000",
                parameter="limit",
                value=limit,
            )
        log_group = self.groups[log_group_name]
        return log_group.filter_log_events(
            log_group_name,
            log_stream_names,
            start_time,
            end_time,
            limit,
            next_token,
            filter_pattern,
            interleaved,
        )

    def put_retention_policy(self, log_group_name, retention_in_days):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.set_retention_policy(retention_in_days)

    def delete_retention_policy(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.set_retention_policy(None)

    def describe_resource_policies(self, next_token, limit):  # pylint: disable=unused-argument
        """Return list of resource policies.

        The next_token and limit arguments are ignored.  The maximum
        number of resource policies per region is a small number (less
        than 50), so pagination isn't needed.
        """
        limit = limit or MAX_RESOURCE_POLICIES_PER_REGION
        return list(self.resource_policies.values())

    def put_resource_policy(self, policy_name, policy_doc):
        """Creates/updates resource policy and return policy object"""
        if policy_name in self.resource_policies:
            policy = self.resource_policies[policy_name]
            policy.update(policy_doc)
            return policy
        if len(self.resource_policies) == MAX_RESOURCE_POLICIES_PER_REGION:
            raise LimitExceededException()
        policy = LogResourcePolicy(policy_name, policy_doc)
        self.resource_policies[policy_name] = policy
        return policy

    def delete_resource_policy(self, policy_name):
        """Remove resource policy with a policy name matching given name."""
        if policy_name not in self.resource_policies:
            raise ResourceNotFoundException(
                msg=f"Policy with name [{policy_name}] does not exist"
            )
        del self.resource_policies[policy_name]
        return ""

    def list_tags_log_group(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.list_tags()

    def tag_log_group(self, log_group_name, tags):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        log_group.tag(tags)

    def untag_log_group(self, log_group_name, tags):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        log_group.untag(tags)

    def put_metric_filter(self, filter_name, filter_pattern, log_group_name, metric_transformations):
        self.filters.add_filter(
            filter_name, filter_pattern, log_group_name, metric_transformations
        )

    def describe_metric_filters(
        self, prefix=None, log_group_name=None, metric_name=None, metric_namespace=None
    ):
        filters = self.filters.get_matching_filters(
            prefix, log_group_name, metric_name, metric_namespace
        )
        return filters

    def delete_metric_filter(self, filter_name=None, log_group_name=None):
        self.filters.delete_filter(filter_name, log_group_name)

    def describe_subscription_filters(self, log_group_name):
        log_group = self.groups.get(log_group_name)
        if not log_group:
            raise ResourceNotFoundException()
        return log_group.describe_subscription_filters()

    def put_subscription_filter(
        self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn
    ):
        """Attach a subscription filter after validating the destination ARN."""
        log_group = self.groups.get(log_group_name)
        if not log_group:
            raise ResourceNotFoundException()
        # The service name is the third colon-separated ARN component.
        service = destination_arn.split(":")[2]
        if service == "lambda":
            from moto.awslambda import (  # pylint: disable=import-outside-toplevel
                lambda_backends,
            )

            lambda_func = lambda_backends[self.region_name].get_function(
                destination_arn
            )
            # no specific permission check implemented
            if not lambda_func:
                raise InvalidParameterException(
                    "Could not execute the lambda function. Make sure you "
                    "have given CloudWatch Logs permission to execute your "
                    "function."
                )
        elif service == "firehose":
            from moto.firehose import (  # pylint: disable=import-outside-toplevel
                firehose_backends,
            )

            firehose = firehose_backends[self.region_name].lookup_name_from_arn(
                destination_arn
            )
            if not firehose:
                raise InvalidParameterException(
                    "Could not deliver test message to specified Firehose "
                    "stream. Check if the given Firehose stream is in ACTIVE "
                    "state."
                )
        else:
            # TODO: support Kinesis stream destinations
            raise InvalidParameterException(
                f"Service '{service}' has not implemented for "
                f"put_subscription_filter()"
            )
        log_group.put_subscription_filter(
            filter_name, filter_pattern, destination_arn, role_arn
        )

    def delete_subscription_filter(self, log_group_name, filter_name):
        log_group = self.groups.get(log_group_name)
        if not log_group:
            raise ResourceNotFoundException()
        log_group.delete_subscription_filter(filter_name)

    def start_query(self, log_group_names, start_time, end_time, query_string):
        """Start an Insights query; every named group must exist."""
        for log_group_name in log_group_names:
            if log_group_name not in self.groups:
                raise ResourceNotFoundException()
        query_id = uuid.uuid1()
        self.queries[query_id] = LogQuery(query_id, start_time, end_time, query_string)
        return query_id
def __init__(self, region_name):
    self.region_name = region_name
    # Maps logGroupName -> LogGroup.
    self.groups = {}
    self.filters = MetricFilters()
class LogsBackend(BaseBackend):
    """In-memory stand-in for the CloudWatch Logs service in one region."""

    def __init__(self, region_name):
        self.region_name = region_name
        # Maps logGroupName -> LogGroup.
        self.groups = {}
        self.filters = MetricFilters()

    def _group_or_raise(self, log_group_name):
        """Return the named LogGroup, raising ResourceNotFoundException if absent."""
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        return self.groups[log_group_name]

    def put_metric_filter(self, filter_name, filter_pattern, log_group_name, metric_transformations):
        self.filters.add_filter(
            filter_name, filter_pattern, log_group_name, metric_transformations
        )

    def describe_metric_filters(self, prefix=None, log_group_name=None):
        return self.filters.get_matching_filters(prefix, log_group_name)

    def delete_metric_filter(self, filter_name=None, log_group_name=None):
        self.filters.delete_filter(filter_name, log_group_name)

    def reset(self):
        """Re-run __init__ on a cleared instance, keeping only the region."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_log_group(self, log_group_name, tags):
        if log_group_name in self.groups:
            raise ResourceAlreadyExistsException()
        self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)

    def ensure_log_group(self, log_group_name, tags):
        if log_group_name in self.groups:
            return
        self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)

    def delete_log_group(self, log_group_name):
        self._group_or_raise(log_group_name)
        del self.groups[log_group_name]

    def describe_log_groups(self, limit, log_group_name_prefix, next_token):
        """Page through groups (newest first); next_token is a start index."""
        prefix = "" if log_group_name_prefix is None else log_group_name_prefix
        start = 0 if next_token is None else next_token
        matching = [
            group.to_describe_dict()
            for name, group in self.groups.items()
            if name.startswith(prefix)
        ]
        matching.sort(key=lambda g: g["creationTime"], reverse=True)
        page = matching[start:start + limit]
        token = start + limit
        if token >= len(matching):
            token = None
        return page, token

    def create_log_stream(self, log_group_name, log_stream_name):
        return self._group_or_raise(log_group_name).create_log_stream(log_stream_name)

    def delete_log_stream(self, log_group_name, log_stream_name):
        return self._group_or_raise(log_group_name).delete_log_stream(log_stream_name)

    def describe_log_streams(
        self,
        descending,
        limit,
        log_group_name,
        log_stream_name_prefix,
        next_token,
        order_by,
    ):
        group = self._group_or_raise(log_group_name)
        return group.describe_log_streams(
            descending,
            limit,
            log_group_name,
            log_stream_name_prefix,
            next_token,
            order_by,
        )

    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        # TODO: add support for sequence_tokens
        group = self._group_or_raise(log_group_name)
        return group.put_log_events(
            log_group_name, log_stream_name, log_events, sequence_token
        )

    def get_log_events(
        self,
        log_group_name,
        log_stream_name,
        start_time,
        end_time,
        limit,
        next_token,
        start_from_head,
    ):
        group = self._group_or_raise(log_group_name)
        return group.get_log_events(
            log_group_name,
            log_stream_name,
            start_time,
            end_time,
            limit,
            next_token,
            start_from_head,
        )

    def filter_log_events(
        self,
        log_group_name,
        log_stream_names,
        start_time,
        end_time,
        limit,
        next_token,
        filter_pattern,
        interleaved,
    ):
        group = self._group_or_raise(log_group_name)
        return group.filter_log_events(
            log_group_name,
            log_stream_names,
            start_time,
            end_time,
            limit,
            next_token,
            filter_pattern,
            interleaved,
        )

    def put_retention_policy(self, log_group_name, retention_in_days):
        return self._group_or_raise(log_group_name).set_retention_policy(
            retention_in_days
        )

    def delete_retention_policy(self, log_group_name):
        return self._group_or_raise(log_group_name).set_retention_policy(None)

    def list_tags_log_group(self, log_group_name):
        return self._group_or_raise(log_group_name).list_tags()

    def tag_log_group(self, log_group_name, tags):
        self._group_or_raise(log_group_name).tag(tags)

    def untag_log_group(self, log_group_name, tags):
        self._group_or_raise(log_group_name).untag(tags)
def __init__(self, region_name, account_id):
    super().__init__(region_name, account_id)
    # Maps logGroupName -> LogGroup.
    self.groups = {}
    self.filters = MetricFilters()
    self.queries = {}
    self.resource_policies = {}