class ProxyListenerEvents(ProxyListener):
    """Proxy listener for the Events API: serves tagging operations and
    rule create/delete locally, forwarding everything else to the backend."""

    # shared tagging service backing the Tag/Untag/ListTags operations
    svc = TaggingService()

    def forward_request(self, method, path, data, headers):
        """Handle selected Events operations locally; return True to forward."""
        if method == 'OPTIONS':
            # CORS preflight - answer directly
            return 200
        target = headers.get('X-Amz-Target')
        if method == 'POST' and path == '/':
            body = json.loads(to_str(data))
            if target == 'AWSEvents.PutRule':
                return handle_put_rule(body)
            if target == 'AWSEvents.DeleteRule':
                # handled locally, then still forwarded to the backend below
                handle_delete_rule(rule_name=body.get('Name', None))
            elif target == 'AWSEvents.ListTagsForResource':
                return self.svc.list_tags_for_resource(body['ResourceARN']) or {}
            elif target == 'AWSEvents.TagResource':
                self.svc.tag_resource(body['ResourceARN'], body['Tags'])
                return {}
            elif target == 'AWSEvents.UntagResource':
                self.svc.untag_resource(body['ResourceARN'], body['TagKeys'])
                return {}
        # forward the request to the backend service
        return True

    def return_response(self, method, path, data, headers, response, request_handler=None):
        """Post-process responses returned from the backend."""
        if response.content:
            # fix hardcoded account ID in ARNs returned from this API
            _fix_account_id(response)
            # fix dates returned from this API (fixes an issue with Terraform)
            _fix_date_format(response)
            # fix content-length header
            response.headers['content-length'] = len(response._content)
class ProxyListenerEvents(ProxyListener):
    """Proxy listener for the Events API: handles rule lifecycle calls
    locally and records PutEvents entries during internal test runs."""

    # shared tagging service instance
    svc = TaggingService()

    def forward_request(self, method, path, data, headers):
        """Handle selected Events operations locally; return True to forward."""
        if method == "OPTIONS":
            # CORS preflight - answer directly
            return 200
        if method == "POST" and path == "/":
            # strip the "AWSEvents." service prefix from the target header
            operation = headers.get("X-Amz-Target", "").split(".")[-1]
            body = json.loads(to_str(data))
            if operation == "PutRule":
                return handle_put_rule(body)
            if operation == "DeleteRule":
                handle_delete_rule(rule_name=body.get("Name", None))
            elif operation == "DisableRule":
                handle_disable_rule(rule_name=body.get("Name", None))
            elif operation == "PutEvents" and os.environ.get(ENV_INTERNAL_TEST_RUN):
                # keep track of events for local integration testing
                TEST_EVENTS_CACHE.extend(body.get("Entries", []))
        # forward the request to the backend service
        return True

    def return_response(self, method, path, data, headers, response, request_handler=None):
        """Post-process responses returned from the backend."""
        if response.content:
            # fix hardcoded account ID in ARNs returned from this API
            fix_account_id(response)
            # fix dates returned from this API (fixes an issue with Terraform)
            fix_date_format(response)
            # fix Content-Length header
            response.headers["Content-Length"] = len(response._content)
class ProxyListenerEvents(ProxyListener):
    """Proxy listener for the Events API: serves PutRule/DeleteRule/DisableRule
    locally and forwards all other requests to the backend."""

    # shared tagging service instance
    svc = TaggingService()

    def forward_request(self, method, path, data, headers):
        """Handle selected Events operations locally; return True to forward."""
        if method == "OPTIONS":
            # CORS preflight - answer directly
            return 200
        target = headers.get("X-Amz-Target")
        if method == "POST" and path == "/":
            payload = json.loads(to_str(data))
            if target == "AWSEvents.PutRule":
                return handle_put_rule(payload)
            if target == "AWSEvents.DeleteRule":
                handle_delete_rule(rule_name=payload.get("Name", None))
            elif target == "AWSEvents.DisableRule":
                handle_disable_rule(rule_name=payload.get("Name", None))
        # forward the request to the backend service
        return True

    def return_response(self, method, path, data, headers, response, request_handler=None):
        """Post-process responses returned from the backend."""
        if response.content:
            # fix hardcoded account ID in ARNs returned from this API
            fix_account_id(response)
            # fix dates returned from this API (fixes an issue with Terraform)
            fix_date_format(response)
            # fix Content-Length header
            response.headers["Content-Length"] = len(response._content)
# base path prefix of the Elasticsearch Service API
API_PREFIX = '/2015-01-01'
DEFAULT_ES_VERSION = '7.7'

# maps domain name => domain details
ES_DOMAINS = {}

# default cluster configuration applied when none is supplied
DEFAULT_ES_CLUSTER_CONFIG = {
    'InstanceType': 'm3.medium.elasticsearch',
    'InstanceCount': 1,
    'DedicatedMasterEnabled': True,
    'ZoneAwarenessEnabled': False,
    'DedicatedMasterType': 'm3.medium.elasticsearch',
    'DedicatedMasterCount': 1
}

# tagging service for domain tags
TAGS = TaggingService()

app = Flask(APP_NAME)
app.url_map.strict_slashes = False


def error_response(error_type, code=400, message='Unknown error.'):
    """Build a Flask JSON error response.

    :param error_type: value for the `x-amzn-errortype` header
    :param code: HTTP status code (default 400)
    :param message: error text; when falsy, a default is derived from error_type
    :return: tuple of (Flask response, status code)
    """
    if not message:
        defaults = {
            'ResourceNotFoundException': 'Resource not found.',
            'ResourceAlreadyExistsException': 'Resource already exists.',
        }
        message = defaults.get(error_type, message)
    response = make_response(jsonify({'error': message}))
    response.headers['x-amzn-errortype'] = error_type
    return response, code
class ProxyListenerEvents(ProxyListener):
    """Proxy listener for the Events API: serves PutEvents and tagging
    operations locally, forwarding everything else to the backend."""

    # shared tagging service backing the Tag/Untag/ListTags operations
    svc = TaggingService()

    def forward_request(self, method, path, data, headers):
        """Handle selected Events operations locally; return True to forward."""
        action = headers.get('X-Amz-Target')
        if method == 'POST' and path == '/':
            parsed_data = json.loads(to_str(data))
            if action == 'AWSEvents.PutEvents':
                # assign a fresh UUID to each incoming event entry
                events_with_added_uuid = list(
                    map(
                        lambda event: {
                            'event': event,
                            'uuid': str(uuid.uuid4())
                        },
                        parsed_data['Entries']))
                # response mirrors the AWS API shape: one EventId per entry
                content = {
                    'Entries': list(
                        map(lambda event: {'EventId': event['uuid']},
                            events_with_added_uuid))
                }
                # persist events to temp files for local inspection/testing
                self._create_and_register_temp_dir()
                self._dump_events_to_files(events_with_added_uuid)
                return make_response(content)
            elif action == 'AWSEvents.ListTagsForResource':
                return make_response(
                    self.svc.list_tags_for_resource(
                        parsed_data['ResourceARN']))
            elif action == 'AWSEvents.TagResource':
                self.svc.tag_resource(parsed_data['ResourceARN'],
                                      parsed_data['Tags'])
                return make_response()
            elif action == 'AWSEvents.UntagResource':
                self.svc.untag_resource(parsed_data['ResourceARN'],
                                        parsed_data['TagKeys'])
                return make_response()
        if method == 'OPTIONS':
            # CORS preflight - answer directly
            return 200
        # forward the request to the backend service
        return True

    def return_response(self, method, path, data, headers, response, request_handler=None):
        """Post-process responses returned from the backend."""
        if response.content:
            # fix hardcoded account ID in ARNs returned from this API
            self._fix_account_id(response)
            # fix dates returned from this API (fixes an issue with Terraform)
            self._fix_date_format(response)
            # fix content-length header
            response.headers['content-length'] = len(response._content)

    def _create_and_register_temp_dir(self):
        # create the events temp dir once and register it for cleanup
        if EVENTS_TMP_DIR not in TMP_FILES:
            mkdir(EVENTS_TMP_DIR)
            TMP_FILES.append(EVENTS_TMP_DIR)

    def _dump_events_to_files(self, events_with_added_uuid):
        """Write each event to a file named '<millis>_<uuid>' in the temp dir."""
        current_time_millis = int(round(time.time() * 1000))
        for event in events_with_added_uuid:
            save_file(
                os.path.join(EVENTS_TMP_DIR,
                             '%s_%s' % (current_time_millis, event['uuid'])),
                json.dumps(event['event']))

    def _fix_date_format(self, response):
        """ Normalize date to format '2019-06-13T18:10:09.1234Z' """
        # NOTE(review): pattern reconstructed from a line-wrapped source -
        # assumes a single space separated the date and time groups; confirm.
        pattern = r'<CreateDate>([^<]+) ([^<+]+)(\+[^<]*)?</CreateDate>'
        replacement = r'<CreateDate>\1T\2Z</CreateDate>'
        self._replace(response, pattern, replacement)

    def _fix_account_id(self, response):
        # replace moto's hardcoded account ID with the test account ID
        return aws_stack.fix_account_id_in_arns(
            response, existing=MOTO_ACCOUNT_ID, replace=TEST_AWS_ACCOUNT_ID)

    def _replace(self, response, pattern, replacement):
        # apply a regex substitution to the raw response body in place
        content = to_str(response.content)
        response._content = re.sub(pattern, replacement, content)
def __init__(self):
    """Initialize the provider with a per-instance tagging service."""
    self.tags = TaggingService()
class CloudwatchProvider(CloudwatchApi, ServiceLifecycleHook):
    """
    Cloudwatch provider.

    LIMITATIONS:
      - no alarm rule evaluation
    """

    def __init__(self):
        # per-instance tag storage for CloudWatch resources
        self.tags = TaggingService()

    def on_after_init(self):
        # expose the raw-metrics endpoint on the internal router
        ROUTER.add(PATH_GET_RAW_METRICS, self.get_raw_metrics)

    def get_raw_metrics(self, request: Request):
        """Return all raw metric data points for the caller's region.

        :param request: incoming HTTP request (region derived from auth header)
        :return: dict of shape {"metrics": [{"ns", "n", "v", "t", "d"}, ...]}
        """
        region = aws_stack.extract_region_from_auth_header(request.headers)
        backend = cloudwatch_backends.get(region)
        # no backend for this region => no metrics recorded yet
        result = backend.metric_data if backend else []
        result = [
            {
                "ns": r.namespace,
                "n": r.name,
                "v": r.value,
                "t": r.timestamp,
                "d": [{"n": d.name, "v": d.value} for d in r.dimensions],
            }
            for r in result
        ]
        return {"metrics": result}

    def list_tags_for_resource(
            self, context: RequestContext,
            resource_arn: AmazonResourceName) -> ListTagsForResourceOutput:
        """Return the tags stored for the given resource ARN."""
        tags = self.tags.list_tags_for_resource(resource_arn)
        return ListTagsForResourceOutput(Tags=tags.get("Tags", []))

    def untag_resource(self, context: RequestContext,
                       resource_arn: AmazonResourceName,
                       tag_keys: TagKeyList) -> UntagResourceOutput:
        """Remove the given tag keys from the resource."""
        self.tags.untag_resource(resource_arn, tag_keys)
        return UntagResourceOutput()

    def tag_resource(self, context: RequestContext,
                     resource_arn: AmazonResourceName,
                     tags: TagList) -> TagResourceOutput:
        """Attach the given tags to the resource."""
        self.tags.tag_resource(resource_arn, tags)
        return TagResourceOutput()

    @handler("PutMetricAlarm", expand=False)
    def put_metric_alarm(
        self,
        context: RequestContext,
        request: PutMetricAlarmInput,
    ) -> None:
        """Create/update a metric alarm via moto, then record its tags."""
        moto.call_moto(context)
        name = request.get("AlarmName")
        arn = aws_stack.cloudwatch_alarm_arn(name)
        # NOTE(review): passes None when "Tags" is absent - verify that
        # TaggingService.tag_resource tolerates a missing tag list.
        self.tags.tag_resource(arn, request.get("Tags"))

    @handler("PutCompositeAlarm", expand=False)
    def put_composite_alarm(
        self,
        context: RequestContext,
        request: PutCompositeAlarmInput,
    ) -> None:
        """Store a composite alarm in the moto backend.

        Only the rule/description/actions fields from the request are kept;
        all metric-specific parameters are unset for composite alarms.
        """
        # fix: removed a stray no-op `pass` statement that preceded this code
        backend = cloudwatch_backends[context.region]
        backend.put_metric_alarm(
            name=request.get("AlarmName"),
            namespace=None,
            metric_name=None,
            metric_data_queries=None,
            comparison_operator=None,
            evaluation_periods=None,
            datapoints_to_alarm=None,
            period=None,
            threshold=None,
            statistic=None,
            extended_statistic=None,
            description=request.get("AlarmDescription"),
            dimensions=[],
            alarm_actions=request.get("AlarmActions", []),
            ok_actions=request.get("OKActions", []),
            insufficient_data_actions=request.get("InsufficientDataActions", []),
            unit=None,
            actions_enabled=request.get("ActionsEnabled"),
            treat_missing_data=None,
            evaluate_low_sample_count_percentile=None,
            threshold_metric_id=None,
            rule=request.get("AlarmRule"),
            tags=request.get("Tags", []),
        )
class LambdaServiceBackend(RegionBackend):
    """Holds Lambda service state; account/region are implicit through the Backend."""

    # maps function name => Function
    functions: Dict[str, Function] = {}

    # static tagging service instance, shared by all backend instances
    TAGS = TaggingService()
def __init__(self):
    """Initialize provider state."""
    # per-instance tag storage
    self.tags = TaggingService()
    # assigned later during service startup - TODO confirm lifecycle hook
    self.alarm_scheduler = None
class CloudwatchProvider(CloudwatchApi, ServiceLifecycleHook):
    """
    Cloudwatch provider.

    LIMITATIONS:
      - no alarm rule evaluation
    """

    def __init__(self):
        # per-instance tag storage for CloudWatch resources
        self.tags = TaggingService()
        # created in on_after_init
        self.alarm_scheduler = None

    def on_after_init(self):
        # expose the raw-metrics endpoint and start the alarm scheduler
        ROUTER.add(PATH_GET_RAW_METRICS, self.get_raw_metrics)
        self.alarm_scheduler = AlarmScheduler()

    def on_before_start(self):
        # re-schedule alarms for persistence use-case
        def restart_alarms(*args):
            # wait until the cloudwatch service is up before restoring alarms
            poll_condition(lambda: SERVICE_PLUGINS.is_running("cloudwatch"))
            self.alarm_scheduler.restart_existing_alarms()

        start_worker_thread(restart_alarms)

    def on_before_stop(self):
        # stop all scheduled alarm evaluation threads
        self.alarm_scheduler.shutdown_scheduler()

    def delete_alarms(self, context: RequestContext, alarm_names: AlarmNames) -> None:
        """Delete alarms via moto and unschedule their evaluation."""
        moto.call_moto(context)
        for alarm_name in alarm_names:
            arn = aws_stack.cloudwatch_alarm_arn(alarm_name)
            self.alarm_scheduler.delete_scheduler_for_alarm(arn)

    def get_raw_metrics(self, request: Request):
        """Return all raw metric data points for the caller's region.

        :param request: incoming HTTP request (region derived from auth header)
        :return: dict of shape {"metrics": [{"ns", "n", "v", "t", "d"}, ...]}
        """
        region = aws_stack.extract_region_from_auth_header(request.headers)
        backend = cloudwatch_backends.get(region)
        if backend:
            result = backend.metric_data
        else:
            # no backend for this region => no metrics recorded yet
            result = []
        result = [{
            "ns": r.namespace,
            "n": r.name,
            "v": r.value,
            "t": r.timestamp,
            "d": [{
                "n": d.name,
                "v": d.value
            } for d in r.dimensions],
        } for r in result]
        return {"metrics": result}

    def list_tags_for_resource(
            self, context: RequestContext,
            resource_arn: AmazonResourceName) -> ListTagsForResourceOutput:
        """Return the tags stored for the given resource ARN."""
        tags = self.tags.list_tags_for_resource(resource_arn)
        return ListTagsForResourceOutput(Tags=tags.get("Tags", []))

    def untag_resource(self, context: RequestContext,
                       resource_arn: AmazonResourceName,
                       tag_keys: TagKeyList) -> UntagResourceOutput:
        """Remove the given tag keys from the resource."""
        self.tags.untag_resource(resource_arn, tag_keys)
        return UntagResourceOutput()

    def tag_resource(self, context: RequestContext,
                     resource_arn: AmazonResourceName,
                     tags: TagList) -> TagResourceOutput:
        """Attach the given tags to the resource."""
        self.tags.tag_resource(resource_arn, tags)
        return TagResourceOutput()

    @handler("PutMetricAlarm", expand=False)
    def put_metric_alarm(
        self,
        context: RequestContext,
        request: PutMetricAlarmInput,
    ) -> None:
        """Validate the request, create the alarm via moto, tag it, and
        schedule its evaluation."""
        # missing will be the default, when not set (but it will not explicitly be set)
        if not request.get("TreatMissingData", "missing") in [
            "breaching",
            "notBreaching",
            "ignore",
            "missing",
        ]:
            raise ValidationError(
                f"The value {request['TreatMissingData']} is not supported for TreatMissingData parameter. Supported values are [breaching, notBreaching, ignore, missing]."
            )
        # do some sanity checks:
        if request.get("Period"):
            # Valid values are 10, 30, and any multiple of 60.
            value = request.get("Period")
            if value not in (10, 30):
                if value % 60 != 0:
                    raise ValidationError(
                        "Period must be 10, 30 or a multiple of 60")
        if request.get("Statistic"):
            if not request.get("Statistic") in [
                "SampleCount",
                "Average",
                "Sum",
                "Minimum",
                "Maximum",
            ]:
                raise ValidationError(
                    f"Value '{request.get('Statistic')}' at 'statistic' failed to satisfy constraint: Member must satisfy enum value set: [Maximum, SampleCount, Sum, Minimum, Average]"
                )

        moto.call_moto(context)
        name = request.get("AlarmName")
        arn = aws_stack.cloudwatch_alarm_arn(name)
        # NOTE(review): passes None when "Tags" is absent - verify that
        # TaggingService.tag_resource tolerates a missing tag list.
        self.tags.tag_resource(arn, request.get("Tags"))
        self.alarm_scheduler.schedule_metric_alarm(arn)

    @handler("PutCompositeAlarm", expand=False)
    def put_composite_alarm(
        self,
        context: RequestContext,
        request: PutCompositeAlarmInput,
    ) -> None:
        """Store a composite alarm in the moto backend; metric-specific
        parameters are unset for composite alarms."""
        backend = cloudwatch_backends[context.region]
        backend.put_metric_alarm(
            name=request.get("AlarmName"),
            namespace=None,
            metric_name=None,
            metric_data_queries=None,
            comparison_operator=None,
            evaluation_periods=None,
            datapoints_to_alarm=None,
            period=None,
            threshold=None,
            statistic=None,
            extended_statistic=None,
            description=request.get("AlarmDescription"),
            dimensions=[],
            alarm_actions=request.get("AlarmActions", []),
            ok_actions=request.get("OKActions", []),
            insufficient_data_actions=request.get("InsufficientDataActions", []),
            unit=None,
            actions_enabled=request.get("ActionsEnabled"),
            treat_missing_data=None,
            evaluate_low_sample_count_percentile=None,
            threshold_metric_id=None,
            rule=request.get("AlarmRule"),
            tags=request.get("Tags", []),
        )
        LOG.warning(
            "Composite Alarms configuration is not yet supported, alarm state will not be evaluated"
        )