class Shard(BaseModel):
    """One Kinesis shard: an ordered map of sequence number -> Record,
    plus the partition-hash range this shard covers.

    `records` is an OrderedDict keyed by a monotonically increasing
    integer sequence number, so insertion order equals sequence order.
    """

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        self.records = OrderedDict()

    @property
    def shard_id(self):
        """Numeric id rendered in AWS wire form, e.g. 'shardId-000000000007'."""
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Return up to `limit` records strictly newer than `last_sequence_id`,
        together with the sequence number of the last record returned."""
        cursor = int(last_sequence_id)
        found = []
        for seq, rec in self.records.items():
            if seq <= cursor:
                continue
            found.append(rec)
            cursor = seq
            if len(found) == limit:
                break
        return found, cursor

    def put_record(self, partition_key, data, explicit_hash_key):
        """Append a new record and return its sequence number.

        Note: this function is not safe for concurrency.
        """
        next_seq = (self.get_max_sequence_number() if self.records else 0) + 1
        self.records[next_seq] = Record(
            partition_key, data, next_seq, explicit_hash_key
        )
        return next_seq

    def get_min_sequence_number(self):
        """Oldest stored sequence number, or 0 when the shard is empty."""
        return next(iter(self.records)) if self.records else 0

    def get_max_sequence_number(self):
        """Newest stored sequence number, or 0 when the shard is empty."""
        return list(self.records)[-1] if self.records else 0

    def to_json(self):
        """Serialize shard metadata in the DescribeStream response shape."""
        return {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash)
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class Shard(BaseModel):
    """In-memory Kinesis shard model.

    Keeps records in an OrderedDict keyed by an ever-increasing integer
    sequence number and remembers the hash-key range assigned to the shard.
    """

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        self.records = OrderedDict()

    @property
    def shard_id(self):
        """Zero-padded wire representation of the numeric shard id."""
        padded = str(self._shard_id).zfill(12)
        return "shardId-{0}".format(padded)

    def get_records(self, last_sequence_id, limit):
        """Fetch at most `limit` records with sequence numbers greater than
        `last_sequence_id`; also return the new read position."""
        position = int(last_sequence_id)
        page = []
        for number, stored in self.records.items():
            if number > position:
                page.append(stored)
                position = number
                if len(page) == limit:
                    break
        return page, position

    def put_record(self, partition_key, data, explicit_hash_key):
        """Store one record and hand back its sequence number.

        Note: this function is not safe for concurrency.
        """
        if self.records:
            previous = self.get_max_sequence_number()
        else:
            previous = 0
        assigned = previous + 1
        self.records[assigned] = Record(
            partition_key, data, assigned, explicit_hash_key
        )
        return assigned

    def get_min_sequence_number(self):
        """Smallest stored sequence number; 0 if there are no records."""
        if not self.records:
            return 0
        return list(self.records.keys())[0]

    def get_max_sequence_number(self):
        """Largest stored sequence number; 0 if there are no records."""
        if not self.records:
            return 0
        return list(self.records.keys())[-1]

    def to_json(self):
        """Dict matching the DescribeStream shard description format."""
        return {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash)
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class SNSBackend(BaseBackend):
    """In-memory SNS backend: topics and subscriptions keyed by ARN."""

    def __init__(self):
        self.topics = OrderedDict()
        self.subscriptions = OrderedDict()

    def create_topic(self, name):
        """Create a Topic and register it under its ARN."""
        topic = Topic(name, self)
        self.topics[topic.arn] = topic
        return topic

    def _get_values_nexttoken(self, values_map, next_token=None):
        """Return one page of `values_map` values plus the follow-up token.

        Fix: treat any falsy token (None or "") as offset 0 — previously
        an empty-string token reached `int("")` and raised ValueError.
        The later revision of this backend guards the same way.
        """
        if not next_token:
            next_token = 0
        next_token = int(next_token)
        values = list(values_map.values())[next_token:next_token + DEFAULT_PAGE_SIZE]
        if len(values) == DEFAULT_PAGE_SIZE:
            # A full page implies there may be more values after it.
            next_token = next_token + DEFAULT_PAGE_SIZE
        else:
            next_token = None
        return values, next_token

    def list_topics(self, next_token=None):
        """Paginated list of all topics."""
        return self._get_values_nexttoken(self.topics, next_token)

    def delete_topic(self, arn):
        # Raises KeyError for an unknown ARN (original behavior preserved).
        self.topics.pop(arn)

    def get_topic(self, arn):
        # Raises KeyError for an unknown ARN (original behavior preserved).
        return self.topics[arn]

    def set_topic_attribute(self, topic_arn, attribute_name, attribute_value):
        """Set an arbitrary attribute on the topic object."""
        topic = self.get_topic(topic_arn)
        setattr(topic, attribute_name, attribute_value)

    def subscribe(self, topic_arn, endpoint, protocol):
        """Create a subscription of `endpoint` to the given topic."""
        topic = self.get_topic(topic_arn)
        subscription = Subscription(topic, endpoint, protocol)
        self.subscriptions[subscription.arn] = subscription
        return subscription

    def unsubscribe(self, subscription_arn):
        # Raises KeyError for an unknown ARN (original behavior preserved).
        self.subscriptions.pop(subscription_arn)

    def list_subscriptions(self, topic_arn=None, next_token=None):
        """Paginated subscriptions, optionally filtered to one topic."""
        if topic_arn:
            topic = self.get_topic(topic_arn)
            filtered = OrderedDict(
                [(k, sub) for k, sub in self.subscriptions.items()
                 if sub.topic == topic]
            )
            return self._get_values_nexttoken(filtered, next_token)
        else:
            return self._get_values_nexttoken(self.subscriptions, next_token)

    def publish(self, topic_arn, message):
        """Publish `message` to the topic; returns the message id."""
        topic = self.get_topic(topic_arn)
        message_id = topic.publish(message)
        return message_id
class SNSBackend(BaseBackend):
    """Moto SNS backend holding topics and subscriptions in ARN-keyed maps."""

    def __init__(self):
        self.topics = OrderedDict()
        self.subscriptions = OrderedDict()

    def create_topic(self, name):
        """Build a Topic, index it by ARN, and return it."""
        topic = Topic(name, self)
        self.topics[topic.arn] = topic
        return topic

    def _get_values_nexttoken(self, values_map, next_token=None):
        """Slice one DEFAULT_PAGE_SIZE page out of `values_map`.

        Fix: an empty-string `next_token` previously fell through the
        `is None` check and crashed on `int("")`; treat any falsy token
        as offset 0, matching the newer revision of this backend.
        """
        if not next_token:
            next_token = 0
        next_token = int(next_token)
        values = list(values_map.values())[next_token: next_token + DEFAULT_PAGE_SIZE]
        if len(values) == DEFAULT_PAGE_SIZE:
            # Full page: hand the caller the offset of the next page.
            next_token = next_token + DEFAULT_PAGE_SIZE
        else:
            next_token = None
        return values, next_token

    def list_topics(self, next_token=None):
        """Paginated listing of every topic."""
        return self._get_values_nexttoken(self.topics, next_token)

    def delete_topic(self, arn):
        # KeyError on unknown ARN — intentional, matches original behavior.
        self.topics.pop(arn)

    def get_topic(self, arn):
        # KeyError on unknown ARN — intentional, matches original behavior.
        return self.topics[arn]

    def set_topic_attribute(self, topic_arn, attribute_name, attribute_value):
        """Assign an attribute on the topic object directly."""
        topic = self.get_topic(topic_arn)
        setattr(topic, attribute_name, attribute_value)

    def subscribe(self, topic_arn, endpoint, protocol):
        """Register a new subscription for the topic."""
        topic = self.get_topic(topic_arn)
        subscription = Subscription(topic, endpoint, protocol)
        self.subscriptions[subscription.arn] = subscription
        return subscription

    def unsubscribe(self, subscription_arn):
        # KeyError on unknown ARN — intentional, matches original behavior.
        self.subscriptions.pop(subscription_arn)

    def list_subscriptions(self, topic_arn=None, next_token=None):
        """Paginated subscriptions; filter by topic when `topic_arn` given."""
        if topic_arn:
            topic = self.get_topic(topic_arn)
            filtered = OrderedDict(
                [(k, sub) for k, sub in self.subscriptions.items()
                 if sub.topic == topic]
            )
            return self._get_values_nexttoken(filtered, next_token)
        else:
            return self._get_values_nexttoken(self.subscriptions, next_token)

    def publish(self, topic_arn, message):
        """Deliver `message` via the topic and return the message id."""
        topic = self.get_topic(topic_arn)
        message_id = topic.publish(message)
        return message_id
class Shard(object):
    """Early Kinesis shard model with a fixed, hard-coded hash-key range.

    Records live in an OrderedDict keyed by an increasing integer
    sequence number, so iteration order is sequence order.
    """

    def __init__(self, shard_id):
        self.shard_id = shard_id
        self.records = OrderedDict()

    def get_records(self, last_sequence_id, limit):
        """Return up to `limit` records newer than `last_sequence_id`,
        plus the sequence number of the last record returned."""
        cursor = int(last_sequence_id)
        batch = []
        for seq, rec in self.records.items():
            if seq <= cursor:
                continue
            batch.append(rec)
            cursor = seq
            if len(batch) == limit:
                break
        return batch, cursor

    def put_record(self, partition_key, data):
        """Append a record; returns its sequence number.

        Note: this function is not safe for concurrency.
        """
        next_seq = (self.get_max_sequence_number() if self.records else 0) + 1
        self.records[next_seq] = Record(partition_key, data, next_seq)
        return next_seq

    def get_min_sequence_number(self):
        """Oldest sequence number stored, or 0 for an empty shard."""
        return next(iter(self.records)) if self.records else 0

    def get_max_sequence_number(self):
        """Newest sequence number stored, or 0 for an empty shard."""
        return list(self.records)[-1] if self.records else 0

    def to_json(self):
        """DescribeStream-shaped shard description (hash range is fixed)."""
        return {
            "HashKeyRange": {
                "EndingHashKey": "113427455640312821154458202477256070484",
                "StartingHashKey": "0"
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class SNSBackend(BaseBackend):
    """Full in-memory SNS backend for one region.

    Holds topics, subscriptions, platform applications/endpoints, and a log
    of sent SMS messages. All collections are keyed by ARN (or message id).
    """

    def __init__(self, region_name):
        super(SNSBackend, self).__init__()
        self.topics = OrderedDict()
        self.subscriptions = OrderedDict()
        self.applications = {}
        self.platform_endpoints = {}
        self.region_name = region_name
        self.sms_attributes = {}
        self.sms_messages = OrderedDict()
        # Fixed sandbox numbers that are treated as opted out of SMS.
        self.opt_out_numbers = [
            "+447420500600",
            "+447420505401",
            "+447632960543",
            "+447632960028",
            "+447700900149",
            "+447700900550",
            "+447700900545",
            "+447700900907",
        ]

    def reset(self):
        """Wipe all state but keep the region this backend belongs to."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def update_sms_attributes(self, attrs):
        """Merge account-level SMS attributes."""
        self.sms_attributes.update(attrs)

    def create_topic(self, name, attributes=None, tags=None):
        """Create (or idempotently return) a topic after validating its name.

        FIFO topic names must end in '.fifo'; all names are limited to the
        SNS character set and 1-256 characters.
        """
        if attributes is None:
            attributes = {}
        if (
            attributes.get("FifoTopic")
            and attributes.get("FifoTopic").lower() == "true"
        ):
            fails_constraints = not re.match(r"^[a-zA-Z0-9_-]{1,256}\.fifo$", name)
            msg = "Fifo Topic names must end with .fifo and must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long."
        else:
            fails_constraints = not re.match(r"^[a-zA-Z0-9_-]{1,256}$", name)
            msg = "Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long."
        if fails_constraints:
            raise InvalidParameterValue(msg)
        candidate_topic = Topic(name, self)
        if attributes:
            for attribute in attributes:
                setattr(
                    candidate_topic,
                    camelcase_to_underscores(attribute),
                    attributes[attribute],
                )
        if tags:
            candidate_topic._tags = tags
        if candidate_topic.arn in self.topics:
            # Re-creating an existing topic returns the existing one.
            return self.topics[candidate_topic.arn]
        else:
            self.topics[candidate_topic.arn] = candidate_topic
            return candidate_topic

    def _get_values_nexttoken(self, values_map, next_token=None):
        """Return one DEFAULT_PAGE_SIZE page of values and the next offset
        token (None when this is the last page)."""
        if next_token is None or not next_token:
            next_token = 0
        next_token = int(next_token)
        values = list(values_map.values())[next_token : next_token + DEFAULT_PAGE_SIZE]
        if len(values) == DEFAULT_PAGE_SIZE:
            next_token = next_token + DEFAULT_PAGE_SIZE
        else:
            next_token = None
        return values, next_token

    def _get_topic_subscriptions(self, topic):
        """All subscriptions whose topic equals `topic`."""
        return [sub for sub in self.subscriptions.values() if sub.topic == topic]

    def list_topics(self, next_token=None):
        """Paginated list of topics."""
        return self._get_values_nexttoken(self.topics, next_token)

    def delete_topic_subscriptions(self, topic):
        """Remove every subscription attached to `topic`.

        Fix: iterate over a snapshot of the items — popping from the dict
        while iterating its live items() view raises
        `RuntimeError: dictionary changed size during iteration` on Python 3.
        """
        for key, value in list(self.subscriptions.items()):
            if value.topic == topic:
                self.subscriptions.pop(key)

    def delete_topic(self, arn):
        """Delete a topic and all of its subscriptions."""
        try:
            topic = self.get_topic(arn)
            self.delete_topic_subscriptions(topic)
            self.topics.pop(arn)
        except KeyError:
            raise SNSNotFoundError("Topic with arn {0} not found".format(arn))

    def get_topic(self, arn):
        """Look up a topic by ARN; raises SNSNotFoundError when absent."""
        try:
            return self.topics[arn]
        except KeyError:
            raise SNSNotFoundError("Topic with arn {0} not found".format(arn))

    def set_topic_attribute(self, topic_arn, attribute_name, attribute_value):
        """Set an arbitrary attribute on the topic object."""
        topic = self.get_topic(topic_arn)
        setattr(topic, attribute_name, attribute_value)

    def subscribe(self, topic_arn, endpoint, protocol):
        """Subscribe `endpoint` to the topic; SMS endpoints are validated
        as E.164 numbers (allowing '.', '/', '-' separators).

        AWS doesn't create duplicates: an identical existing subscription
        is returned instead of a new one.
        """
        if protocol == "sms":
            # Reject doubled or leading/trailing separators outright.
            if re.search(r"[./-]{2,}", endpoint) or re.search(
                r"(^[./-]|[./-]$)", endpoint
            ):
                raise SNSInvalidParameter("Invalid SMS endpoint: {}".format(endpoint))
            reduced_endpoint = re.sub(r"[./-]", "", endpoint)
            if not is_e164(reduced_endpoint):
                raise SNSInvalidParameter("Invalid SMS endpoint: {}".format(endpoint))
        # AWS doesn't create duplicates
        old_subscription = self._find_subscription(topic_arn, endpoint, protocol)
        if old_subscription:
            return old_subscription
        topic = self.get_topic(topic_arn)
        subscription = Subscription(topic, endpoint, protocol)
        attributes = {
            "PendingConfirmation": "false",
            "ConfirmationWasAuthenticated": "true",
            "Endpoint": endpoint,
            "TopicArn": topic_arn,
            "Protocol": protocol,
            "SubscriptionArn": subscription.arn,
            "Owner": DEFAULT_ACCOUNT_ID,
            "RawMessageDelivery": "false",
        }
        if protocol in ["http", "https"]:
            attributes["EffectiveDeliveryPolicy"] = topic.effective_delivery_policy
        subscription.attributes = attributes
        self.subscriptions[subscription.arn] = subscription
        return subscription

    def _find_subscription(self, topic_arn, endpoint, protocol):
        """Return an existing identical subscription, or None."""
        for subscription in self.subscriptions.values():
            if (
                subscription.topic.arn == topic_arn
                and subscription.endpoint == endpoint
                and subscription.protocol == protocol
            ):
                return subscription
        return None

    def unsubscribe(self, subscription_arn):
        """Remove a subscription; silently ignores unknown ARNs."""
        self.subscriptions.pop(subscription_arn, None)

    def list_subscriptions(self, topic_arn=None, next_token=None):
        """Paginated subscriptions, optionally filtered to one topic."""
        if topic_arn:
            topic = self.get_topic(topic_arn)
            filtered = OrderedDict(
                [(sub.arn, sub) for sub in self._get_topic_subscriptions(topic)]
            )
            return self._get_values_nexttoken(filtered, next_token)
        else:
            return self._get_values_nexttoken(self.subscriptions, next_token)

    def publish(
        self,
        message,
        arn=None,
        phone_number=None,
        subject=None,
        message_attributes=None,
    ):
        """Publish a message to a topic ARN, a platform endpoint ARN, or an
        SMS phone number; returns the message id.

        Raises ValueError for an over-long subject or SMS body, and
        InvalidParameterValue for an over-long topic message.
        """
        if subject is not None and len(subject) > 100:
            # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503
            raise ValueError("Subject must be less than 100 characters")
        if phone_number:
            # This is only an approximation. In fact, we should try to use
            # GSM-7 or UCS-2 encoding to count used bytes
            if len(message) > MAXIMUM_SMS_MESSAGE_BYTES:
                raise ValueError("SMS message must be less than 1600 bytes")
            message_id = six.text_type(uuid.uuid4())
            self.sms_messages[message_id] = (phone_number, message)
            return message_id
        if len(message) > MAXIMUM_MESSAGE_LENGTH:
            raise InvalidParameterValue(
                "An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: Message too long"
            )
        try:
            topic = self.get_topic(arn)
            message_id = topic.publish(
                message, subject=subject, message_attributes=message_attributes
            )
        except SNSNotFoundError:
            # Not a topic ARN — fall back to a platform endpoint ARN.
            endpoint = self.get_endpoint(arn)
            message_id = endpoint.publish(message)
        return message_id

    def create_platform_application(self, region, name, platform, attributes):
        """Register a platform application (e.g. GCM/APNS)."""
        application = PlatformApplication(region, name, platform, attributes)
        self.applications[application.arn] = application
        return application

    def get_application(self, arn):
        """Look up a platform application; raises SNSNotFoundError."""
        try:
            return self.applications[arn]
        except KeyError:
            raise SNSNotFoundError("Application with arn {0} not found".format(arn))

    def set_application_attributes(self, arn, attributes):
        """Merge attributes into an existing application."""
        application = self.get_application(arn)
        application.attributes.update(attributes)
        return application

    def list_platform_applications(self):
        """All registered platform applications (unpaginated)."""
        return self.applications.values()

    def delete_platform_application(self, platform_arn):
        # KeyError on unknown ARN — intentional, matches original behavior.
        self.applications.pop(platform_arn)

    def create_platform_endpoint(
        self, region, application, custom_user_data, token, attributes
    ):
        """Create a platform endpoint for a device token.

        NOTE(review): real AWS returns the existing endpoint when the token
        and attributes match; this model always errors on a duplicate token
        — preserved as-is, confirm against callers before changing.
        """
        if any(
            token == endpoint.token for endpoint in self.platform_endpoints.values()
        ):
            raise DuplicateSnsEndpointError("Duplicate endpoint token: %s" % token)
        platform_endpoint = PlatformEndpoint(
            region, application, custom_user_data, token, attributes
        )
        self.platform_endpoints[platform_endpoint.arn] = platform_endpoint
        return platform_endpoint

    def list_endpoints_by_platform_application(self, application_arn):
        """Endpoints belonging to one platform application."""
        return [
            endpoint
            for endpoint in self.platform_endpoints.values()
            if endpoint.application.arn == application_arn
        ]

    def get_endpoint(self, arn):
        """Look up a platform endpoint; raises SNSNotFoundError."""
        try:
            return self.platform_endpoints[arn]
        except KeyError:
            raise SNSNotFoundError("Endpoint does not exist")

    def set_endpoint_attributes(self, arn, attributes):
        """Merge attributes into an endpoint, lower-casing 'Enabled'."""
        endpoint = self.get_endpoint(arn)
        if "Enabled" in attributes:
            attributes["Enabled"] = attributes["Enabled"].lower()
        endpoint.attributes.update(attributes)
        return endpoint

    def delete_endpoint(self, arn):
        """Delete a platform endpoint; raises SNSNotFoundError when absent."""
        try:
            del self.platform_endpoints[arn]
        except KeyError:
            raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn))

    def get_subscription_attributes(self, arn):
        """Attributes dict of one subscription, located by ARN."""
        _subscription = [_ for _ in self.subscriptions.values() if _.arn == arn]
        if not _subscription:
            raise SNSNotFoundError("Subscription with arn {0} not found".format(arn))
        subscription = _subscription[0]
        return subscription.attributes

    def set_subscription_attributes(self, arn, name, value):
        """Set one of the whitelisted subscription attributes; a FilterPolicy
        is parsed, validated, and cached on the subscription."""
        if name not in [
            "RawMessageDelivery",
            "DeliveryPolicy",
            "FilterPolicy",
            "RedrivePolicy",
        ]:
            raise SNSInvalidParameter("AttributeName")
        # TODO: should do validation
        _subscription = [_ for _ in self.subscriptions.values() if _.arn == arn]
        if not _subscription:
            raise SNSNotFoundError("Subscription with arn {0} not found".format(arn))
        subscription = _subscription[0]
        subscription.attributes[name] = value
        if name == "FilterPolicy":
            filter_policy = json.loads(value)
            self._validate_filter_policy(filter_policy)
            subscription._filter_policy = filter_policy

    def _validate_filter_policy(self, value):
        """Validate a parsed filter policy's complexity and rule types."""
        # TODO: extend validation checks
        combinations = 1
        for rules in six.itervalues(value):
            combinations *= len(rules)
        # Even the official documentation states the total combination of
        # values must not exceed 100, in reality it is 150
        # https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html#subscription-filter-policy-constraints
        if combinations > 150:
            raise SNSInvalidParameter(
                "Invalid parameter: FilterPolicy: Filter policy is too complex"
            )
        for field, rules in six.iteritems(value):
            for rule in rules:
                # Allowed rule values: null, string, bool, bounded number,
                # or a single-key dict with a recognized match operator.
                if rule is None:
                    continue
                if isinstance(rule, six.string_types):
                    continue
                if isinstance(rule, bool):
                    continue
                if isinstance(rule, (six.integer_types, float)):
                    if rule <= -1000000000 or rule >= 1000000000:
                        raise InternalError("Unknown")
                    continue
                if isinstance(rule, dict):
                    keyword = list(rule.keys())[0]
                    attributes = list(rule.values())[0]
                    if keyword == "anything-but":
                        continue
                    elif keyword == "exists":
                        if not isinstance(attributes, bool):
                            raise SNSInvalidParameter(
                                "Invalid parameter: FilterPolicy: exists match pattern must be either true or false."
                            )
                        continue
                    elif keyword == "numeric":
                        continue
                    elif keyword == "prefix":
                        continue
                    else:
                        raise SNSInvalidParameter(
                            "Invalid parameter: FilterPolicy: Unrecognized match type {type}".format(
                                type=keyword
                            )
                        )
                raise SNSInvalidParameter(
                    "Invalid parameter: FilterPolicy: Match value must be String, number, true, false, or null"
                )

    def add_permission(self, topic_arn, label, aws_account_ids, action_names):
        """Append an Allow statement (Sid=`label`) to the topic's policy."""
        if topic_arn not in self.topics:
            raise SNSNotFoundError("Topic does not exist")
        policy = self.topics[topic_arn]._policy_json
        statement = next(
            (
                statement
                for statement in policy["Statement"]
                if statement["Sid"] == label
            ),
            None,
        )
        if statement:
            raise SNSInvalidParameter("Statement already exists")
        if any(action_name not in VALID_POLICY_ACTIONS for action_name in action_names):
            raise SNSInvalidParameter("Policy statement action out of service scope!")
        principals = [
            "arn:aws:iam::{}:root".format(account_id) for account_id in aws_account_ids
        ]
        actions = ["SNS:{}".format(action_name) for action_name in action_names]
        statement = {
            "Sid": label,
            "Effect": "Allow",
            # A single principal/action is stored as a scalar, not a list.
            "Principal": {"AWS": principals[0] if len(principals) == 1 else principals},
            "Action": actions[0] if len(actions) == 1 else actions,
            "Resource": topic_arn,
        }
        self.topics[topic_arn]._policy_json["Statement"].append(statement)

    def remove_permission(self, topic_arn, label):
        """Drop the policy statement whose Sid equals `label` (if any)."""
        if topic_arn not in self.topics:
            raise SNSNotFoundError("Topic does not exist")
        statements = self.topics[topic_arn]._policy_json["Statement"]
        statements = [
            statement for statement in statements if statement["Sid"] != label
        ]
        self.topics[topic_arn]._policy_json["Statement"] = statements

    def list_tags_for_resource(self, resource_arn):
        """Tags dict of a topic; raises ResourceNotFoundError when absent."""
        if resource_arn not in self.topics:
            raise ResourceNotFoundError
        return self.topics[resource_arn]._tags

    def tag_resource(self, resource_arn, tags):
        """Merge tags onto a topic, enforcing the 50-tag limit."""
        if resource_arn not in self.topics:
            raise ResourceNotFoundError
        updated_tags = self.topics[resource_arn]._tags.copy()
        updated_tags.update(tags)
        if len(updated_tags) > 50:
            raise TagLimitExceededError
        self.topics[resource_arn]._tags = updated_tags

    def untag_resource(self, resource_arn, tag_keys):
        """Remove the given tag keys from a topic (missing keys ignored)."""
        if resource_arn not in self.topics:
            raise ResourceNotFoundError
        for key in tag_keys:
            self.topics[resource_arn]._tags.pop(key, None)
class AutoScalingBackend(BaseBackend):
    """In-memory Auto Scaling backend.

    Coordinates launch configurations, auto scaling groups, and scaling
    policies, and keeps attached ELBs / ELBv2 target groups in sync with
    each group's instances via the injected ec2/elb/elbv2 backends.
    """

    def __init__(self, ec2_backend, elb_backend, elbv2_backend):
        self.autoscaling_groups = OrderedDict()
        self.launch_configurations = OrderedDict()
        self.policies = {}
        # Sibling backends used to create/terminate instances and to
        # (de)register them with load balancers and target groups.
        self.ec2_backend = ec2_backend
        self.elb_backend = elb_backend
        self.elbv2_backend = elbv2_backend

    def reset(self):
        """Wipe all state, preserving the sibling backend references."""
        ec2_backend = self.ec2_backend
        elb_backend = self.elb_backend
        elbv2_backend = self.elbv2_backend
        self.__dict__ = {}
        self.__init__(ec2_backend, elb_backend, elbv2_backend)

    def create_launch_configuration(
        self,
        name,
        image_id,
        key_name,
        kernel_id,
        ramdisk_id,
        security_groups,
        user_data,
        instance_type,
        instance_monitoring,
        instance_profile_name,
        spot_price,
        ebs_optimized,
        associate_public_ip_address,
        block_device_mappings,
    ):
        """Create and register a FakeLaunchConfiguration under `name`."""
        launch_configuration = FakeLaunchConfiguration(
            name=name,
            image_id=image_id,
            key_name=key_name,
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            security_groups=security_groups,
            user_data=user_data,
            instance_type=instance_type,
            instance_monitoring=instance_monitoring,
            instance_profile_name=instance_profile_name,
            spot_price=spot_price,
            ebs_optimized=ebs_optimized,
            associate_public_ip_address=associate_public_ip_address,
            block_device_mapping_dict=block_device_mappings,
        )
        self.launch_configurations[name] = launch_configuration
        return launch_configuration

    def describe_launch_configurations(self, names):
        """All launch configurations, or only those whose name is in `names`."""
        configurations = self.launch_configurations.values()
        if names:
            return [
                configuration
                for configuration in configurations
                if configuration.name in names
            ]
        else:
            return list(configurations)

    def delete_launch_configuration(self, launch_configuration_name):
        """Delete a launch configuration; unknown names are ignored."""
        self.launch_configurations.pop(launch_configuration_name, None)

    def create_auto_scaling_group(
        self,
        name,
        availability_zones,
        desired_capacity,
        max_size,
        min_size,
        launch_config_name,
        vpc_zone_identifier,
        default_cooldown,
        health_check_period,
        health_check_type,
        load_balancers,
        target_group_arns,
        placement_group,
        termination_policies,
        tags,
        new_instances_protected_from_scale_in=False,
        instance_id=None,
    ):
        """Create an ASG, then sync its attached ELBs and target groups.

        Numeric parameters arrive as strings from the API layer and are
        coerced here; `health_check_period` defaults to 300 seconds.
        When no launch config is given but `instance_id` is, a launch
        configuration named after the group is derived from that instance.
        """
        def make_int(value):
            # None passes through so optional params stay optional.
            return int(value) if value is not None else value

        max_size = make_int(max_size)
        min_size = make_int(min_size)
        desired_capacity = make_int(desired_capacity)
        default_cooldown = make_int(default_cooldown)
        if health_check_period is None:
            health_check_period = 300
        else:
            health_check_period = make_int(health_check_period)
        if launch_config_name is None and instance_id is not None:
            try:
                instance = self.ec2_backend.get_instance(instance_id)
                launch_config_name = name
                FakeLaunchConfiguration.create_from_instance(
                    launch_config_name, instance, self)
            except InvalidInstanceIdError:
                raise InvalidInstanceError(instance_id)
        group = FakeAutoScalingGroup(
            name=name,
            availability_zones=availability_zones,
            desired_capacity=desired_capacity,
            max_size=max_size,
            min_size=min_size,
            launch_config_name=launch_config_name,
            vpc_zone_identifier=vpc_zone_identifier,
            default_cooldown=default_cooldown,
            health_check_period=health_check_period,
            health_check_type=health_check_type,
            load_balancers=load_balancers,
            target_group_arns=target_group_arns,
            placement_group=placement_group,
            termination_policies=termination_policies,
            autoscaling_backend=self,
            tags=tags,
            new_instances_protected_from_scale_in=
            new_instances_protected_from_scale_in,
        )
        self.autoscaling_groups[name] = group
        self.update_attached_elbs(group.name)
        self.update_attached_target_groups(group.name)
        return group

    def update_auto_scaling_group(
        self,
        name,
        availability_zones,
        desired_capacity,
        max_size,
        min_size,
        launch_config_name,
        vpc_zone_identifier,
        default_cooldown,
        health_check_period,
        health_check_type,
        placement_group,
        termination_policies,
        new_instances_protected_from_scale_in=None,
    ):
        """Delegate an in-place update to the existing group object."""
        group = self.autoscaling_groups[name]
        group.update(
            availability_zones,
            desired_capacity,
            max_size,
            min_size,
            launch_config_name,
            vpc_zone_identifier,
            default_cooldown,
            health_check_period,
            health_check_type,
            placement_group,
            termination_policies,
            new_instances_protected_from_scale_in=
            new_instances_protected_from_scale_in,
        )
        return group

    def describe_auto_scaling_groups(self, names):
        """All groups, or only those whose name is in `names`."""
        groups = self.autoscaling_groups.values()
        if names:
            return [group for group in groups if group.name in names]
        else:
            return list(groups)

    def delete_auto_scaling_group(self, group_name):
        """Scale the group to zero (terminating its instances), then remove it."""
        self.set_desired_capacity(group_name, 0)
        self.autoscaling_groups.pop(group_name, None)

    def describe_auto_scaling_instances(self, instance_ids):
        """Instance states across all groups, optionally filtered by id."""
        instance_states = []
        for group in self.autoscaling_groups.values():
            instance_states.extend([
                x for x in group.instance_states
                if not instance_ids or x.instance.id in instance_ids
            ])
        return instance_states

    def attach_instances(self, group_name, instance_ids):
        """Attach existing EC2 instances to a group, tagging them with the
        group name and raising ResourceContentionError past max_size."""
        group = self.autoscaling_groups[group_name]
        original_size = len(group.instance_states)
        if (original_size + len(instance_ids)) > group.max_size:
            raise ResourceContentionError
        else:
            # Attaching implicitly raises the desired capacity.
            group.desired_capacity = original_size + len(instance_ids)
            new_instances = [
                InstanceState(
                    self.ec2_backend.get_instance(x),
                    protected_from_scale_in=group.
                    new_instances_protected_from_scale_in,
                ) for x in instance_ids
            ]
            for instance in new_instances:
                self.ec2_backend.create_tags([instance.instance.id],
                                             {ASG_NAME_TAG: group.name})
            group.instance_states.extend(new_instances)
            self.update_attached_elbs(group.name)
            self.update_attached_target_groups(group.name)

    def set_instance_health(self, instance_id, health_status,
                            should_respect_grace_period):
        """Set the health status of one instance's state record.

        NOTE(review): `should_respect_grace_period` is accepted but never
        read here — confirm whether that is intentional API parity.
        """
        instance = self.ec2_backend.get_instance(instance_id)
        instance_state = next(instance_state
                              for group in self.autoscaling_groups.values()
                              for instance_state in group.instance_states
                              if instance_state.instance.id == instance.id)
        instance_state.health_status = health_status

    def detach_instances(self, group_name, instance_ids, should_decrement):
        """Detach instances from a group (untagging them); optionally shrink
        desired capacity, otherwise the group backfills replacements."""
        group = self.autoscaling_groups[group_name]
        original_size = group.desired_capacity
        detached_instances = [
            x for x in group.instance_states if x.instance.id in instance_ids
        ]
        for instance in detached_instances:
            self.ec2_backend.delete_tags([instance.instance.id],
                                         {ASG_NAME_TAG: group.name})
        new_instance_state = [
            x for x in group.instance_states
            if x.instance.id not in instance_ids
        ]
        group.instance_states = new_instance_state
        if should_decrement:
            group.desired_capacity = original_size - len(instance_ids)
        # Re-applying capacity lets the group replace instances when the
        # capacity was not decremented.
        group.set_desired_capacity(group.desired_capacity)
        return detached_instances

    def set_desired_capacity(self, group_name, desired_capacity):
        """Set a group's desired capacity and re-sync its ELBs."""
        group = self.autoscaling_groups[group_name]
        group.set_desired_capacity(desired_capacity)
        self.update_attached_elbs(group_name)

    def change_capacity(self, group_name, scaling_adjustment):
        """Adjust desired capacity by an absolute delta."""
        group = self.autoscaling_groups[group_name]
        desired_capacity = group.desired_capacity + scaling_adjustment
        self.set_desired_capacity(group_name, desired_capacity)

    def change_capacity_percent(self, group_name, scaling_adjustment):
        """
        http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html
        If PercentChangeInCapacity returns a value between 0 and 1,
        Auto Scaling will round it off to 1. If the PercentChangeInCapacity
        returns a value greater than 1, Auto Scaling will round it off to the
        lower value. For example, if PercentChangeInCapacity returns 12.5,
        then Auto Scaling will round it off to 12."""
        group = self.autoscaling_groups[group_name]
        percent_change = 1 + (scaling_adjustment / 100.0)
        desired_capacity = group.desired_capacity * percent_change
        if group.desired_capacity < desired_capacity < group.desired_capacity + 1:
            # Fractional increase below one whole instance rounds up to 1.
            desired_capacity = group.desired_capacity + 1
        else:
            desired_capacity = int(desired_capacity)
        self.set_desired_capacity(group_name, desired_capacity)

    def create_autoscaling_policy(self, name, policy_type, adjustment_type,
                                  as_name, scaling_adjustment, cooldown):
        """Create and register a scaling policy keyed by its name."""
        policy = FakeScalingPolicy(
            name,
            policy_type,
            adjustment_type,
            as_name,
            scaling_adjustment,
            cooldown,
            self,
        )
        self.policies[name] = policy
        return policy

    def describe_policies(self, autoscaling_group_name=None,
                          policy_names=None, policy_types=None):
        """Policies filtered by group name, policy names, and/or types."""
        return [
            policy for policy in self.policies.values()
            if (not autoscaling_group_name
                or policy.as_name == autoscaling_group_name) and (
                    not policy_names or policy.name in policy_names) and (
                        not policy_types or policy.policy_type in policy_types)
        ]

    def delete_policy(self, group_name):
        """Delete a policy by name; unknown names are ignored."""
        self.policies.pop(group_name, None)

    def execute_policy(self, group_name):
        """Run a policy's scaling action immediately."""
        policy = self.policies[group_name]
        policy.execute()

    def update_attached_elbs(self, group_name):
        """Reconcile each attached ELB's registrations with the group's
        currently active instances."""
        group = self.autoscaling_groups[group_name]
        group_instance_ids = set(state.instance.id
                                 for state in group.active_instances())
        # skip this if group.load_balancers is empty
        # otherwise elb_backend.describe_load_balancers returns all available load balancers
        if not group.load_balancers:
            return
        try:
            elbs = self.elb_backend.describe_load_balancers(
                names=group.load_balancers)
        except LoadBalancerNotFoundError:
            # ELBs can be deleted before their autoscaling group
            return
        for elb in elbs:
            elb_instace_ids = set(elb.instance_ids)
            self.elb_backend.register_instances(
                elb.name, group_instance_ids - elb_instace_ids)
            self.elb_backend.deregister_instances(
                elb.name, elb_instace_ids - group_instance_ids)

    def update_attached_target_groups(self, group_name):
        """Register every group instance with each attached target group."""
        group = self.autoscaling_groups[group_name]
        group_instance_ids = set(state.instance.id
                                 for state in group.instance_states)
        # no action necessary if target_group_arns is empty
        if not group.target_group_arns:
            return
        target_groups = self.elbv2_backend.describe_target_groups(
            target_group_arns=group.target_group_arns,
            load_balancer_arn=None,
            names=None,
        )
        for target_group in target_groups:
            asg_targets = [{
                "id": x,
                "port": target_group.port
            } for x in group_instance_ids]
            self.elbv2_backend.register_targets(target_group.arn, (asg_targets))

    def create_or_update_tags(self, tags):
        """Upsert each tag on its group: replace an existing tag with the
        same key, otherwise append it."""
        for tag in tags:
            group_name = tag["resource_id"]
            group = self.autoscaling_groups[group_name]
            old_tags = group.tags
            new_tags = []
            # if key was in old_tags, update old tag
            for old_tag in old_tags:
                if old_tag["key"] == tag["key"]:
                    new_tags.append(tag)
                else:
                    new_tags.append(old_tag)
            # if key was never in old_tag's add it (create tag)
            if not any(new_tag["key"] == tag["key"] for new_tag in new_tags):
                new_tags.append(tag)
            group.tags = new_tags

    def attach_load_balancers(self, group_name, load_balancer_names):
        """Attach ELBs (skipping duplicates) and sync registrations."""
        group = self.autoscaling_groups[group_name]
        group.load_balancers.extend(
            [x for x in load_balancer_names if x not in group.load_balancers])
        self.update_attached_elbs(group_name)

    def describe_load_balancers(self, group_name):
        """Names of the ELBs attached to a group."""
        return self.autoscaling_groups[group_name].load_balancers

    def detach_load_balancers(self, group_name, load_balancer_names):
        """Deregister the group's instances from its ELBs, then drop the
        named ELBs from the group."""
        group = self.autoscaling_groups[group_name]
        group_instance_ids = set(state.instance.id
                                 for state in group.instance_states)
        elbs = self.elb_backend.describe_load_balancers(
            names=group.load_balancers)
        for elb in elbs:
            self.elb_backend.deregister_instances(elb.name, group_instance_ids)
        group.load_balancers = [
            x for x in group.load_balancers if x not in load_balancer_names
        ]

    def attach_load_balancer_target_groups(self, group_name, target_group_arns):
        """Attach target groups to a group and sync their registrations."""
        group = self.autoscaling_groups[group_name]
        group.append_target_groups(target_group_arns)
        self.update_attached_target_groups(group_name)

    def describe_load_balancer_target_groups(self, group_name):
        """ARNs of the target groups attached to a group."""
        return self.autoscaling_groups[group_name].target_group_arns

    def detach_load_balancer_target_groups(self, group_name, target_group_arns):
        """Drop the named target groups and deregister the group's
        instances from each of them."""
        group = self.autoscaling_groups[group_name]
        group.target_group_arns = [
            x for x in group.target_group_arns if x not in target_group_arns
        ]
        for target_group in target_group_arns:
            asg_targets = [{
                "id": x.instance.id
            } for x in group.instance_states]
            self.elbv2_backend.deregister_targets(target_group, (asg_targets))

    def suspend_processes(self, group_name, scaling_processes):
        """Replace the group's suspended-process list wholesale.

        NOTE(review): this overwrites rather than merges with any already
        suspended processes — confirm against expected AWS semantics.
        """
        group = self.autoscaling_groups[group_name]
        group.suspended_processes = scaling_processes or []

    def set_instance_protection(self, group_name, instance_ids,
                                protected_from_scale_in):
        """Toggle scale-in protection on the named instances of a group."""
        group = self.autoscaling_groups[group_name]
        protected_instances = [
            x for x in group.instance_states if x.instance.id in instance_ids
        ]
        for instance in protected_instances:
            instance.protected_from_scale_in = protected_from_scale_in

    def notify_terminate_instances(self, instance_ids):
        """React to externally terminated instances: drop them from every
        group and launch replacements to restore active capacity."""
        for (
                autoscaling_group_name,
                autoscaling_group,
        ) in self.autoscaling_groups.items():
            original_active_instance_count = len(
                autoscaling_group.active_instances())
            autoscaling_group.instance_states = list(
                filter(
                    lambda i_state: i_state.instance.id not in instance_ids,
                    autoscaling_group.instance_states,
                ))
            difference = original_active_instance_count - len(
                autoscaling_group.active_instances())
            if difference > 0:
                autoscaling_group.replace_autoscaling_group_instances(
                    difference, autoscaling_group.get_propagated_tags())
                self.update_attached_elbs(autoscaling_group_name)

    def enter_standby_instances(self, group_name, instance_ids, should_decrement):
        """Move instances to Standby; optionally decrement desired capacity.

        Returns (standby_instances, original_capacity, new_capacity).
        """
        group = self.autoscaling_groups[group_name]
        original_size = group.desired_capacity
        standby_instances = []
        for instance_state in group.instance_states:
            if instance_state.instance.id in instance_ids:
                instance_state.lifecycle_state = "Standby"
                standby_instances.append(instance_state)
        if should_decrement:
            group.desired_capacity = group.desired_capacity - len(instance_ids)
        group.set_desired_capacity(group.desired_capacity)
        return standby_instances, original_size, group.desired_capacity

    def exit_standby_instances(self, group_name, instance_ids):
        """Return Standby instances to InService, growing desired capacity.

        Returns (standby_instances, original_capacity, new_capacity).
        """
        group = self.autoscaling_groups[group_name]
        original_size = group.desired_capacity
        standby_instances = []
        for instance_state in group.instance_states:
            if instance_state.instance.id in instance_ids:
                instance_state.lifecycle_state = "InService"
                standby_instances.append(instance_state)
        group.desired_capacity = group.desired_capacity + len(instance_ids)
        group.set_desired_capacity(group.desired_capacity)
        return standby_instances, original_size, group.desired_capacity

    def terminate_instance(self, instance_id, should_decrement):
        """Detach and terminate one instance from its group.

        Returns (instance_state, original_capacity, new_capacity).
        """
        instance = self.ec2_backend.get_instance(instance_id)
        instance_state = next(instance_state
                              for group in self.autoscaling_groups.values()
                              for instance_state in group.instance_states
                              if instance_state.instance.id == instance.id)
        group = instance.autoscaling_group
        original_size = group.desired_capacity
        self.detach_instances(group.name, [instance.id], should_decrement)
        self.ec2_backend.terminate_instances([instance.id])
        return instance_state, original_size, group.desired_capacity
class Shard(BaseModel):
    """In-memory Kinesis shard: an ordered mapping of sequence number -> Record."""

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        # Insertion order doubles as sequence-number order.
        self.records = OrderedDict()

    @property
    def shard_id(self):
        # AWS-style shard id, e.g. "shardId-000000000007".
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Return up to `limit` records with sequence number > `last_sequence_id`.

        Returns (records, last_sequence_id_of_results, millis_behind_latest);
        the lag is the created_at delta between the shard's newest record and
        the last record returned, converted to milliseconds.
        """
        last_sequence_id = int(last_sequence_id)
        results = []
        secs_behind_latest = 0
        for sequence_number, record in self.records.items():
            if sequence_number > last_sequence_id:
                results.append(record)
                last_sequence_id = sequence_number
                very_last_record = self.records[next(reversed(self.records))]
                secs_behind_latest = very_last_record.created_at - record.created_at
                if len(results) == limit:
                    break
        millis_behind_latest = int(secs_behind_latest * 1000)
        return results, last_sequence_id, millis_behind_latest

    def put_record(self, partition_key, data, explicit_hash_key):
        """Append a record and return its newly assigned sequence number.

        Note: this function is not safe for concurrency.
        """
        if self.records:
            last_sequence_number = self.get_max_sequence_number()
        else:
            last_sequence_number = 0
        sequence_number = last_sequence_number + 1
        self.records[sequence_number] = Record(
            partition_key, data, sequence_number, explicit_hash_key)
        return sequence_number

    def get_min_sequence_number(self):
        # Lowest sequence number in the shard, or 0 when empty.
        if self.records:
            return list(self.records.keys())[0]
        return 0

    def get_max_sequence_number(self):
        # Highest sequence number in the shard, or 0 when empty.
        if self.records:
            return list(self.records.keys())[-1]
        return 0

    def get_sequence_number_at(self, at_timestamp):
        """Return the sequence number of the newest record created strictly
        before `at_timestamp`, or 0 when no such record exists."""
        if not self.records or at_timestamp < list(self.records.values())[0].created_at:
            return 0
        # Find the last item in the list that was created before at_timestamp.
        r = next(
            (r for r in reversed(self.records.values())
             if r.created_at < at_timestamp),
            None,
        )
        if r is None:
            # Bug fix: when at_timestamp equals the first record's created_at,
            # no record satisfies created_at < at_timestamp and the original
            # raised AttributeError on None; start before the first record.
            return 0
        return r.sequence_number

    def to_json(self):
        """Serialize the shard as a DescribeStream-style dict."""
        return {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash)
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class SNSBackend(BaseBackend):
    """In-memory SNS backend: topics, subscriptions, platform applications
    and platform endpoints for a single region."""

    def __init__(self, region_name):
        super(SNSBackend, self).__init__()
        self.topics = OrderedDict()
        self.subscriptions = OrderedDict()
        self.applications = {}
        self.platform_endpoints = {}
        self.region_name = region_name

    def reset(self):
        # Rebuild the backend from scratch, keeping only the region name.
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_topic(self, name):
        topic = Topic(name, self)
        self.topics[topic.arn] = topic
        return topic

    def _get_values_nexttoken(self, values_map, next_token=None):
        # Page through the map's values DEFAULT_PAGE_SIZE at a time; the token
        # is simply the integer offset of the next page (None means done).
        offset = 0 if next_token is None else int(next_token)
        page = list(values_map.values())[offset:offset + DEFAULT_PAGE_SIZE]
        if len(page) == DEFAULT_PAGE_SIZE:
            return page, offset + DEFAULT_PAGE_SIZE
        return page, None

    def list_topics(self, next_token=None):
        return self._get_values_nexttoken(self.topics, next_token)

    def delete_topic(self, arn):
        self.topics.pop(arn)

    def get_topic(self, arn):
        try:
            return self.topics[arn]
        except KeyError:
            raise SNSNotFoundError("Topic with arn {0} not found".format(arn))

    def set_topic_attribute(self, topic_arn, attribute_name, attribute_value):
        setattr(self.get_topic(topic_arn), attribute_name, attribute_value)

    def subscribe(self, topic_arn, endpoint, protocol):
        sub = Subscription(self.get_topic(topic_arn), endpoint, protocol)
        self.subscriptions[sub.arn] = sub
        return sub

    def unsubscribe(self, subscription_arn):
        self.subscriptions.pop(subscription_arn)

    def list_subscriptions(self, topic_arn=None, next_token=None):
        if not topic_arn:
            return self._get_values_nexttoken(self.subscriptions, next_token)
        topic = self.get_topic(topic_arn)
        matching = OrderedDict(
            (arn, sub)
            for arn, sub in self.subscriptions.items()
            if sub.topic == topic
        )
        return self._get_values_nexttoken(matching, next_token)

    def publish(self, arn, message):
        # The ARN may name either a topic or a platform endpoint.
        try:
            return self.get_topic(arn).publish(message)
        except SNSNotFoundError:
            return self.get_endpoint(arn).publish(message)

    def create_platform_application(self, region, name, platform, attributes):
        app = PlatformApplication(region, name, platform, attributes)
        self.applications[app.arn] = app
        return app

    def get_application(self, arn):
        try:
            return self.applications[arn]
        except KeyError:
            raise SNSNotFoundError(
                "Application with arn {0} not found".format(arn))

    def set_application_attributes(self, arn, attributes):
        app = self.get_application(arn)
        app.attributes.update(attributes)
        return app

    def list_platform_applications(self):
        return self.applications.values()

    def delete_platform_application(self, platform_arn):
        self.applications.pop(platform_arn)

    def create_platform_endpoint(self, region, application, custom_user_data,
                                 token, attributes):
        endpoint = PlatformEndpoint(region, application, custom_user_data,
                                    token, attributes)
        self.platform_endpoints[endpoint.arn] = endpoint
        return endpoint

    def list_endpoints_by_platform_application(self, application_arn):
        return [
            ep for ep in self.platform_endpoints.values()
            if ep.application.arn == application_arn
        ]

    def get_endpoint(self, arn):
        try:
            return self.platform_endpoints[arn]
        except KeyError:
            raise SNSNotFoundError(
                "Endpoint with arn {0} not found".format(arn))

    def set_endpoint_attributes(self, arn, attributes):
        endpoint = self.get_endpoint(arn)
        endpoint.attributes.update(attributes)
        return endpoint
class SNSBackend(BaseBackend):
    """In-memory SNS backend: topics, subscriptions, platform applications
    and platform endpoints for a single region."""

    def __init__(self, region_name):
        super(SNSBackend, self).__init__()
        self.topics = OrderedDict()
        self.subscriptions = OrderedDict()
        self.applications = {}
        self.platform_endpoints = {}
        self.region_name = region_name

    def reset(self):
        # Rebuild the backend from scratch, keeping only the region name.
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_topic(self, name):
        topic = Topic(name, self)
        self.topics[topic.arn] = topic
        return topic

    def _get_values_nexttoken(self, values_map, next_token=None):
        # Page through the map's values DEFAULT_PAGE_SIZE at a time; the token
        # is simply the integer offset of the next page (None means done).
        offset = 0 if next_token is None else int(next_token)
        page = list(values_map.values())[offset:offset + DEFAULT_PAGE_SIZE]
        if len(page) == DEFAULT_PAGE_SIZE:
            return page, offset + DEFAULT_PAGE_SIZE
        return page, None

    def list_topics(self, next_token=None):
        return self._get_values_nexttoken(self.topics, next_token)

    def delete_topic(self, arn):
        self.topics.pop(arn)

    def get_topic(self, arn):
        try:
            return self.topics[arn]
        except KeyError:
            raise SNSNotFoundError("Topic with arn {0} not found".format(arn))

    def set_topic_attribute(self, topic_arn, attribute_name, attribute_value):
        setattr(self.get_topic(topic_arn), attribute_name, attribute_value)

    def subscribe(self, topic_arn, endpoint, protocol):
        sub = Subscription(self.get_topic(topic_arn), endpoint, protocol)
        self.subscriptions[sub.arn] = sub
        return sub

    def unsubscribe(self, subscription_arn):
        self.subscriptions.pop(subscription_arn)

    def list_subscriptions(self, topic_arn=None, next_token=None):
        if not topic_arn:
            return self._get_values_nexttoken(self.subscriptions, next_token)
        topic = self.get_topic(topic_arn)
        matching = OrderedDict(
            (arn, sub)
            for arn, sub in self.subscriptions.items()
            if sub.topic == topic
        )
        return self._get_values_nexttoken(matching, next_token)

    def publish(self, arn, message):
        # The ARN may name either a topic or a platform endpoint.
        try:
            return self.get_topic(arn).publish(message)
        except SNSNotFoundError:
            return self.get_endpoint(arn).publish(message)

    def create_platform_application(self, region, name, platform, attributes):
        app = PlatformApplication(region, name, platform, attributes)
        self.applications[app.arn] = app
        return app

    def get_application(self, arn):
        try:
            return self.applications[arn]
        except KeyError:
            raise SNSNotFoundError("Application with arn {0} not found".format(arn))

    def set_application_attributes(self, arn, attributes):
        app = self.get_application(arn)
        app.attributes.update(attributes)
        return app

    def list_platform_applications(self):
        return self.applications.values()

    def delete_platform_application(self, platform_arn):
        self.applications.pop(platform_arn)

    def create_platform_endpoint(self, region, application, custom_user_data,
                                 token, attributes):
        endpoint = PlatformEndpoint(region, application, custom_user_data,
                                    token, attributes)
        self.platform_endpoints[endpoint.arn] = endpoint
        return endpoint

    def list_endpoints_by_platform_application(self, application_arn):
        return [
            ep for ep in self.platform_endpoints.values()
            if ep.application.arn == application_arn
        ]

    def get_endpoint(self, arn):
        try:
            return self.platform_endpoints[arn]
        except KeyError:
            raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn))

    def set_endpoint_attributes(self, arn, attributes):
        endpoint = self.get_endpoint(arn)
        endpoint.attributes.update(attributes)
        return endpoint

    def delete_endpoint(self, arn):
        try:
            del self.platform_endpoints[arn]
        except KeyError:
            raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn))
class Shard(BaseModel):
    """In-memory Kinesis shard that tracks its own open/closed state."""

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        # Insertion order doubles as sequence-number order.
        self.records = OrderedDict()
        # Open shards report no EndingSequenceNumber in to_json().
        self.is_open = True

    @property
    def shard_id(self):
        # AWS-style shard id, e.g. "shardId-000000000007".
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Return up to `limit` records with sequence number > `last_sequence_id`.

        Returns (records, last_sequence_id_of_results, millis_behind_latest);
        the lag is the created_at delta between the shard's newest record and
        the last record returned, converted to milliseconds.
        """
        last_sequence_id = int(last_sequence_id)
        results = []
        secs_behind_latest = 0
        for sequence_number, record in self.records.items():
            if sequence_number > last_sequence_id:
                results.append(record)
                last_sequence_id = sequence_number
                very_last_record = self.records[next(reversed(self.records))]
                secs_behind_latest = very_last_record.created_at - record.created_at
                if len(results) == limit:
                    break
        millis_behind_latest = int(secs_behind_latest * 1000)
        return results, last_sequence_id, millis_behind_latest

    def put_record(self, partition_key, data, explicit_hash_key):
        """Append a record and return its newly assigned sequence number.

        Note: this function is not safe for concurrency.
        """
        if self.records:
            last_sequence_number = self.get_max_sequence_number()
        else:
            last_sequence_number = 0
        sequence_number = last_sequence_number + 1
        self.records[sequence_number] = Record(
            partition_key, data, sequence_number, explicit_hash_key
        )
        return sequence_number

    def get_min_sequence_number(self):
        # Lowest sequence number in the shard, or 0 when empty.
        if self.records:
            return list(self.records.keys())[0]
        return 0

    def get_max_sequence_number(self):
        # Highest sequence number in the shard, or 0 when empty.
        if self.records:
            return list(self.records.keys())[-1]
        return 0

    def get_sequence_number_at(self, at_timestamp):
        """Return the sequence number of the newest record created strictly
        before `at_timestamp`, or 0 when no such record exists."""
        if not self.records or at_timestamp < list(self.records.values())[0].created_at:
            return 0
        # Find the last item in the list that was created before at_timestamp.
        r = next(
            (
                r
                for r in reversed(self.records.values())
                if r.created_at < at_timestamp
            ),
            None,
        )
        if r is None:
            # Bug fix: when at_timestamp equals the first record's created_at,
            # no record satisfies created_at < at_timestamp and the original
            # raised AttributeError on None; start before the first record.
            return 0
        return r.sequence_number

    def to_json(self):
        """Serialize the shard; EndingSequenceNumber only appears once closed."""
        response = {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash),
            },
            "SequenceNumberRange": {
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id,
        }
        if not self.is_open:
            response["SequenceNumberRange"][
                "EndingSequenceNumber"
            ] = self.get_max_sequence_number()
        return response
class FakeTable(BaseModel): def __init__(self, database_name, table_name, table_input): self.database_name = database_name self.name = table_name self.partitions = OrderedDict() self.versions = [] self.update(table_input) def update(self, table_input): self.versions.append(table_input) def get_version(self, ver): try: if not isinstance(ver, int): # "1" goes to [0] ver = int(ver) - 1 except ValueError as e: raise JsonRESTError("InvalidInputException", str(e)) try: return self.versions[ver] except IndexError: raise VersionNotFoundException() def as_dict(self, version=-1): obj = {"DatabaseName": self.database_name, "Name": self.name} obj.update(self.get_version(version)) return obj def create_partition(self, partiton_input): partition = FakePartition(self.database_name, self.name, partiton_input) key = str(partition.values) if key in self.partitions: raise PartitionAlreadyExistsException() self.partitions[str(partition.values)] = partition def get_partitions(self): return [p for str_part_values, p in self.partitions.items()] def get_partition(self, values): try: return self.partitions[str(values)] except KeyError: raise PartitionNotFoundException() def update_partition(self, old_values, partiton_input): partition = FakePartition(self.database_name, self.name, partiton_input) key = str(partition.values) if old_values == partiton_input["Values"]: # Altering a partition in place. Don't remove it so the order of # returned partitions doesn't change if key not in self.partitions: raise PartitionNotFoundException() else: removed = self.partitions.pop(str(old_values), None) if removed is None: raise PartitionNotFoundException() if key in self.partitions: # Trying to update to overwrite a partition that exists raise PartitionAlreadyExistsException() self.partitions[key] = partition def delete_partition(self, values): try: del self.partitions[str(values)] except KeyError: raise PartitionNotFoundException()
class FakeTable(BaseModel): def __init__(self, database_name, table_name, table_input): self.database_name = database_name self.name = table_name self.partitions = OrderedDict() self.versions = [] self.update(table_input) def update(self, table_input): self.versions.append(table_input) def get_version(self, ver): try: if not isinstance(ver, int): # "1" goes to [0] ver = int(ver) - 1 except ValueError as e: raise JsonRESTError("InvalidInputException", str(e)) try: return self.versions[ver] except IndexError: raise VersionNotFoundException() def as_dict(self, version=-1): obj = { 'DatabaseName': self.database_name, 'Name': self.name, } obj.update(self.get_version(version)) return obj def create_partition(self, partiton_input): partition = FakePartition(self.database_name, self.name, partiton_input) key = str(partition.values) if key in self.partitions: raise PartitionAlreadyExistsException() self.partitions[str(partition.values)] = partition def get_partitions(self): return [p for str_part_values, p in self.partitions.items()] def get_partition(self, values): try: return self.partitions[str(values)] except KeyError: raise PartitionNotFoundException() def update_partition(self, old_values, partiton_input): partition = FakePartition(self.database_name, self.name, partiton_input) key = str(partition.values) if old_values == partiton_input['Values']: # Altering a partition in place. Don't remove it so the order of # returned partitions doesn't change if key not in self.partitions: raise PartitionNotFoundException() else: removed = self.partitions.pop(str(old_values), None) if removed is None: raise PartitionNotFoundException() if key in self.partitions: # Trying to update to overwrite a partition that exists raise PartitionAlreadyExistsException() self.partitions[key] = partition
class FakeListener(CloudFormationModel):
    """ELBv2 listener: default actions plus priority-ordered non-default rules."""

    def __init__(
        self,
        load_balancer_arn,
        arn,
        protocol,
        port,
        ssl_policy,
        certificate,
        default_actions,
    ):
        self.load_balancer_arn = load_balancer_arn
        self.arn = arn
        self.protocol = protocol.upper()
        self.port = port
        self.ssl_policy = ssl_policy
        self.certificate = certificate
        self.certificates = [certificate] if certificate is not None else []
        self.default_actions = default_actions
        self._non_default_rules = OrderedDict()
        self._default_rule = OrderedDict()
        self._default_rule[0] = FakeRule(
            listener_arn=self.arn,
            conditions=[],
            priority="default",
            actions=default_actions,
            is_default=True,
        )

    @property
    def physical_resource_id(self):
        return self.arn

    @property
    def rules(self):
        # Non-default rules first, the default rule last.
        return OrderedDict(
            list(self._non_default_rules.items()) + list(self._default_rule.items())
        )

    def remove_rule(self, arn):
        self._non_default_rules.pop(arn)

    def register(self, arn, rule):
        """Add (or replace) a rule and keep the rule map ordered by priority.

        Bug fix: the original called sorted() on the rule values and discarded
        the result — a no-op — so rules stayed in insertion order.
        """
        self._non_default_rules[arn] = rule
        self._non_default_rules = OrderedDict(
            sorted(self._non_default_rules.items(), key=lambda kv: kv[1].priority)
        )

    @staticmethod
    def cloudformation_name_type():
        return None

    @staticmethod
    def cloudformation_type():
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html
        return "AWS::ElasticLoadBalancingV2::Listener"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name
    ):
        """Build a listener from its CloudFormation Properties dict by
        delegating to the regional elbv2 backend."""
        properties = cloudformation_json["Properties"]

        elbv2_backend = elbv2_backends[region_name]
        load_balancer_arn = properties.get("LoadBalancerArn")
        protocol = properties.get("Protocol")
        port = properties.get("Port")
        ssl_policy = properties.get("SslPolicy")
        certificates = properties.get("Certificates")

        # Transform default actions to conform with the rest of the code and
        # the XML templates.
        default_actions = []
        for i, action in enumerate(properties["DefaultActions"]):
            action_type = action["Type"]
            if action_type == "forward":
                default_actions.append(
                    {"type": action_type, "target_group_arn": action["TargetGroupArn"]}
                )
            elif action_type in [
                "redirect",
                "authenticate-cognito",
                "fixed-response",
            ]:
                redirect_action = {"type": action_type}
                key = (
                    underscores_to_camelcase(
                        action_type.capitalize().replace("-", "_")
                    )
                    + "Config"
                )
                for redirect_config_key, redirect_config_value in action[key].items():
                    # Need to match the output of _get_list_prefix.
                    redirect_action[
                        camelcase_to_underscores(key)
                        + "._"
                        + camelcase_to_underscores(redirect_config_key)
                    ] = redirect_config_value
                default_actions.append(redirect_action)
            else:
                raise InvalidActionTypeError(action_type, i + 1)

        listener = elbv2_backend.create_listener(
            load_balancer_arn, protocol, port, ssl_policy, certificates, default_actions
        )
        return listener