class Shard(BaseModel):
    """In-memory model of a Kinesis shard.

    Records live in an OrderedDict keyed by a monotonically increasing
    integer sequence number, so insertion order equals sequence order.
    """

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        # sequence number -> Record, in sequence order
        self.records = OrderedDict()

    @property
    def shard_id(self):
        # AWS-style shard id, e.g. "shardId-000000000007"
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Return up to ``limit`` records whose sequence number is greater
        than ``last_sequence_id``, plus the last sequence number returned."""
        cursor = int(last_sequence_id)
        batch = []
        for seq, rec in self.records.items():
            if seq <= cursor:
                continue
            batch.append(rec)
            cursor = seq
            if len(batch) == limit:
                break
        return batch, cursor

    def put_record(self, partition_key, data, explicit_hash_key):
        """Append a record and return its newly assigned sequence number."""
        # Note: this function is not safe for concurrency
        next_seq = (self.get_max_sequence_number() if self.records else 0) + 1
        self.records[next_seq] = Record(
            partition_key, data, next_seq, explicit_hash_key)
        return next_seq

    def get_min_sequence_number(self):
        """Lowest stored sequence number, or 0 when the shard is empty."""
        if not self.records:
            return 0
        return next(iter(self.records))

    def get_max_sequence_number(self):
        """Highest stored sequence number, or 0 when the shard is empty."""
        if not self.records:
            return 0
        return next(reversed(self.records))

    def to_json(self):
        """Serialize in the shape used by DescribeStream responses."""
        return {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash)
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class Shard(BaseModel):
    """A mocked Kinesis shard holding records keyed by sequence number."""

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        self.records = OrderedDict()

    @property
    def shard_id(self):
        # Formatted the way AWS reports shard ids (12-digit, zero padded).
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Collect records newer than ``last_sequence_id`` (at most ``limit``)."""
        last_seen = int(last_sequence_id)
        collected = []
        for sequence_number in self.records:
            if sequence_number > last_seen:
                collected.append(self.records[sequence_number])
                last_seen = sequence_number
                if len(collected) == limit:
                    break
        return collected, last_seen

    def put_record(self, partition_key, data, explicit_hash_key):
        """Store a new record; sequence numbers are assigned sequentially."""
        # Note: this function is not safe for concurrency
        if self.records:
            previous = self.get_max_sequence_number()
        else:
            previous = 0
        assigned = previous + 1
        new_record = Record(partition_key, data, assigned, explicit_hash_key)
        self.records[assigned] = new_record
        return assigned

    def get_min_sequence_number(self):
        """First (oldest) sequence number, or 0 if empty."""
        keys = list(self.records)
        return keys[0] if keys else 0

    def get_max_sequence_number(self):
        """Last (newest) sequence number, or 0 if empty."""
        keys = list(self.records)
        return keys[-1] if keys else 0

    def to_json(self):
        """Build the DescribeStream-style dict for this shard."""
        hash_range = {
            "EndingHashKey": str(self.ending_hash),
            "StartingHashKey": str(self.starting_hash)
        }
        seq_range = {
            "EndingSequenceNumber": self.get_max_sequence_number(),
            "StartingSequenceNumber": self.get_min_sequence_number(),
        }
        return {
            "HashKeyRange": hash_range,
            "SequenceNumberRange": seq_range,
            "ShardId": self.shard_id
        }
class Shard(object):
    """Minimal Kinesis shard model: records in an OrderedDict keyed by an
    ever-increasing integer sequence number."""

    def __init__(self, shard_id):
        self.shard_id = shard_id
        self.records = OrderedDict()

    def get_records(self, last_sequence_id, limit):
        """Return up to ``limit`` records after ``last_sequence_id``, and the
        highest sequence number that was handed out."""
        cursor = int(last_sequence_id)
        found = []
        for seq, rec in self.records.items():
            if seq <= cursor:
                continue
            found.append(rec)
            cursor = seq
            if len(found) == limit:
                break
        return found, cursor

    def put_record(self, partition_key, data):
        """Append a record and return its sequence number."""
        # Note: this function is not safe for concurrency
        next_seq = (self.get_max_sequence_number() if self.records else 0) + 1
        self.records[next_seq] = Record(partition_key, data, next_seq)
        return next_seq

    def get_min_sequence_number(self):
        """Oldest sequence number present, or 0 for an empty shard."""
        if not self.records:
            return 0
        return next(iter(self.records))

    def get_max_sequence_number(self):
        """Newest sequence number present, or 0 for an empty shard."""
        if not self.records:
            return 0
        return next(reversed(self.records))

    def to_json(self):
        """DescribeStream-style serialization. Hash-key range is fixed here."""
        return {
            "HashKeyRange": {
                "EndingHashKey": "113427455640312821154458202477256070484",
                "StartingHashKey": "0"
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class Shard(BaseModel):
    """Kinesis shard model that also tracks how far a reader lags behind
    the newest record (``millis_behind_latest``) and supports lookups by
    timestamp (``get_sequence_number_at``)."""

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        # sequence number -> Record, kept in insertion (= sequence) order
        self.records = OrderedDict()

    @property
    def shard_id(self):
        # AWS-style shard id, e.g. "shardId-000000000007"
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Return (records, last_sequence_id, millis_behind_latest).

        Collects up to ``limit`` records with a sequence number strictly
        greater than ``last_sequence_id``.
        """
        last_sequence_id = int(last_sequence_id)
        results = []
        secs_behind_latest = 0
        for sequence_number, record in self.records.items():
            if sequence_number > last_sequence_id:
                results.append(record)
                last_sequence_id = sequence_number
                # Lag is measured from the most recently stored record to the
                # last record we are returning; recomputed each iteration so
                # the final value reflects the last appended record.
                very_last_record = self.records[next(reversed(self.records))]
                secs_behind_latest = very_last_record.created_at - record.created_at
            if len(results) == limit:
                break
        # assumes Record.created_at is in (fractional) seconds — TODO confirm
        millis_behind_latest = int(secs_behind_latest * 1000)
        return results, last_sequence_id, millis_behind_latest

    def put_record(self, partition_key, data, explicit_hash_key):
        """Append a record; returns its newly assigned sequence number."""
        # Note: this function is not safe for concurrency
        if self.records:
            last_sequence_number = self.get_max_sequence_number()
        else:
            last_sequence_number = 0
        sequence_number = last_sequence_number + 1
        self.records[sequence_number] = Record(
            partition_key, data, sequence_number, explicit_hash_key)
        return sequence_number

    def get_min_sequence_number(self):
        """Lowest stored sequence number, or 0 when the shard is empty."""
        if self.records:
            return list(self.records.keys())[0]
        return 0

    def get_max_sequence_number(self):
        """Highest stored sequence number, or 0 when the shard is empty."""
        if self.records:
            return list(self.records.keys())[-1]
        return 0

    def get_sequence_number_at(self, at_timestamp):
        """Return the sequence number of the newest record created before
        ``at_timestamp``, or 0 when the shard is empty or every record is
        newer than the timestamp."""
        if not self.records or at_timestamp < list(self.records.values())[0].created_at:
            return 0
        else:
            # find the last item in the list that was created before
            # at_timestamp
            r = next((r for r in reversed(self.records.values()) if r.created_at < at_timestamp), None)
            # r cannot be None here: the guard above ensured at least the
            # first record predates at_timestamp.
            return r.sequence_number

    def to_json(self):
        """Serialize in the shape used by DescribeStream responses."""
        return {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash)
            },
            "SequenceNumberRange": {
                "EndingSequenceNumber": self.get_max_sequence_number(),
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id
        }
class CloudFormationBackend(BaseBackend):
    """In-memory CloudFormation backend holding stacks, stack sets, change
    sets, deleted stacks and cross-stack exports.

    ``stacks`` and ``change_sets`` are keyed by their generated ids;
    ``exports`` is keyed by export name and shared with created stacks for
    cross-stack resolution.
    """

    def __init__(self):
        self.stacks = OrderedDict()
        self.stacksets = OrderedDict()
        self.deleted_stacks = {}
        self.exports = OrderedDict()
        self.change_sets = OrderedDict()

    def create_stack_set(
        self,
        name,
        template,
        parameters,
        tags=None,
        description=None,
        region="us-east-1",
        admin_role=None,
        execution_role=None,
    ):
        """Create a FakeStackSet, register it under a generated id, return it."""
        stackset_id = generate_stackset_id(name)
        new_stackset = FakeStackSet(
            stackset_id=stackset_id,
            name=name,
            template=template,
            parameters=parameters,
            description=description,
            tags=tags,
            admin_role=admin_role,
            execution_role=execution_role,
        )
        self.stacksets[stackset_id] = new_stackset
        return new_stackset

    def get_stack_set(self, name):
        """Return the first stack set with the given name.

        Raises:
            ValidationError: when no stack set has that name.
        """
        for stackset in self.stacksets.values():
            if stackset.name == name:
                return stackset
        raise ValidationError(name)

    def delete_stack_set(self, name):
        """Mark every stack set matching ``name`` as deleted (no error if none)."""
        for stackset in self.stacksets.values():
            if stackset.name == name:
                stackset.delete()

    def create_stack_instances(
        self, stackset_name, accounts, regions, parameters, operation_id=None
    ):
        """Create instances of a stack set across accounts/regions."""
        stackset = self.get_stack_set(stackset_name)
        stackset.create_stack_instances(
            accounts=accounts,
            regions=regions,
            parameters=parameters,
            operation_id=operation_id,
        )
        return stackset

    def update_stack_set(
        self,
        stackset_name,
        template=None,
        description=None,
        parameters=None,
        tags=None,
        admin_role=None,
        execution_role=None,
        accounts=None,
        regions=None,
        operation_id=None,
    ):
        """Update a stack set; returns whatever FakeStackSet.update returns."""
        stackset = self.get_stack_set(stackset_name)
        update = stackset.update(
            template=template,
            description=description,
            parameters=parameters,
            tags=tags,
            admin_role=admin_role,
            execution_role=execution_role,
            accounts=accounts,
            regions=regions,
            operation_id=operation_id,
        )
        return update

    def delete_stack_instances(
        self, stackset_name, accounts, regions, operation_id=None
    ):
        """Delete a stack set's instances from the given accounts/regions."""
        stackset = self.get_stack_set(stackset_name)
        stackset.delete_stack_instances(accounts, regions, operation_id)
        return stackset

    def create_stack(
        self,
        name,
        template,
        parameters,
        region_name,
        notification_arns=None,
        tags=None,
        role_arn=None,
        create_change_set=False,
    ):
        """Create and register a FakeStack; publish its exports.

        Raises:
            ValidationError: when the new stack's export names collide with
                existing exports in this region.
        """
        stack_id = generate_stack_id(name)
        new_stack = FakeStack(
            stack_id=stack_id,
            name=name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports,
            create_change_set=create_change_set,
        )
        self.stacks[stack_id] = new_stack
        self._validate_export_uniqueness(new_stack)
        for export in new_stack.exports:
            self.exports[export.name] = export
        return new_stack

    def create_change_set(
        self,
        stack_name,
        change_set_name,
        template,
        parameters,
        region_name,
        change_set_type,
        notification_arns=None,
        tags=None,
        role_arn=None,
    ):
        """Create a FakeChangeSet; for UPDATE it must target an existing stack.

        The change set is registered both under its id and (as a stack) under
        the stack id, mirroring how AWS exposes a pending change set.
        """
        stack_id = None
        stack_template = None
        if change_set_type == "UPDATE":
            stack = None
            # Last stack with a matching name wins, as before.
            for s in self.stacks.values():
                if s.name == stack_name:
                    stack = s
                    stack_id = stack.stack_id
                    stack_template = stack.template
            if stack is None:
                raise ValidationError(stack_name)
        else:
            stack_id = generate_stack_id(stack_name)
            stack_template = template
        change_set_id = generate_changeset_id(change_set_name, region_name)
        new_change_set = FakeChangeSet(
            stack_id=stack_id,
            stack_name=stack_name,
            stack_template=stack_template,
            change_set_id=change_set_id,
            change_set_name=change_set_name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports,
        )
        self.change_sets[change_set_id] = new_change_set
        self.stacks[stack_id] = new_change_set
        return change_set_id, stack_id

    def delete_change_set(self, change_set_name, stack_name=None):
        """Delete a change set addressed by ARN/id or by name."""
        if change_set_name in self.change_sets:
            # This means arn was passed in
            del self.change_sets[change_set_name]
        else:
            # Collect ids first: deleting while iterating the dict would
            # raise "dictionary changed size during iteration".
            matching_ids = [
                cs_id
                for cs_id, cs in self.change_sets.items()
                if cs.change_set_name == change_set_name
            ]
            for cs_id in matching_ids:
                del self.change_sets[cs_id]

    def describe_change_set(self, change_set_name, stack_name=None):
        """Find a change set by ARN/id or name; raises ValidationError if absent."""
        change_set = None
        if change_set_name in self.change_sets:
            # This means arn was passed in
            change_set = self.change_sets[change_set_name]
        else:
            # Last change set with a matching name wins, as before.
            for cs in self.change_sets.values():
                if cs.change_set_name == change_set_name:
                    change_set = cs
        if change_set is None:
            raise ValidationError(change_set_name)
        return change_set

    def execute_change_set(self, change_set_name, stack_name=None):
        """Apply a change set: emit the appropriate stack events and create
        the stack's resources.

        Raises:
            ValidationError: when no change set matches ``change_set_name``.
        """
        stack = None
        if change_set_name in self.change_sets:
            # This means arn was passed in
            stack = self.change_sets[change_set_name]
        else:
            for cs in self.change_sets.values():
                if cs.change_set_name == change_set_name:
                    stack = cs
        if stack is None:
            raise ValidationError(stack_name)
        if stack.events[-1].resource_status == "REVIEW_IN_PROGRESS":
            stack._add_stack_event("CREATE_COMPLETE")
        else:
            stack._add_stack_event("UPDATE_IN_PROGRESS")
            stack._add_stack_event("UPDATE_COMPLETE")
        stack.create_resources()
        return True

    def describe_stacks(self, name_or_stack_id):
        """Return a one-element list for a named/id'd stack (deleted stacks
        are matched by id only), or all live stacks when no name is given.

        Raises:
            ValidationError: when a name/id is given but nothing matches.
        """
        stacks = self.stacks.values()
        if name_or_stack_id:
            for stack in stacks:
                if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
                    return [stack]
            if self.deleted_stacks:
                deleted_stacks = self.deleted_stacks.values()
                for stack in deleted_stacks:
                    if stack.stack_id == name_or_stack_id:
                        return [stack]
            raise ValidationError(name_or_stack_id)
        else:
            return list(stacks)

    def list_change_sets(self):
        """All registered change sets (a dict view)."""
        return self.change_sets.values()

    def list_stacks(self):
        """All stacks, live and deleted, as a single list."""
        return [v for v in self.stacks.values()] + [
            v for v in self.deleted_stacks.values()
        ]

    def get_stack(self, name_or_stack_id):
        """Look up a stack by id (deleted included) or by name (live only).

        Returns None when nothing matches — callers must handle it.
        """
        all_stacks = dict(self.deleted_stacks, **self.stacks)
        if name_or_stack_id in all_stacks:
            # Lookup by stack id - deleted stacks included
            return all_stacks[name_or_stack_id]
        else:
            # Lookup by stack name - undeleted stacks only
            for stack in self.stacks.values():
                if stack.name == name_or_stack_id:
                    return stack

    def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):
        """Apply an update to an existing stack and return it."""
        stack = self.get_stack(name)
        stack.update(template, role_arn, parameters=parameters, tags=tags)
        return stack

    def list_stack_resources(self, stack_name_or_id):
        """Resources of a stack, or None when the stack does not exist."""
        stack = self.get_stack(stack_name_or_id)
        if stack is None:
            return None
        return stack.stack_resources

    def delete_stack(self, name_or_stack_id):
        """Delete a stack by id (moving it to deleted_stacks and dropping its
        exports) or by name (recursing with the id)."""
        if name_or_stack_id in self.stacks:
            # Delete by stack id
            stack = self.stacks.pop(name_or_stack_id, None)
            stack.delete()
            self.deleted_stacks[stack.stack_id] = stack
            for export in stack.exports:
                self.exports.pop(export.name)
            return self.stacks.pop(name_or_stack_id, None)
        else:
            # Delete by stack name
            for stack in list(self.stacks.values()):
                if stack.name == name_or_stack_id:
                    self.delete_stack(stack.stack_id)

    def list_exports(self, token):
        """Paginate exports 100 at a time; ``token`` is a stringified offset."""
        all_exports = list(self.exports.values())
        if token is None:
            exports = all_exports[0:100]
            next_token = "100" if len(all_exports) > 100 else None
        else:
            token = int(token)
            exports = all_exports[token : token + 100]
            next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    def validate_template(self, template):
        """Delegate template validation to cfn-lint."""
        return validate_template_cfn_lint(template)

    def _validate_export_uniqueness(self, stack):
        """Raise ValidationError if the stack's export names collide with
        exports already registered in this region."""
        new_stack_export_names = [x.name for x in stack.exports]
        export_names = self.exports.keys()
        if not set(export_names).isdisjoint(new_stack_export_names):
            raise ValidationError(
                stack.stack_id,
                message="Export names must be unique across a given region",
            )
class CloudFormationBackend(BaseBackend):
    """Legacy in-memory CloudFormation backend (stacks, change sets, exports).

    NOTE(review): ``create_change_set`` stores a plain dict (not a FakeStack)
    into ``self.stacks`` under the change-set *name*; downstream code appears
    to rely on that shape, so it is documented here rather than changed.
    """

    def __init__(self):
        self.stacks = OrderedDict()
        self.deleted_stacks = {}
        self.exports = OrderedDict()
        self.change_sets = OrderedDict()

    def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
        """Create and register a FakeStack and publish its exports.

        Raises ValidationError (via _validate_export_uniqueness) when the new
        stack's export names collide with existing region exports.
        """
        stack_id = generate_stack_id(name)
        new_stack = FakeStack(
            stack_id=stack_id,
            name=name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports,
            create_change_set=create_change_set,
        )
        self.stacks[stack_id] = new_stack
        self._validate_export_uniqueness(new_stack)
        for export in new_stack.exports:
            self.exports[export.name] = export
        return new_stack

    def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
        """Create a change set.

        For ``change_set_type == 'UPDATE'`` the target stack must already
        exist (ValidationError otherwise); for any other type a fresh stack
        is created with ``create_change_set=True``.
        """
        if change_set_type == 'UPDATE':
            stacks = self.stacks.values()
            stack = None
            # Last stack with a matching name wins.
            for s in stacks:
                if s.name == stack_name:
                    stack = s
            if stack is None:
                raise ValidationError(stack_name)
        else:
            stack = self.create_stack(stack_name, template, parameters,
                                      region_name, notification_arns,
                                      tags, role_arn,
                                      create_change_set=True)
        change_set_id = generate_changeset_id(change_set_name, region_name)
        # NOTE(review): this inserts a bare dict into self.stacks keyed by the
        # change-set name — later stack iteration (e.g. describe_stacks) would
        # hit an object with no .name attribute; verify against callers.
        self.stacks[change_set_name] = {
            'Id': change_set_id,
            'StackId': stack.stack_id
        }
        self.change_sets[change_set_id] = stack
        return change_set_id, stack.stack_id

    def execute_change_set(self, change_set_name, stack_name=None):
        """Execute a change set found by ARN/id or by stack name.

        Emits CREATE_COMPLETE when the stack is still in review, otherwise an
        UPDATE_IN_PROGRESS/UPDATE_COMPLETE pair.
        """
        stack = None
        if change_set_name in self.change_sets:
            # This means arn was passed in
            stack = self.change_sets[change_set_name]
        else:
            # Matches on the underlying stack's name (change sets map to
            # stacks in this legacy model); last match wins.
            for cs in self.change_sets:
                if self.change_sets[cs].name == change_set_name:
                    stack = self.change_sets[cs]
        if stack is None:
            raise ValidationError(stack_name)
        if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':
            stack._add_stack_event('CREATE_COMPLETE')
        else:
            stack._add_stack_event('UPDATE_IN_PROGRESS')
            stack._add_stack_event('UPDATE_COMPLETE')
        return True

    def describe_stacks(self, name_or_stack_id):
        """Return [stack] for a name/id match (deleted stacks matched by id
        only), or all live stacks when no name is given; raises
        ValidationError when a name/id is given but nothing matches."""
        stacks = self.stacks.values()
        if name_or_stack_id:
            for stack in stacks:
                if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
                    return [stack]
            if self.deleted_stacks:
                deleted_stacks = self.deleted_stacks.values()
                for stack in deleted_stacks:
                    if stack.stack_id == name_or_stack_id:
                        return [stack]
            raise ValidationError(name_or_stack_id)
        else:
            return list(stacks)

    def list_stacks(self):
        # Live stacks only (a dict view, not a list).
        return self.stacks.values()

    def get_stack(self, name_or_stack_id):
        """Find a stack by id (deleted included) or name (live only).
        Returns None when nothing matches."""
        all_stacks = dict(self.deleted_stacks, **self.stacks)
        if name_or_stack_id in all_stacks:
            # Lookup by stack id - deleted stacks incldued
            return all_stacks[name_or_stack_id]
        else:
            # Lookup by stack name - undeleted stacks only
            for stack in self.stacks.values():
                if stack.name == name_or_stack_id:
                    return stack

    def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):
        """Apply an update to an existing stack and return it."""
        stack = self.get_stack(name)
        stack.update(template, role_arn, parameters=parameters, tags=tags)
        return stack

    def list_stack_resources(self, stack_name_or_id):
        # NOTE(review): raises AttributeError when the stack does not exist
        # (get_stack returns None) — confirm callers always pass valid names.
        stack = self.get_stack(stack_name_or_id)
        return stack.stack_resources

    def delete_stack(self, name_or_stack_id):
        """Delete by id (archiving into deleted_stacks, dropping exports) or
        by name (recursing with the id). Returns None in both paths."""
        if name_or_stack_id in self.stacks:
            # Delete by stack id
            stack = self.stacks.pop(name_or_stack_id, None)
            stack.delete()
            self.deleted_stacks[stack.stack_id] = stack
            # Side-effect comprehension: removes the stack's exports.
            [self.exports.pop(export.name) for export in stack.exports]
            return self.stacks.pop(name_or_stack_id, None)
        else:
            # Delete by stack name
            for stack in list(self.stacks.values()):
                if stack.name == name_or_stack_id:
                    self.delete_stack(stack.stack_id)

    def list_exports(self, token):
        """Paginate exports 100 at a time; ``token`` is a stringified offset."""
        all_exports = list(self.exports.values())
        if token is None:
            exports = all_exports[0:100]
            next_token = '100' if len(all_exports) > 100 else None
        else:
            token = int(token)
            exports = all_exports[token:token + 100]
            next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    def _validate_export_uniqueness(self, stack):
        """Raise ValidationError if the stack's export names collide with
        exports already registered in this region."""
        new_stack_export_names = [x.name for x in stack.exports]
        export_names = self.exports.keys()
        if not set(export_names).isdisjoint(new_stack_export_names):
            raise ValidationError(
                stack.stack_id,
                message='Export names must be unique across a given region')
class CloudFormationBackend(BaseBackend):
    """In-memory CloudFormation backend (older variant, pre-stack-sets).

    Holds live stacks, deleted stacks, region exports and change sets.
    """

    def __init__(self):
        self.stacks = OrderedDict()
        self.deleted_stacks = {}
        self.exports = OrderedDict()
        self.change_sets = OrderedDict()

    def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
        """Create a FakeStack, register it, validate and publish its exports."""
        stack_id = generate_stack_id(name)
        new_stack = FakeStack(
            stack_id=stack_id,
            name=name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports,
            create_change_set=create_change_set,
        )
        self.stacks[stack_id] = new_stack
        self._validate_export_uniqueness(new_stack)
        for export in new_stack.exports:
            self.exports[export.name] = export
        return new_stack

    def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
        """Create a change set against an existing stack (UPDATE) or a newly
        created stack (any other type); returns (change_set_id, stack_id)."""
        if change_set_type == 'UPDATE':
            stacks = self.stacks.values()
            stack = None
            # Linear scan; the last stack with a matching name wins.
            for s in stacks:
                if s.name == stack_name:
                    stack = s
            if stack is None:
                raise ValidationError(stack_name)
        else:
            stack = self.create_stack(stack_name, template, parameters,
                                      region_name, notification_arns,
                                      tags, role_arn,
                                      create_change_set=True)
        change_set_id = generate_changeset_id(change_set_name, region_name)
        # NOTE(review): a plain dict is stored under the change-set name in
        # self.stacks — any code iterating stacks and reading .name will break
        # on this entry; presumably responses.py reads ['Id']/['StackId'].
        self.stacks[change_set_name] = {'Id': change_set_id,
                                        'StackId': stack.stack_id}
        self.change_sets[change_set_id] = stack
        return change_set_id, stack.stack_id

    def execute_change_set(self, change_set_name, stack_name=None):
        """Execute a change set located by ARN/id or by stack name; records
        either CREATE_COMPLETE or an UPDATE event pair on the stack."""
        stack = None
        if change_set_name in self.change_sets:
            # This means arn was passed in
            stack = self.change_sets[change_set_name]
        else:
            for cs in self.change_sets:
                if self.change_sets[cs].name == change_set_name:
                    stack = self.change_sets[cs]
        if stack is None:
            raise ValidationError(stack_name)
        if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':
            stack._add_stack_event('CREATE_COMPLETE')
        else:
            stack._add_stack_event('UPDATE_IN_PROGRESS')
            stack._add_stack_event('UPDATE_COMPLETE')
        return True

    def describe_stacks(self, name_or_stack_id):
        """[stack] for a match on name or id (deleted stacks only match by
        id); all live stacks when no filter given; ValidationError when a
        filter is given but nothing matches."""
        stacks = self.stacks.values()
        if name_or_stack_id:
            for stack in stacks:
                if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
                    return [stack]
            if self.deleted_stacks:
                deleted_stacks = self.deleted_stacks.values()
                for stack in deleted_stacks:
                    if stack.stack_id == name_or_stack_id:
                        return [stack]
            raise ValidationError(name_or_stack_id)
        else:
            return list(stacks)

    def list_stacks(self):
        # Returns the live dict view; deleted stacks are not included.
        return self.stacks.values()

    def get_stack(self, name_or_stack_id):
        """Find a stack by id (deleted included) or name (live only);
        returns None when nothing matches."""
        all_stacks = dict(self.deleted_stacks, **self.stacks)
        if name_or_stack_id in all_stacks:
            # Lookup by stack id - deleted stacks incldued
            return all_stacks[name_or_stack_id]
        else:
            # Lookup by stack name - undeleted stacks only
            for stack in self.stacks.values():
                if stack.name == name_or_stack_id:
                    return stack

    def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):
        """Update an existing stack in place and return it."""
        stack = self.get_stack(name)
        stack.update(template, role_arn, parameters=parameters, tags=tags)
        return stack

    def list_stack_resources(self, stack_name_or_id):
        # NOTE(review): no None guard — an unknown stack name raises
        # AttributeError here; confirm callers pre-validate.
        stack = self.get_stack(stack_name_or_id)
        return stack.stack_resources

    def delete_stack(self, name_or_stack_id):
        """Delete by id (archive + drop exports) or by name (recurse by id)."""
        if name_or_stack_id in self.stacks:
            # Delete by stack id
            stack = self.stacks.pop(name_or_stack_id, None)
            stack.delete()
            self.deleted_stacks[stack.stack_id] = stack
            # Comprehension used for its side effect of removing exports.
            [self.exports.pop(export.name) for export in stack.exports]
            return self.stacks.pop(name_or_stack_id, None)
        else:
            # Delete by stack name
            for stack in list(self.stacks.values()):
                if stack.name == name_or_stack_id:
                    self.delete_stack(stack.stack_id)

    def list_exports(self, token):
        """Page through exports, 100 per page; token is a string offset."""
        all_exports = list(self.exports.values())
        if token is None:
            exports = all_exports[0:100]
            next_token = '100' if len(all_exports) > 100 else None
        else:
            token = int(token)
            exports = all_exports[token:token + 100]
            next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    def _validate_export_uniqueness(self, stack):
        """Raise ValidationError when the stack's export names overlap the
        region's already-registered export names."""
        new_stack_export_names = [x.name for x in stack.exports]
        export_names = self.exports.keys()
        if not set(export_names).isdisjoint(new_stack_export_names):
            raise ValidationError(stack.stack_id,
                                  message='Export names must be unique across a given region')
class DynamoDBBackend(BaseBackend):
    """In-memory DynamoDB backend: a name-keyed OrderedDict of Table objects
    plus the item-level operations (get/put/update/delete, query/scan,
    transactions, TTL and continuous-backup settings)."""

    def __init__(self, region_name=None):
        self.region_name = region_name
        self.tables = OrderedDict()

    def reset(self):
        """Wipe all state and re-run __init__ with the same region."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_table(self, name, **params):
        """Create a table; returns None (not an error) if the name exists."""
        if name in self.tables:
            return None
        table = Table(name, **params)
        self.tables[name] = table
        return table

    def delete_table(self, name):
        """Remove and return a table, or None when absent."""
        return self.tables.pop(name, None)

    def tag_resource(self, table_arn, tags):
        """Append tags to every table whose ARN matches (no-op otherwise)."""
        for table in self.tables:
            if self.tables[table].table_arn == table_arn:
                self.tables[table].tags.extend(tags)

    def untag_resource(self, table_arn, tag_keys):
        """Drop tags whose Key is in ``tag_keys`` from the matching table."""
        for table in self.tables:
            if self.tables[table].table_arn == table_arn:
                self.tables[table].tags = [
                    tag for tag in self.tables[table].tags if tag["Key"] not in tag_keys
                ]

    def list_tags_of_resource(self, table_arn):
        """Tags of the table with the given ARN.

        NOTE(review): raises AttributeError (None.tags) when no table matches
        the ARN — confirm callers validate the ARN first.
        """
        required_table = None
        for table in self.tables:
            if self.tables[table].table_arn == table_arn:
                required_table = self.tables[table]
        return required_table.tags

    def list_tables(self, limit, exclusive_start_table_name):
        """Return (table_names, last_evaluated_name) with DynamoDB-style
        pagination: start after ``exclusive_start_table_name``, return at
        most ``limit`` names, and echo the last name when more remain."""
        all_tables = list(self.tables.keys())
        if exclusive_start_table_name:
            try:
                last_table_index = all_tables.index(exclusive_start_table_name)
            except ValueError:
                # Unknown start name: return an empty page.
                start = len(all_tables)
            else:
                start = last_table_index + 1
        else:
            start = 0
        if limit:
            tables = all_tables[start:start + limit]
        else:
            tables = all_tables[start:]
        if limit and len(all_tables) > start + limit:
            return tables, tables[-1]
        return tables, None

    def describe_table(self, name):
        # Raises KeyError for an unknown table name.
        table = self.tables[name]
        return table.describe(base_key="Table")

    def update_table(self, name, global_index, throughput, stream_spec):
        """Apply whichever of the three update kinds were provided."""
        table = self.get_table(name)
        if global_index:
            table = self.update_table_global_indexes(name, global_index)
        if throughput:
            table = self.update_table_throughput(name, throughput)
        if stream_spec:
            table = self.update_table_streams(name, stream_spec)
        return table

    def update_table_throughput(self, name, throughput):
        """Replace the table's provisioned throughput setting."""
        table = self.tables[name]
        table.throughput = throughput
        return table

    def update_table_streams(self, name, stream_specification):
        """Set the table's stream specification; a stream may be configured
        only once per table."""
        table = self.tables[name]
        if (stream_specification.get("StreamEnabled")
                or stream_specification.get("StreamViewType")
                ) and table.latest_stream_label:
            raise ValueError("Table already has stream enabled")
        table.set_stream_specification(stream_specification)
        return table

    def update_table_global_indexes(self, name, global_index_updates):
        """Apply Create/Update/Delete GSI actions; raises ValueError for a
        missing index on Update/Delete or a duplicate on Create."""
        table = self.tables[name]
        gsis_by_name = dict((i["IndexName"], i) for i in table.global_indexes)
        for gsi_update in global_index_updates:
            gsi_to_create = gsi_update.get("Create")
            gsi_to_update = gsi_update.get("Update")
            gsi_to_delete = gsi_update.get("Delete")
            if gsi_to_delete:
                index_name = gsi_to_delete["IndexName"]
                if index_name not in gsis_by_name:
                    raise ValueError(
                        "Global Secondary Index does not exist, but tried to delete: %s"
                        % gsi_to_delete["IndexName"])
                del gsis_by_name[index_name]
            if gsi_to_update:
                index_name = gsi_to_update["IndexName"]
                if index_name not in gsis_by_name:
                    raise ValueError(
                        "Global Secondary Index does not exist, but tried to update: %s"
                        % gsi_to_update["IndexName"])
                gsis_by_name[index_name].update(gsi_to_update)
            if gsi_to_create:
                if gsi_to_create["IndexName"] in gsis_by_name:
                    raise ValueError(
                        "Global Secondary Index already exists: %s"
                        % gsi_to_create["IndexName"])
                gsis_by_name[gsi_to_create["IndexName"]] = gsi_to_create
        # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other
        # parts of the codebase
        table.global_indexes = list(gsis_by_name.values())
        return table

    def put_item(
        self,
        table_name,
        item_attrs,
        expected=None,
        condition_expression=None,
        expression_attribute_names=None,
        expression_attribute_values=None,
        overwrite=False,
    ):
        """Delegate a PutItem to the table; None when the table is unknown."""
        table = self.tables.get(table_name)
        if not table:
            return None
        return table.put_item(
            item_attrs,
            expected,
            condition_expression,
            expression_attribute_names,
            expression_attribute_values,
            overwrite,
        )

    def get_table_keys_name(self, table_name, keys):
        """
        Given a set of keys, extracts the key and range key

        Returns (hash_key, range_key) where either may be None; (None, None)
        when the table does not exist.
        """
        table = self.tables.get(table_name)
        if not table:
            return None, None
        else:
            if len(keys) == 1:
                for key in keys:
                    if key in table.hash_key_names:
                        return key, None
            # for potential_hash, potential_range in zip(table.hash_key_names, table.range_key_names):
            #     if set([potential_hash, potential_range]) == set(keys):
            #         return potential_hash, potential_range
            potential_hash, potential_range = None, None
            for key in set(keys):
                if key in table.hash_key_names:
                    potential_hash = key
                elif key in table.range_key_names:
                    potential_range = key
            return potential_hash, potential_range

    def get_keys_value(self, table, keys):
        """Convert a raw key dict into (hash DynamoType, range DynamoType or
        None); raises ValueError when a required key component is missing."""
        if table.hash_key_attr not in keys or (
                table.has_range_key and table.range_key_attr not in keys):
            raise ValueError(
                "Table has a range key, but no range key was passed into get_item"
            )
        hash_key = DynamoType(keys[table.hash_key_attr])
        range_key = (DynamoType(keys[table.range_key_attr])
                     if table.has_range_key else None)
        return hash_key, range_key

    def get_table(self, table_name):
        # Returns None for an unknown name (unlike describe_table).
        return self.tables.get(table_name)

    def get_item(self, table_name, keys, projection_expression=None):
        """Fetch a single item; raises ValueError when the table is unknown."""
        table = self.get_table(table_name)
        if not table:
            raise ValueError("No table found")
        hash_key, range_key = self.get_keys_value(table, keys)
        return table.get_item(hash_key, range_key, projection_expression)

    def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts,
              limit, exclusive_start_key, scan_index_forward, projection_expression,
              index_name=None, expr_names=None, expr_values=None,
              filter_expression=None, **filter_kwargs):
        """Run a Query against the table; (None, None) when table is unknown."""
        table = self.tables.get(table_name)
        if not table:
            return None, None
        hash_key = DynamoType(hash_key_dict)
        range_values = [
            DynamoType(range_value) for range_value in range_value_dicts
        ]
        filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
        return table.query(hash_key, range_comparison, range_values, limit,
                           exclusive_start_key, scan_index_forward,
                           projection_expression,
                           index_name, filter_expression, **filter_kwargs)

    def scan(
        self,
        table_name,
        filters,
        limit,
        exclusive_start_key,
        filter_expression,
        expr_names,
        expr_values,
        index_name,
        projection_expression,
    ):
        """Run a Scan; returns (None, None, None) when the table is unknown."""
        table = self.tables.get(table_name)
        if not table:
            return None, None, None
        scan_filters = {}
        for key, (comparison_operator, comparison_values) in filters.items():
            dynamo_types = [DynamoType(value) for value in comparison_values]
            scan_filters[key] = (comparison_operator, dynamo_types)
        filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
        # Resolve #name placeholders in the projection expression.
        projection_expression = ",".join([
            expr_names.get(attr, attr)
            for attr in projection_expression.replace(" ", "").split(",")
        ])
        return table.scan(
            scan_filters,
            limit,
            exclusive_start_key,
            filter_expression,
            index_name,
            projection_expression,
        )

    def update_item(
        self,
        table_name,
        key,
        update_expression,
        expression_attribute_names,
        expression_attribute_values,
        attribute_updates=None,
        expected=None,
        condition_expression=None,
    ):
        """UpdateItem: supports both update expressions and legacy
        attribute_updates; creates the item when it does not exist yet and
        records the change on the table's stream shard if one is attached.

        Raises ConditionalCheckFailed when ``expected`` or the condition
        expression does not hold; ItemSizeToUpdateTooLarge when the updated
        item would exceed the size limit.
        """
        table = self.get_table(table_name)
        # Support spaces between operators in an update expression
        # E.g. `a = b + c` -> `a=b+c`
        if update_expression:
            # Parse expression to get validation errors
            update_expression_ast = UpdateExpressionParser.make(
                update_expression)
            update_expression = re.sub(r"\s*([=\+-])\s*", "\\1", update_expression)
        if all([table.hash_key_attr in key, table.range_key_attr in key]):
            # Covers cases where table has hash and range keys, ``key`` param
            # will be a dict
            hash_value = DynamoType(key[table.hash_key_attr])
            range_value = DynamoType(key[table.range_key_attr])
        elif table.hash_key_attr in key:
            # Covers tables that have a range key where ``key`` param is a dict
            hash_value = DynamoType(key[table.hash_key_attr])
            range_value = None
        else:
            # Covers other cases
            hash_value = DynamoType(key)
            range_value = None
        item = table.get_item(hash_value, range_value)
        # Snapshot for the stream record emitted at the end.
        orig_item = copy.deepcopy(item)
        if not expected:
            expected = {}
        if not get_expected(expected).expr(item):
            raise ConditionalCheckFailed
        condition_op = get_filter_expression(
            condition_expression,
            expression_attribute_names,
            expression_attribute_values,
        )
        if not condition_op.expr(item):
            raise ConditionalCheckFailed
        # Update does not fail on new items, so create one
        if item is None:
            data = {table.hash_key_attr: {hash_value.type: hash_value.value}}
            if range_value:
                data.update({
                    table.range_key_attr: {
                        range_value.type: range_value.value
                    }
                })
            table.put_item(data)
            item = table.get_item(hash_value, range_value)
        if update_expression:
            validated_ast = UpdateExpressionValidator(
                update_expression_ast,
                expression_attribute_names=expression_attribute_names,
                expression_attribute_values=expression_attribute_values,
                item=item,
            ).validate()
            try:
                UpdateExpressionExecutor(validated_ast, item,
                                         expression_attribute_names).execute()
            except ItemSizeTooLarge:
                raise ItemSizeToUpdateTooLarge()
        else:
            # Legacy AttributeUpdates path.
            item.update_with_attribute_updates(attribute_updates)
        if table.stream_shard is not None:
            table.stream_shard.add(orig_item, item)
        return item

    def delete_item(
        self,
        table_name,
        key,
        expression_attribute_names=None,
        expression_attribute_values=None,
        condition_expression=None,
    ):
        """DeleteItem with optional condition; None when table is unknown;
        ConditionalCheckFailed when the condition does not hold."""
        table = self.get_table(table_name)
        if not table:
            return None
        hash_value, range_value = self.get_keys_value(table, key)
        item = table.get_item(hash_value, range_value)
        condition_op = get_filter_expression(
            condition_expression,
            expression_attribute_names,
            expression_attribute_values,
        )
        if not condition_op.expr(item):
            raise ConditionalCheckFailed
        return table.delete_item(hash_value, range_value)

    def update_time_to_live(self, table_name, ttl_spec):
        """Enable/disable TTL; ``ttl_spec`` must carry both ``Enabled`` and
        ``AttributeName`` or a JsonRESTError is raised."""
        table = self.tables.get(table_name)
        if table is None:
            raise JsonRESTError("ResourceNotFound", "Table not found")
        if "Enabled" not in ttl_spec or "AttributeName" not in ttl_spec:
            raise JsonRESTError(
                "InvalidParameterValue",
                "TimeToLiveSpecification does not contain Enabled and AttributeName",
            )
        if ttl_spec["Enabled"]:
            table.ttl["TimeToLiveStatus"] = "ENABLED"
        else:
            table.ttl["TimeToLiveStatus"] = "DISABLED"
        table.ttl["AttributeName"] = ttl_spec["AttributeName"]

    def describe_time_to_live(self, table_name):
        """Return the table's TTL settings dict; JsonRESTError when absent."""
        table = self.tables.get(table_name)
        if table is None:
            raise JsonRESTError("ResourceNotFound", "Table not found")
        return table.ttl

    def transact_write_items(self, transact_items):
        """Apply a TransactWriteItems batch: each entry is a ConditionCheck,
        Put, Delete or Update. Any failure rolls ALL tables back to their
        pre-transaction state and raises TransactionCanceledException with a
        per-item error list (None for the items that succeeded)."""
        # Create a backup in case any of the transactions fail
        original_table_state = copy.deepcopy(self.tables)
        errors = []
        for item in transact_items:
            try:
                if "ConditionCheck" in item:
                    item = item["ConditionCheck"]
                    key = item["Key"]
                    table_name = item["TableName"]
                    condition_expression = item.get("ConditionExpression", None)
                    expression_attribute_names = item.get(
                        "ExpressionAttributeNames", None)
                    expression_attribute_values = item.get(
                        "ExpressionAttributeValues", None)
                    current = self.get_item(table_name, key)
                    condition_op = get_filter_expression(
                        condition_expression,
                        expression_attribute_names,
                        expression_attribute_values,
                    )
                    if not condition_op.expr(current):
                        raise ConditionalCheckFailed()
                elif "Put" in item:
                    item = item["Put"]
                    attrs = item["Item"]
                    table_name = item["TableName"]
                    condition_expression = item.get("ConditionExpression", None)
                    expression_attribute_names = item.get(
                        "ExpressionAttributeNames", None)
                    expression_attribute_values = item.get(
                        "ExpressionAttributeValues", None)
                    self.put_item(
                        table_name,
                        attrs,
                        condition_expression=condition_expression,
                        expression_attribute_names=expression_attribute_names,
                        expression_attribute_values=expression_attribute_values,
                    )
                elif "Delete" in item:
                    item = item["Delete"]
                    key = item["Key"]
                    table_name = item["TableName"]
                    condition_expression = item.get("ConditionExpression", None)
                    expression_attribute_names = item.get(
                        "ExpressionAttributeNames", None)
                    expression_attribute_values = item.get(
                        "ExpressionAttributeValues", None)
                    self.delete_item(
                        table_name,
                        key,
                        condition_expression=condition_expression,
                        expression_attribute_names=expression_attribute_names,
                        expression_attribute_values=expression_attribute_values,
                    )
                elif "Update" in item:
                    item = item["Update"]
                    key = item["Key"]
                    table_name = item["TableName"]
                    update_expression = item["UpdateExpression"]
                    condition_expression = item.get("ConditionExpression", None)
                    expression_attribute_names = item.get(
                        "ExpressionAttributeNames", None)
                    expression_attribute_values = item.get(
                        "ExpressionAttributeValues", None)
                    self.update_item(
                        table_name,
                        key,
                        update_expression=update_expression,
                        condition_expression=condition_expression,
                        expression_attribute_names=expression_attribute_names,
                        expression_attribute_values=expression_attribute_values,
                    )
                else:
                    raise ValueError
                errors.append(None)
            except Exception as e:  # noqa: E722 Do not use bare except
                errors.append(type(e).__name__)
        if any(errors):
            # Rollback to the original state, and reraise the errors
            self.tables = original_table_state
            raise TransactionCanceledException(errors)

    def describe_continuous_backups(self, table_name):
        # NOTE(review): no None guard — AttributeError for an unknown table.
        table = self.get_table(table_name)
        return table.continuous_backups

    def update_continuous_backups(self, table_name, point_in_time_spec):
        """Enable or disable point-in-time recovery and return the table's
        continuous-backups description."""
        table = self.get_table(table_name)
        if (point_in_time_spec["PointInTimeRecoveryEnabled"]
                and table.continuous_backups["PointInTimeRecoveryDescription"]
                ["PointInTimeRecoveryStatus"] == "DISABLED"):
            table.continuous_backups["PointInTimeRecoveryDescription"] = {
                "PointInTimeRecoveryStatus": "ENABLED",
                "EarliestRestorableDateTime": unix_time(),
                "LatestRestorableDateTime": unix_time(),
            }
        elif not point_in_time_spec["PointInTimeRecoveryEnabled"]:
            table.continuous_backups["PointInTimeRecoveryDescription"] = {
                "PointInTimeRecoveryStatus": "DISABLED"
            }
        return table.continuous_backups

    ######################
    # LIST of methods where the logic completely resides in responses.py
    # Duplicated here so that the implementation coverage script is aware
    # TODO: Move logic here
    ######################

    def batch_get_item(self):
        pass

    def batch_write_item(self):
        pass

    def transact_get_items(self):
        pass
class ELBv2Backend(BaseBackend):
    """In-memory mock of the ELBv2 (ALB/NLB) API for a single region.

    State is held in two ordered maps keyed by ARN: ``load_balancers``
    (each FakeLoadBalancer owns its listeners) and ``target_groups``.
    """

    def __init__(self, region_name=None):
        self.region_name = region_name
        self.target_groups = OrderedDict()
        self.load_balancers = OrderedDict()

    @property
    def ec2_backend(self):
        """
        EC2 backend

        :return: EC2 Backend
        :rtype: moto.ec2.models.EC2Backend
        """
        return ec2_backends[self.region_name]

    @property
    def acm_backend(self):
        """
        ACM backend

        :return: ACM Backend
        :rtype: moto.acm.models.AWSCertificateManagerBackend
        """
        return acm_backends[self.region_name]

    def reset(self):
        # Wipe all state but keep the region binding.
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_load_balancer(self, name, security_groups, subnet_ids,
                             scheme='internet-facing'):
        """Create a load balancer across the given subnets.

        Raises SubnetNotFoundError for an empty/unknown subnet list and
        DuplicateLoadBalancerName when the derived ARN already exists.
        """
        vpc_id = None
        subnets = []
        if not subnet_ids:
            raise SubnetNotFoundError()
        for subnet_id in subnet_ids:
            subnet = self.ec2_backend.get_subnet(subnet_id)
            if subnet is None:
                raise SubnetNotFoundError()
            subnets.append(subnet)
        # All subnets are assumed to live in one VPC; the first one wins.
        vpc_id = subnets[0].vpc_id
        arn = make_arn_for_load_balancer(account_id=1, name=name,
                                         region_name=self.region_name)
        dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name)

        if arn in self.load_balancers:
            raise DuplicateLoadBalancerName()

        new_load_balancer = FakeLoadBalancer(
            name=name,
            security_groups=security_groups,
            arn=arn,
            scheme=scheme,
            subnets=subnets,
            vpc_id=vpc_id,
            dns_name=dns_name)
        self.load_balancers[arn] = new_load_balancer
        return new_load_balancer

    def create_rule(self, listener_arn, conditions, priority, actions):
        """Validate and attach a new rule to a listener; returns [rule]."""
        listeners = self.describe_listeners(None, [listener_arn])
        if not listeners:
            raise ListenerNotFoundError()
        listener = listeners[0]

        # validate conditions
        for condition in conditions:
            field = condition['field']
            if field not in ['path-pattern', 'host-header']:
                raise InvalidConditionFieldError(field)

            values = condition['values']
            if len(values) == 0:
                raise InvalidConditionValueError('A condition value must be specified')
            if len(values) > 1:
                raise InvalidConditionValueError(
                    "The '%s' field contains too many values; the limit is '1'" % field
                )
            # TODO: check pattern of value for 'host-header'
            # TODO: check pattern of value for 'path-pattern'

        # validate Priority
        for rule in listener.rules:
            if rule.priority == priority:
                raise PriorityInUseError()

        # validate Actions
        target_group_arns = [target_group.arn
                             for target_group in self.target_groups.values()]
        for i, action in enumerate(actions):
            index = i + 1
            action_type = action['type']
            if action_type not in ['forward']:
                raise InvalidActionTypeError(action_type, index)
            action_target_group_arn = action['target_group_arn']
            if action_target_group_arn not in target_group_arns:
                raise ActionTargetGroupNotFoundError(action_target_group_arn)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'

        # create rule
        rule = FakeRule(listener.arn, conditions, priority, actions,
                        is_default=False)
        listener.register(rule)
        return [rule]

    def create_target_group(self, name, **kwargs):
        """Create a target group after validating name, protocols, matcher."""
        if len(name) > 32:
            raise InvalidTargetGroupNameError(
                "Target group name '%s' cannot be longer than '32' characters" % name
            )
        # Raw strings: the original non-raw literals relied on Python passing
        # the unknown escape '\-' through; behaviour is identical, warnings gone.
        if not re.match(r'^[a-zA-Z0-9\-]+$', name):
            raise InvalidTargetGroupNameError(
                "Target group name '%s' can only contain characters that are alphanumeric characters or hyphens(-)" % name
            )

        # undocumented validation
        if not re.match(r'(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$', name):
            raise InvalidTargetGroupNameError(
                "1 validation error detected: Value '%s' at 'targetGroup.targetGroupArn.targetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern: (?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" % name
            )

        if name.startswith('-') or name.endswith('-'):
            raise InvalidTargetGroupNameError(
                "Target group name '%s' cannot begin or end with '-'" % name
            )
        for target_group in self.target_groups.values():
            if target_group.name == name:
                raise DuplicateTargetGroupName()

        valid_protocols = ['HTTPS', 'HTTP', 'TCP']
        if kwargs.get('healthcheck_protocol') and kwargs['healthcheck_protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'healthCheckProtocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(
                    kwargs['healthcheck_protocol'], valid_protocols))
        if kwargs.get('protocol') and kwargs['protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'protocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(
                    kwargs['protocol'], valid_protocols))

        if kwargs.get('matcher') and FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None:
            raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')

        arn = make_arn_for_target_group(account_id=1, name=name,
                                        region_name=self.region_name)
        target_group = FakeTargetGroup(name, arn, **kwargs)
        self.target_groups[target_group.arn] = target_group
        return target_group

    def create_listener(self, load_balancer_arn, protocol, port, ssl_policy,
                        certificate, default_actions):
        """Attach a listener to a load balancer and link its target groups."""
        balancer = self.load_balancers.get(load_balancer_arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()
        if port in balancer.listeners:
            raise DuplicateListenerError()

        arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + \
            "/%s%s" % (port, id(self))
        listener = FakeListener(load_balancer_arn, arn, protocol, port,
                                ssl_policy, certificate, default_actions)
        balancer.listeners[listener.arn] = listener
        for action in default_actions:
            if action['target_group_arn'] in self.target_groups.keys():
                target_group = self.target_groups[action['target_group_arn']]
                target_group.load_balancer_arns.append(load_balancer_arn)
        return listener

    def describe_load_balancers(self, arns, names):
        """Return balancers matching the given ARNs and/or names.

        With no filters, every balancer is returned.  An ARN or name that
        matches nothing raises LoadBalancerNotFoundError.
        """
        balancers = self.load_balancers.values()
        arns = arns or []
        names = names or []
        if not arns and not names:
            return balancers

        matched_balancers = []

        for arn in arns:
            # FIX: reset per lookup — previously a hit from an earlier
            # iteration masked later misses (no error raised) and was
            # appended again in place of the unmatched entry.
            matched_balancer = None
            for balancer in balancers:
                if balancer.arn == arn:
                    matched_balancer = balancer
            if matched_balancer is None:
                raise LoadBalancerNotFoundError()
            elif matched_balancer not in matched_balancers:
                matched_balancers.append(matched_balancer)

        for name in names:
            matched_balancer = None  # FIX: same reset for name lookups
            for balancer in balancers:
                if balancer.name == name:
                    matched_balancer = balancer
            if matched_balancer is None:
                raise LoadBalancerNotFoundError()
            elif matched_balancer not in matched_balancers:
                matched_balancers.append(matched_balancer)

        return matched_balancers

    def describe_rules(self, listener_arn, rule_arns):
        """Return a listener's rules, or the rules matching ``rule_arns``.

        Exactly one of the two arguments must be given.
        """
        if listener_arn is None and not rule_arns:
            raise InvalidDescribeRulesRequest(
                "You must specify either listener rule ARNs or a listener ARN"
            )
        if listener_arn is not None and rule_arns is not None:
            raise InvalidDescribeRulesRequest(
                'Listener rule ARNs and a listener ARN cannot be specified at the same time'
            )
        if listener_arn:
            listener = self.describe_listeners(None, [listener_arn])[0]
            return listener.rules

        # search for rule arns
        matched_rules = []
        for load_balancer_arn in self.load_balancers:
            listeners = self.load_balancers.get(
                load_balancer_arn).listeners.values()
            for listener in listeners:
                for rule in listener.rules:
                    if rule.arn in rule_arns:
                        matched_rules.append(rule)
        return matched_rules

    def describe_target_groups(self, load_balancer_arn, target_group_arns, names):
        """Filter target groups by owning balancer, by ARN, or by name."""
        if load_balancer_arn:
            if load_balancer_arn not in self.load_balancers:
                raise LoadBalancerNotFoundError()
            return [tg for tg in self.target_groups.values()
                    if load_balancer_arn in tg.load_balancer_arns]

        if target_group_arns:
            try:
                return [self.target_groups[arn] for arn in target_group_arns]
            except KeyError:
                raise TargetGroupNotFoundError()
        if names:
            matched = []
            for name in names:
                found = None
                for target_group in self.target_groups.values():
                    if target_group.name == name:
                        found = target_group
                if not found:
                    raise TargetGroupNotFoundError()
                matched.append(found)
            return matched

        return self.target_groups.values()

    def describe_listeners(self, load_balancer_arn, listener_arns):
        """Return a balancer's listeners, or the listeners for given ARNs."""
        if load_balancer_arn:
            if load_balancer_arn not in self.load_balancers:
                raise LoadBalancerNotFoundError()
            return self.load_balancers.get(load_balancer_arn).listeners.values()

        # NOTE(review): with several load balancers this raises on the first
        # balancer that lacks the ARN instead of searching them all — kept
        # as-is since existing callers pass ARNs scoped to one balancer.
        matched = []
        for load_balancer in self.load_balancers.values():
            for listener_arn in listener_arns:
                listener = load_balancer.listeners.get(listener_arn)
                if not listener:
                    raise ListenerNotFoundError()
                matched.append(listener)
        return matched

    def delete_load_balancer(self, arn):
        # Deleting an unknown ARN is a silent no-op.
        self.load_balancers.pop(arn, None)

    def delete_rule(self, arn):
        for load_balancer_arn in self.load_balancers:
            listeners = self.load_balancers.get(
                load_balancer_arn).listeners.values()
            for listener in listeners:
                for rule in listener.rules:
                    if rule.arn == arn:
                        listener.remove_rule(rule)
                        return

        # should raise RuleNotFound Error according to the AWS API doc
        # however, boto3 doesn't raise an error even if the rule is not found

    def delete_target_group(self, target_group_arn):
        """Delete a target group unless a listener/rule still forwards to it."""
        if target_group_arn not in self.target_groups:
            raise TargetGroupNotFoundError()

        target_group = self.target_groups[target_group_arn]
        if target_group:
            if self._any_listener_using(target_group_arn):
                raise ResourceInUseError(
                    "The target group '{}' is currently in use by a listener or a rule".format(
                        target_group_arn))
            del self.target_groups[target_group_arn]
            return target_group

    def delete_listener(self, listener_arn):
        for load_balancer in self.load_balancers.values():
            listener = load_balancer.listeners.pop(listener_arn, None)
            if listener:
                return listener
        raise ListenerNotFoundError()

    def modify_rule(self, rule_arn, conditions, actions):
        """Replace a rule's conditions and/or actions after validation."""
        # if conditions or actions is empty list, do not update the attributes
        if not conditions and not actions:
            raise InvalidModifyRuleArgumentsError()
        rules = self.describe_rules(listener_arn=None, rule_arns=[rule_arn])
        if not rules:
            raise RuleNotFoundError()
        rule = rules[0]

        if conditions:
            for condition in conditions:
                field = condition['field']
                if field not in ['path-pattern', 'host-header']:
                    raise InvalidConditionFieldError(field)

                values = condition['values']
                if len(values) == 0:
                    raise InvalidConditionValueError('A condition value must be specified')
                if len(values) > 1:
                    raise InvalidConditionValueError(
                        "The '%s' field contains too many values; the limit is '1'" % field
                    )
                # TODO: check pattern of value for 'host-header'
                # TODO: check pattern of value for 'path-pattern'

        # validate Actions
        target_group_arns = [target_group.arn
                             for target_group in self.target_groups.values()]
        if actions:
            for i, action in enumerate(actions):
                index = i + 1
                action_type = action['type']
                if action_type not in ['forward']:
                    raise InvalidActionTypeError(action_type, index)
                action_target_group_arn = action['target_group_arn']
                if action_target_group_arn not in target_group_arns:
                    raise ActionTargetGroupNotFoundError(action_target_group_arn)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'

        # modify rule
        if conditions:
            rule.conditions = conditions
        if actions:
            rule.actions = actions
        return [rule]

    def register_targets(self, target_group_arn, instances):
        target_group = self.target_groups.get(target_group_arn)
        if target_group is None:
            raise TargetGroupNotFoundError()
        target_group.register(instances)

    def deregister_targets(self, target_group_arn, instances):
        target_group = self.target_groups.get(target_group_arn)
        if target_group is None:
            raise TargetGroupNotFoundError()
        target_group.deregister(instances)

    def describe_target_health(self, target_group_arn, targets):
        target_group = self.target_groups.get(target_group_arn)
        if target_group is None:
            raise TargetGroupNotFoundError()

        if not targets:
            targets = target_group.targets.values()
        return [target_group.health_for(target) for target in targets]

    def set_rule_priorities(self, rule_priorities):
        """Re-prioritise rules; fully validated before anything is mutated."""
        # validate: no duplicate priorities within the request itself
        priorities = [rule_priority['priority']
                      for rule_priority in rule_priorities]
        for priority in set(priorities):
            if priorities.count(priority) > 1:
                raise DuplicatePriorityError(priority)

        # validate: no clash with existing rules on the same listener
        for rule_priority in rule_priorities:
            given_rule_arn = rule_priority['rule_arn']
            priority = rule_priority['priority']
            _given_rules = self.describe_rules(listener_arn=None,
                                               rule_arns=[given_rule_arn])
            if not _given_rules:
                raise RuleNotFoundError()
            given_rule = _given_rules[0]
            listeners = self.describe_listeners(None, [given_rule.listener_arn])
            listener = listeners[0]
            for rule_in_listener in listener.rules:
                if rule_in_listener.priority == priority:
                    raise PriorityInUseError()
        # modify
        modified_rules = []
        for rule_priority in rule_priorities:
            given_rule_arn = rule_priority['rule_arn']
            priority = rule_priority['priority']
            _given_rules = self.describe_rules(listener_arn=None,
                                               rule_arns=[given_rule_arn])
            if not _given_rules:
                raise RuleNotFoundError()
            given_rule = _given_rules[0]
            given_rule.priority = priority
            modified_rules.append(given_rule)
        return modified_rules

    def set_ip_address_type(self, arn, ip_type):
        # NOTE(review): AWS accepts 'ipv4' | 'dualstack'; the 'internal'
        # value here mirrors the error text below — confirm before changing.
        if ip_type not in ('internal', 'dualstack'):
            raise RESTError('InvalidParameterValue',
                            'IpAddressType must be either internal | dualstack')

        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        if ip_type == 'dualstack' and balancer.scheme == 'internal':
            raise RESTError('InvalidConfigurationRequest',
                            'Internal load balancers cannot be dualstack')

        balancer.stack = ip_type

    def set_security_groups(self, arn, sec_groups):
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        # Check all security groups exist
        for sec_group_id in sec_groups:
            if self.ec2_backend.get_security_group_from_id(sec_group_id) is None:
                raise RESTError('InvalidSecurityGroup',
                                'Security group {0} does not exist'.format(sec_group_id))

        balancer.security_groups = sec_groups

    def set_subnets(self, arn, subnets):
        """Replace a balancer's subnets; requires at least two distinct AZs."""
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        subnet_objects = []
        sub_zone_list = {}
        for subnet in subnets:
            # FIX: only the subnet lookup is wrapped — previously the
            # duplicate-AZ RESTError raised inside this try was swallowed by
            # ``except Exception`` and re-raised as SubnetNotFoundError.
            try:
                subnet = self.ec2_backend.get_subnet(subnet)
            except Exception:
                raise SubnetNotFoundError()
            if subnet is None:
                raise SubnetNotFoundError()

            if subnet.availability_zone in sub_zone_list:
                raise RESTError('InvalidConfigurationRequest',
                                'More than 1 subnet cannot be specified for 1 availability zone')

            sub_zone_list[subnet.availability_zone] = subnet.id
            subnet_objects.append(subnet)

        if len(sub_zone_list) < 2:
            raise RESTError('InvalidConfigurationRequest',
                            'More than 1 availability zone must be specified')

        balancer.subnets = subnet_objects

        return sub_zone_list.items()

    def modify_load_balancer_attributes(self, arn, attrs):
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        for key in attrs:
            if key not in FakeLoadBalancer.VALID_ATTRS:
                raise RESTError('InvalidConfigurationRequest',
                                'Key {0} not valid'.format(key))

        balancer.attrs.update(attrs)
        return balancer.attrs

    def describe_load_balancer_attributes(self, arn):
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        return balancer.attrs

    def modify_target_group(self, arn, health_check_proto=None,
                            health_check_port=None, health_check_path=None,
                            health_check_interval=None,
                            health_check_timeout=None,
                            healthy_threshold_count=None,
                            unhealthy_threshold_count=None, http_codes=None):
        """Update any provided health-check settings on a target group."""
        target_group = self.target_groups.get(arn)
        if target_group is None:
            raise TargetGroupNotFoundError()

        if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(http_codes) is None:
            raise RESTError('InvalidParameterValue',
                            'HttpCode must be like 200 | 200-399 | 200,201 ...')

        if http_codes is not None:
            target_group.matcher['HttpCode'] = http_codes
        if health_check_interval is not None:
            target_group.healthcheck_interval_seconds = health_check_interval
        if health_check_path is not None:
            target_group.healthcheck_path = health_check_path
        if health_check_port is not None:
            target_group.healthcheck_port = health_check_port
        if health_check_proto is not None:
            target_group.healthcheck_protocol = health_check_proto
        if health_check_timeout is not None:
            target_group.healthcheck_timeout_seconds = health_check_timeout
        if healthy_threshold_count is not None:
            target_group.healthy_threshold_count = healthy_threshold_count
        if unhealthy_threshold_count is not None:
            target_group.unhealthy_threshold_count = unhealthy_threshold_count

        return target_group

    def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None,
                        certificates=None, default_actions=None):
        """Update a listener's port/protocol/certs/policy/actions in place."""
        for load_balancer in self.load_balancers.values():
            if arn in load_balancer.listeners:
                break
        else:
            raise ListenerNotFoundError()

        listener = load_balancer.listeners[arn]

        if port is not None:
            for listener_arn, current_listener in load_balancer.listeners.items():
                if listener_arn == arn:
                    continue
                # FIX: compare against each *other* listener's port; the
                # original tested ``listener.port`` (the listener being
                # modified), so real clashes went undetected.
                if current_listener.port == port:
                    raise DuplicateListenerError()

            listener.port = port

        if protocol is not None:
            if protocol not in ('HTTP', 'HTTPS', 'TCP'):
                raise RESTError('UnsupportedProtocol',
                                'Protocol {0} is not supported'.format(protocol))

            # HTTPS checks
            if protocol == 'HTTPS':
                # Might already be HTTPS so may not provide certs
                if certificates is None and listener.protocol != 'HTTPS':
                    raise RESTError('InvalidConfigurationRequest',
                                    'Certificates must be provided for HTTPS')

                # Check certificates exist
                if certificates is not None:
                    default_cert = None
                    all_certs = set()  # for SNI
                    for cert in certificates:
                        if cert['is_default'] == 'true':
                            default_cert = cert['certificate_arn']
                        try:
                            self.acm_backend.get_certificate(cert['certificate_arn'])
                        except Exception:
                            raise RESTError('CertificateNotFound',
                                            'Certificate {0} not found'.format(cert['certificate_arn']))

                        all_certs.add(cert['certificate_arn'])

                    if default_cert is None:
                        raise RESTError('InvalidConfigurationRequest',
                                        'No default certificate')

                    listener.certificate = default_cert
                    listener.certificates = list(all_certs)

            listener.protocol = protocol

        if ssl_policy is not None:
            # Its already validated in responses.py
            listener.ssl_policy = ssl_policy

        if default_actions is not None:
            # Is currently not validated
            listener.default_actions = default_actions

        return listener

    def _any_listener_using(self, target_group_arn):
        """True when any rule on any listener forwards to the target group."""
        for load_balancer in self.load_balancers.values():
            for listener in load_balancer.listeners.values():
                for rule in listener.rules:
                    for action in rule.actions:
                        if action.get('target_group_arn') == target_group_arn:
                            return True
        return False
class FakeTargetGroup(CloudFormationModel):
    """Mock ELBv2 target group: registered targets, health checks, tags."""

    # Accepts forms like "200", "200-399" and comma-joined mixes of both.
    HTTP_CODE_REGEX = re.compile(r"(?:(?:\d+-\d+|\d+),?)+")

    def __init__(
        self,
        name,
        arn,
        vpc_id,
        protocol,
        port,
        healthcheck_protocol=None,
        healthcheck_port=None,
        healthcheck_path=None,
        healthcheck_interval_seconds=None,
        healthcheck_timeout_seconds=None,
        healthy_threshold_count=None,
        unhealthy_threshold_count=None,
        matcher=None,
        target_type=None,
    ):
        # TODO: default values differs when you add Network Load balancer
        self.name = name
        self.arn = arn
        self.vpc_id = vpc_id
        self.protocol = protocol
        self.port = port
        self.healthcheck_protocol = healthcheck_protocol if healthcheck_protocol else "HTTP"
        self.healthcheck_port = healthcheck_port if healthcheck_port else str(self.port)
        self.healthcheck_path = healthcheck_path if healthcheck_path else "/"
        self.healthcheck_interval_seconds = (
            healthcheck_interval_seconds if healthcheck_interval_seconds else 30
        )
        self.healthcheck_timeout_seconds = (
            healthcheck_timeout_seconds if healthcheck_timeout_seconds else 5
        )
        self.healthy_threshold_count = (
            healthy_threshold_count if healthy_threshold_count else 5
        )
        self.unhealthy_threshold_count = (
            unhealthy_threshold_count if unhealthy_threshold_count else 2
        )
        self.load_balancer_arns = []
        self.tags = {}
        self.matcher = matcher if matcher is not None else {"HttpCode": "200"}
        self.target_type = target_type

        self.attributes = {
            "deregistration_delay.timeout_seconds": 300,
            "stickiness.enabled": "false",
        }

        # target id -> {"id": ..., "port": ...}, insertion-ordered
        self.targets = OrderedDict()

    @property
    def physical_resource_id(self):
        """CloudFormation physical ID is the target group's ARN."""
        return self.arn

    def register(self, targets):
        """Add (or refresh) targets; a missing port defaults to the group port."""
        for entry in targets:
            self.targets[entry["id"]] = {
                "id": entry["id"],
                "port": entry.get("port", self.port),
            }

    def deregister(self, targets):
        """Remove targets; an unknown target id raises InvalidTargetError."""
        for entry in targets:
            removed = self.targets.pop(entry["id"], None)
            if not removed:
                raise InvalidTargetError()

    def deregister_terminated_instances(self, instance_ids):
        """Silently drop any registered targets whose id was terminated."""
        stale_ids = [tid for tid in self.targets if tid in instance_ids]
        for tid in stale_ids:
            del self.targets[tid]

    def add_tag(self, key, value):
        """Set a tag; at most 10 distinct keys, overwrites are always allowed."""
        if key not in self.tags and len(self.tags) >= 10:
            raise TooManyTagsError()
        self.tags[key] = value

    def health_for(self, target, ec2_backend):
        """Return a FakeHealthStatus for one registered target.

        A stopped EC2 instance reports 'unused'/'Target.InvalidState';
        everything else reports 'healthy'.
        """
        entry = self.targets.get(target["id"])
        if entry is None:
            raise InvalidTargetError()
        if entry["id"].startswith("i-"):  # EC2 instance ID
            instance = ec2_backend.get_instance_by_id(entry["id"])
            if instance.state == "stopped":
                return FakeHealthStatus(
                    entry["id"],
                    entry["port"],
                    self.healthcheck_port,
                    "unused",
                    "Target.InvalidState",
                    "Target is in the stopped state",
                )
        return FakeHealthStatus(
            entry["id"], entry["port"], self.healthcheck_port, "healthy"
        )

    @staticmethod
    def cloudformation_name_type():
        return "Name"

    @staticmethod
    def cloudformation_type():
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html
        return "AWS::ElasticLoadBalancingV2::TargetGroup"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name
    ):
        """Build a target group from a CloudFormation resource definition."""
        properties = cloudformation_json["Properties"]
        elbv2_backend = elbv2_backends[region_name]

        # Map each create_target_group keyword to its template property;
        # absent properties pass through as None, matching the explicit
        # properties.get(...) calls this replaces.
        kwarg_to_property = {
            "vpc_id": "VpcId",
            "protocol": "Protocol",
            "port": "Port",
            "healthcheck_protocol": "HealthCheckProtocol",
            "healthcheck_port": "HealthCheckPort",
            "healthcheck_path": "HealthCheckPath",
            "healthcheck_interval_seconds": "HealthCheckIntervalSeconds",
            "healthcheck_timeout_seconds": "HealthCheckTimeoutSeconds",
            "healthy_threshold_count": "HealthyThresholdCount",
            "unhealthy_threshold_count": "UnhealthyThresholdCount",
            "matcher": "Matcher",
            "target_type": "TargetType",
        }
        kwargs = {
            kwarg: properties.get(prop)
            for kwarg, prop in kwarg_to_property.items()
        }
        return elbv2_backend.create_target_group(name=resource_name, **kwargs)
class Shard(BaseModel):
    """A single Kinesis shard: an ordered map of sequence number -> Record."""

    def __init__(self, shard_id, starting_hash, ending_hash):
        self._shard_id = shard_id
        self.starting_hash = starting_hash
        self.ending_hash = ending_hash
        # sequence number (int) -> Record, in insertion (sequence) order
        self.records = OrderedDict()
        # Closed shards expose an EndingSequenceNumber in to_json().
        self.is_open = True

    @property
    def shard_id(self):
        # AWS-style id, e.g. "shardId-000000000007"
        return "shardId-{0}".format(str(self._shard_id).zfill(12))

    def get_records(self, last_sequence_id, limit):
        """Return up to ``limit`` records after ``last_sequence_id``.

        Returns (records, new_last_sequence_id, millis_behind_latest) where
        the lag is measured from the last returned record to the newest
        record in the shard (0 when nothing is returned).
        """
        last_sequence_id = int(last_sequence_id)
        results = []
        secs_behind_latest = 0

        for sequence_number, record in self.records.items():
            if sequence_number > last_sequence_id:
                results.append(record)
                last_sequence_id = sequence_number

                very_last_record = self.records[next(reversed(self.records))]
                secs_behind_latest = very_last_record.created_at - record.created_at

            if len(results) == limit:
                break

        millis_behind_latest = int(secs_behind_latest * 1000)
        return results, last_sequence_id, millis_behind_latest

    def put_record(self, partition_key, data, explicit_hash_key):
        """Append a record, assigning the next sequence number; returns it."""
        # Note: this function is not safe for concurrency
        if self.records:
            last_sequence_number = self.get_max_sequence_number()
        else:
            last_sequence_number = 0
        sequence_number = last_sequence_number + 1
        self.records[sequence_number] = Record(
            partition_key, data, sequence_number, explicit_hash_key
        )
        return sequence_number

    def get_min_sequence_number(self):
        # 0 when the shard is empty.
        if self.records:
            return list(self.records.keys())[0]
        return 0

    def get_max_sequence_number(self):
        # 0 when the shard is empty.
        if self.records:
            return list(self.records.keys())[-1]
        return 0

    def get_sequence_number_at(self, at_timestamp):
        """Sequence number of the newest record created before ``at_timestamp``.

        Returns 0 when the shard is empty or no record is strictly older,
        so a subsequent get_records() call starts at the right position.
        """
        if not self.records or at_timestamp < list(self.records.values())[0].created_at:
            return 0
        else:
            # find the last item in the list that was created before
            # at_timestamp
            r = next(
                (
                    r
                    for r in reversed(self.records.values())
                    if r.created_at < at_timestamp
                ),
                None,
            )
            # FIX: when at_timestamp equals the first record's created_at
            # exactly, no record is strictly older and ``r`` is None;
            # previously this raised AttributeError on r.sequence_number.
            return r.sequence_number if r is not None else 0

    def to_json(self):
        """Describe the shard; closed shards also report their end sequence."""
        response = {
            "HashKeyRange": {
                "EndingHashKey": str(self.ending_hash),
                "StartingHashKey": str(self.starting_hash),
            },
            "SequenceNumberRange": {
                "StartingSequenceNumber": self.get_min_sequence_number(),
            },
            "ShardId": self.shard_id,
        }
        if not self.is_open:
            response["SequenceNumberRange"][
                "EndingSequenceNumber"
            ] = self.get_max_sequence_number()
        return response
class CloudFormationBackend(BaseBackend):
    """In-memory mock of the CloudFormation API: stacks and exports."""

    def __init__(self):
        self.stacks = OrderedDict()
        # stack_id -> FakeStack, kept so describe_stacks can still find
        # deleted stacks by id.
        self.deleted_stacks = {}
        self.exports = OrderedDict()

    def create_stack(self, name, template, parameters, region_name,
                     notification_arns=None, tags=None, role_arn=None):
        """Create a stack and publish its exports region-wide.

        Raises ValidationError (via _validate_export_uniqueness) when an
        export name collides with an existing one.
        """
        stack_id = generate_stack_id(name)
        new_stack = FakeStack(
            stack_id=stack_id,
            name=name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports,
        )
        self.stacks[stack_id] = new_stack
        self._validate_export_uniqueness(new_stack)
        for export in new_stack.exports:
            self.exports[export.name] = export
        return new_stack

    def describe_stacks(self, name_or_stack_id):
        """Return [stack] for a name/id (deleted stacks match by id only);
        with no filter, return every live stack."""
        stacks = self.stacks.values()
        if name_or_stack_id:
            for stack in stacks:
                if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
                    return [stack]
            if self.deleted_stacks:
                deleted_stacks = self.deleted_stacks.values()
                for stack in deleted_stacks:
                    if stack.stack_id == name_or_stack_id:
                        return [stack]
            raise ValidationError(name_or_stack_id)
        else:
            return list(stacks)

    def list_stacks(self):
        return self.stacks.values()

    def get_stack(self, name_or_stack_id):
        """Look up a stack by id (deleted included) or name (live only).

        Returns None when nothing matches.
        """
        all_stacks = dict(self.deleted_stacks, **self.stacks)
        if name_or_stack_id in all_stacks:
            # Lookup by stack id - deleted stacks included
            return all_stacks[name_or_stack_id]
        else:
            # Lookup by stack name - undeleted stacks only
            for stack in self.stacks.values():
                if stack.name == name_or_stack_id:
                    return stack

    def update_stack(self, name, template, role_arn=None, parameters=None,
                     tags=None):
        stack = self.get_stack(name)
        stack.update(template, role_arn, parameters=parameters, tags=tags)
        return stack

    def list_stack_resources(self, stack_name_or_id):
        stack = self.get_stack(stack_name_or_id)
        return stack.stack_resources

    def delete_stack(self, name_or_stack_id):
        """Delete a stack by id or name, retiring it into deleted_stacks
        and retracting its exports.  Returns the deleted stack (id path)."""
        if name_or_stack_id in self.stacks:
            # Delete by stack id
            stack = self.stacks.pop(name_or_stack_id, None)
            stack.delete()
            self.deleted_stacks[stack.stack_id] = stack
            # Plain loop instead of a side-effect list comprehension.
            for export in stack.exports:
                self.exports.pop(export.name)
            # FIX: the original returned a second ``self.stacks.pop(...)``,
            # which was always None because the stack was popped above.
            return stack
        else:
            # Delete by stack name
            for stack in list(self.stacks.values()):
                if stack.name == name_or_stack_id:
                    self.delete_stack(stack.stack_id)

    def list_exports(self, token):
        """Page through exports, 100 at a time; token is a stringified offset."""
        all_exports = list(self.exports.values())
        if token is None:
            exports = all_exports[0:100]
            next_token = '100' if len(all_exports) > 100 else None
        else:
            token = int(token)
            exports = all_exports[token:token + 100]
            next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    def _validate_export_uniqueness(self, stack):
        # Export names are a region-wide namespace; any overlap is an error.
        new_stack_export_names = [x.name for x in stack.exports]
        export_names = self.exports.keys()
        if not set(export_names).isdisjoint(new_stack_export_names):
            raise ValidationError(
                stack.stack_id,
                message='Export names must be unique across a given region')
class ELBv2Backend(BaseBackend):
    """In-memory ELBv2 (Application/Network Load Balancer) backend for a
    single region: load balancers, listeners, rules, and target groups."""

    def __init__(self, region_name=None):
        self.region_name = region_name
        self.target_groups = OrderedDict()    # target group ARN -> FakeTargetGroup
        self.load_balancers = OrderedDict()   # load balancer ARN -> FakeLoadBalancer

    @property
    def ec2_backend(self):
        """
        EC2 backend

        :return: EC2 Backend
        :rtype: moto.ec2.models.EC2Backend
        """
        return ec2_backends[self.region_name]

    @property
    def acm_backend(self):
        """
        ACM backend

        :return: ACM Backend
        :rtype: moto.acm.models.AWSCertificateManagerBackend
        """
        return acm_backends[self.region_name]

    def reset(self):
        """Wipe all state and re-run __init__, preserving the region."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_load_balancer(self, name, security_groups, subnet_ids,
                             scheme='internet-facing'):
        """Create a load balancer across the given subnets.

        Raises SubnetNotFoundError when no subnet ids are given or any id
        is unknown; DuplicateLoadBalancerName when the derived ARN exists.
        """
        vpc_id = None
        subnets = []
        if not subnet_ids:
            raise SubnetNotFoundError()
        for subnet_id in subnet_ids:
            subnet = self.ec2_backend.get_subnet(subnet_id)
            if subnet is None:
                raise SubnetNotFoundError()
            subnets.append(subnet)
        # VPC is taken from the first subnet; all subnets are assumed to
        # share it — TODO confirm (not validated here).
        vpc_id = subnets[0].vpc_id
        arn = make_arn_for_load_balancer(
            account_id=1, name=name, region_name=self.region_name)
        dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name)

        if arn in self.load_balancers:
            raise DuplicateLoadBalancerName()

        new_load_balancer = FakeLoadBalancer(
            name=name,
            security_groups=security_groups,
            arn=arn,
            scheme=scheme,
            subnets=subnets,
            vpc_id=vpc_id,
            dns_name=dns_name)
        self.load_balancers[arn] = new_load_balancer
        return new_load_balancer

    def create_rule(self, listener_arn, conditions, priority, actions):
        """Create a listener rule after validating conditions, priority
        uniqueness within the listener, and forward-action target groups."""
        listeners = self.describe_listeners(None, [listener_arn])
        if not listeners:
            raise ListenerNotFoundError()
        listener = listeners[0]

        # validate conditions: only these two fields, exactly one value each
        for condition in conditions:
            field = condition['field']
            if field not in ['path-pattern', 'host-header']:
                raise InvalidConditionFieldError(field)

            values = condition['values']
            if len(values) == 0:
                raise InvalidConditionValueError(
                    'A condition value must be specified')
            if len(values) > 1:
                raise InvalidConditionValueError(
                    "The '%s' field contains too many values; the limit is '1'" % field)
            # TODO: check pattern of value for 'host-header'
            # TODO: check pattern of value for 'path-pattern'

        # validate Priority: must be unused on this listener
        for rule in listener.rules:
            if rule.priority == priority:
                raise PriorityInUseError()

        # validate Actions: 'forward' must reference a known target group
        target_group_arns = [
            target_group.arn for target_group in self.target_groups.values()
        ]
        for i, action in enumerate(actions):
            index = i + 1
            action_type = action['type']
            if action_type == 'forward':
                action_target_group_arn = action['target_group_arn']
                if action_target_group_arn not in target_group_arns:
                    raise ActionTargetGroupNotFoundError(
                        action_target_group_arn)
            elif action_type == 'redirect':
                # nothing to do
                pass
            else:
                raise InvalidActionTypeError(action_type, index)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'

        # create rule
        rule = FakeRule(listener.arn, conditions, priority, actions,
                        is_default=False)
        listener.register(rule)
        return [rule]

    def create_target_group(self, name, **kwargs):
        """Create a target group after validating the name (length,
        charset, hyphen placement, uniqueness), protocols, and matcher."""
        if len(name) > 32:
            raise InvalidTargetGroupNameError(
                "Target group name '%s' cannot be longer than '32' characters" % name)
        if not re.match('^[a-zA-Z0-9\-]+$', name):
            raise InvalidTargetGroupNameError(
                "Target group name '%s' can only contain characters that are alphanumeric characters or hyphens(-)" % name)

        # undocumented validation: no double hyphens, no leading/trailing hyphen
        if not re.match('(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$', name):
            raise InvalidTargetGroupNameError(
                "1 validation error detected: Value '%s' at 'targetGroup.targetGroupArn.targetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern: (?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" % name)

        if name.startswith('-') or name.endswith('-'):
            raise InvalidTargetGroupNameError(
                "Target group name '%s' cannot begin or end with '-'" % name)

        for target_group in self.target_groups.values():
            if target_group.name == name:
                raise DuplicateTargetGroupName()

        valid_protocols = ['HTTPS', 'HTTP', 'TCP']
        if kwargs.get('healthcheck_protocol') and kwargs[
                'healthcheck_protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'healthCheckProtocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(
                    kwargs['healthcheck_protocol'], valid_protocols))
        if kwargs.get(
                'protocol') and kwargs['protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'protocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(
                    kwargs['protocol'], valid_protocols))

        # matcher HttpCode must look like "200", "200-399" or "200,201,..."
        if kwargs.get('matcher') and FakeTargetGroup.HTTP_CODE_REGEX.match(
                kwargs['matcher']['HttpCode']) is None:
            raise RESTError(
                'InvalidParameterValue',
                'HttpCode must be like 200 | 200-399 | 200,201 ...')

        arn = make_arn_for_target_group(
            account_id=1, name=name, region_name=self.region_name)
        target_group = FakeTargetGroup(name, arn, **kwargs)
        self.target_groups[target_group.arn] = target_group
        return target_group

    def create_listener(self, load_balancer_arn, protocol, port, ssl_policy,
                        certificate, default_actions):
        """Create a listener on the load balancer; validates the balancer
        exists, the port is free, and forward actions reference known
        target groups (linking them to this load balancer)."""
        balancer = self.load_balancers.get(load_balancer_arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()
        if port in balancer.listeners:
            raise DuplicateListenerError()

        # Listener ARN derived from the balancer ARN plus port and id(self)
        arn = load_balancer_arn.replace(
            ':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self))
        listener = FakeListener(load_balancer_arn, arn, protocol, port,
                                ssl_policy, certificate, default_actions)
        balancer.listeners[listener.arn] = listener
        for i, action in enumerate(default_actions):
            action_type = action['type']
            if action_type == 'forward':
                if action['target_group_arn'] in self.target_groups.keys():
                    target_group = self.target_groups[
                        action['target_group_arn']]
                    target_group.load_balancer_arns.append(load_balancer_arn)
            elif action_type == 'redirect':
                # nothing to do
                pass
            else:
                raise InvalidActionTypeError(action_type, i + 1)

        return listener

    def describe_load_balancers(self, arns, names):
        """Return balancers matching any of the given ARNs/names, or all
        balancers when neither filter is given."""
        balancers = self.load_balancers.values()
        arns = arns or []
        names = names or []
        if not arns and not names:
            return balancers

        matched_balancers = []
        # NOTE(review): matched_balancer is not reset per filter entry, so
        # a hit from an earlier ARN/name can mask a later miss that should
        # raise LoadBalancerNotFoundError — confirm intended behavior.
        matched_balancer = None

        for arn in arns:
            for balancer in balancers:
                if balancer.arn == arn:
                    matched_balancer = balancer
            if matched_balancer is None:
                raise LoadBalancerNotFoundError()
            elif matched_balancer not in matched_balancers:
                matched_balancers.append(matched_balancer)

        for name in names:
            for balancer in balancers:
                if balancer.name == name:
                    matched_balancer = balancer
            if matched_balancer is None:
                raise LoadBalancerNotFoundError()
            elif matched_balancer not in matched_balancers:
                matched_balancers.append(matched_balancer)

        return matched_balancers

    def describe_rules(self, listener_arn, rule_arns):
        """Return rules for a listener ARN, or the rules matching the
        given rule ARNs; exactly one of the two must be provided."""
        if listener_arn is None and not rule_arns:
            raise InvalidDescribeRulesRequest(
                "You must specify either listener rule ARNs or a listener ARN")
        if listener_arn is not None and rule_arns is not None:
            raise InvalidDescribeRulesRequest(
                'Listener rule ARNs and a listener ARN cannot be specified at the same time'
            )
        if listener_arn:
            listener = self.describe_listeners(None, [listener_arn])[0]
            return listener.rules

        # search for rule arns across every listener of every balancer
        matched_rules = []
        for load_balancer_arn in self.load_balancers:
            listeners = self.load_balancers.get(
                load_balancer_arn).listeners.values()
            for listener in listeners:
                for rule in listener.rules:
                    if rule.arn in rule_arns:
                        matched_rules.append(rule)
        return matched_rules

    def describe_target_groups(self, load_balancer_arn, target_group_arns,
                               names):
        """Return target groups filtered by balancer ARN, group ARNs, or
        names (checked in that order); all groups when no filter given."""
        if load_balancer_arn:
            if load_balancer_arn not in self.load_balancers:
                raise LoadBalancerNotFoundError()
            return [
                tg for tg in self.target_groups.values()
                if load_balancer_arn in tg.load_balancer_arns
            ]

        if target_group_arns:
            try:
                return [self.target_groups[arn] for arn in target_group_arns]
            except KeyError:
                raise TargetGroupNotFoundError()

        if names:
            matched = []
            for name in names:
                found = None
                for target_group in self.target_groups.values():
                    if target_group.name == name:
                        found = target_group
                if not found:
                    raise TargetGroupNotFoundError()
                matched.append(found)
            return matched

        return self.target_groups.values()

    def describe_listeners(self, load_balancer_arn, listener_arns):
        """Return all listeners of a balancer, or the listeners matching
        the given listener ARNs (raising if any ARN is unknown)."""
        if load_balancer_arn:
            if load_balancer_arn not in self.load_balancers:
                raise LoadBalancerNotFoundError()
            return self.load_balancers.get(
                load_balancer_arn).listeners.values()

        matched = []
        for load_balancer in self.load_balancers.values():
            for listener_arn in listener_arns:
                listener = load_balancer.listeners.get(listener_arn)
                if not listener:
                    raise ListenerNotFoundError()
                matched.append(listener)
        return matched

    def delete_load_balancer(self, arn):
        """Remove a load balancer; silently ignores unknown ARNs."""
        self.load_balancers.pop(arn, None)

    def delete_rule(self, arn):
        """Remove the rule with the given ARN from whichever listener
        holds it; no error when the rule is not found."""
        for load_balancer_arn in self.load_balancers:
            listeners = self.load_balancers.get(
                load_balancer_arn).listeners.values()
            for listener in listeners:
                for rule in listener.rules:
                    if rule.arn == arn:
                        listener.remove_rule(rule)
                        return

        # should raise RuleNotFound Error according to the AWS API doc
        # however, boto3 does't raise error even if rule is not found

    def delete_target_group(self, target_group_arn):
        """Delete a target group unless a listener or rule still forwards
        to it (ResourceInUseError); raises when the ARN is unknown."""
        if target_group_arn not in self.target_groups:
            raise TargetGroupNotFoundError()

        target_group = self.target_groups[target_group_arn]
        if target_group:
            if self._any_listener_using(target_group_arn):
                raise ResourceInUseError(
                    "The target group '{}' is currently in use by a listener or a rule"
                    .format(target_group_arn))
            del self.target_groups[target_group_arn]
            return target_group

    def delete_listener(self, listener_arn):
        """Remove and return the listener with the given ARN; raises
        ListenerNotFoundError when no balancer holds it."""
        for load_balancer in self.load_balancers.values():
            listener = load_balancer.listeners.pop(listener_arn, None)
            if listener:
                return listener
        raise ListenerNotFoundError()

    def modify_rule(self, rule_arn, conditions, actions):
        """Replace a rule's conditions and/or actions after the same
        validation as create_rule; at least one of the two is required."""
        # if conditions or actions is empty list, do not update the attributes
        if not conditions and not actions:
            raise InvalidModifyRuleArgumentsError()
        rules = self.describe_rules(listener_arn=None, rule_arns=[rule_arn])
        if not rules:
            raise RuleNotFoundError()
        rule = rules[0]

        if conditions:
            for condition in conditions:
                field = condition['field']
                if field not in ['path-pattern', 'host-header']:
                    raise InvalidConditionFieldError(field)

                values = condition['values']
                if len(values) == 0:
                    raise InvalidConditionValueError(
                        'A condition value must be specified')
                if len(values) > 1:
                    raise InvalidConditionValueError(
                        "The '%s' field contains too many values; the limit is '1'" % field)
                # TODO: check pattern of value for 'host-header'
                # TODO: check pattern of value for 'path-pattern'

        # validate Actions
        target_group_arns = [
            target_group.arn for target_group in self.target_groups.values()
        ]
        if actions:
            for i, action in enumerate(actions):
                index = i + 1
                action_type = action['type']
                if action_type == 'forward':
                    action_target_group_arn = action['target_group_arn']
                    if action_target_group_arn not in target_group_arns:
                        raise ActionTargetGroupNotFoundError(
                            action_target_group_arn)
                elif action_type == 'redirect':
                    # nothing to do
                    pass
                else:
                    raise InvalidActionTypeError(action_type, index)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'

        # modify rule
        if conditions:
            rule.conditions = conditions
        if actions:
            rule.actions = actions
        return [rule]

    def register_targets(self, target_group_arn, instances):
        """Register instances with the target group; raises when the
        target group ARN is unknown."""
        target_group = self.target_groups.get(target_group_arn)
        if target_group is None:
            raise TargetGroupNotFoundError()
        target_group.register(instances)

    def deregister_targets(self, target_group_arn, instances):
        """Deregister instances from the target group; raises when the
        target group ARN is unknown."""
        target_group = self.target_groups.get(target_group_arn)
        if target_group is None:
            raise TargetGroupNotFoundError()
        target_group.deregister(instances)

    def describe_target_health(self, target_group_arn, targets):
        """Return health descriptions for the given targets (or all
        registered targets when none are specified)."""
        target_group = self.target_groups.get(target_group_arn)
        if target_group is None:
            raise TargetGroupNotFoundError()

        if not targets:
            targets = target_group.targets.values()
        return [
            target_group.health_for(target, self.ec2_backend)
            for target in targets
        ]

    def set_rule_priorities(self, rule_priorities):
        """Reassign priorities to rules; validates all inputs first
        (duplicates, unknown rules, priorities already in use) and only
        then applies the changes."""
        # validate: no priority may appear twice in the request
        priorities = [
            rule_priority['priority'] for rule_priority in rule_priorities
        ]
        for priority in set(priorities):
            if priorities.count(priority) > 1:
                raise DuplicatePriorityError(priority)

        # validate: each rule exists and its target priority is free
        for rule_priority in rule_priorities:
            given_rule_arn = rule_priority['rule_arn']
            priority = rule_priority['priority']
            _given_rules = self.describe_rules(listener_arn=None,
                                               rule_arns=[given_rule_arn])
            if not _given_rules:
                raise RuleNotFoundError()
            given_rule = _given_rules[0]
            listeners = self.describe_listeners(None,
                                                [given_rule.listener_arn])
            listener = listeners[0]
            for rule_in_listener in listener.rules:
                if rule_in_listener.priority == priority:
                    raise PriorityInUseError()

        # modify
        modified_rules = []
        for rule_priority in rule_priorities:
            given_rule_arn = rule_priority['rule_arn']
            priority = rule_priority['priority']
            _given_rules = self.describe_rules(listener_arn=None,
                                               rule_arns=[given_rule_arn])
            if not _given_rules:
                raise RuleNotFoundError()
            given_rule = _given_rules[0]
            given_rule.priority = priority
            modified_rules.append(given_rule)
        return modified_rules

    def set_ip_address_type(self, arn, ip_type):
        """Set the balancer's IP address type.

        NOTE(review): accepts 'internal' | 'dualstack' and assigns to
        `balancer.stack` — the real ELBv2 API uses 'ipv4' | 'dualstack'
        and a dedicated IpAddressType attribute; confirm both are
        intentional.
        """
        if ip_type not in ('internal', 'dualstack'):
            raise RESTError(
                'InvalidParameterValue',
                'IpAddressType must be either internal | dualstack')

        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        if ip_type == 'dualstack' and balancer.scheme == 'internal':
            raise RESTError('InvalidConfigurationRequest',
                            'Internal load balancers cannot be dualstack')

        balancer.stack = ip_type

    def set_security_groups(self, arn, sec_groups):
        """Replace the balancer's security groups after checking each
        group id exists in the EC2 backend."""
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        # Check all security groups exist
        for sec_group_id in sec_groups:
            if self.ec2_backend.get_security_group_from_id(
                    sec_group_id) is None:
                raise RESTError(
                    'InvalidSecurityGroup',
                    'Security group {0} does not exist'.format(sec_group_id))

        balancer.security_groups = sec_groups

    def set_subnets(self, arn, subnets):
        """Replace the balancer's subnets; requires at least two distinct
        availability zones and at most one subnet per zone."""
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        subnet_objects = []
        sub_zone_list = {}
        for subnet in subnets:
            try:
                subnet = self.ec2_backend.get_subnet(subnet)

                if subnet.availability_zone in sub_zone_list:
                    raise RESTError(
                        'InvalidConfigurationRequest',
                        'More than 1 subnet cannot be specified for 1 availability zone'
                    )

                sub_zone_list[subnet.availability_zone] = subnet.id
                subnet_objects.append(subnet)
            except Exception:
                # NOTE(review): this broad except also converts the
                # duplicate-zone RESTError above into SubnetNotFoundError
                # — confirm intended.
                raise SubnetNotFoundError()

        if len(sub_zone_list) < 2:
            raise RESTError('InvalidConfigurationRequest',
                            'More than 1 availability zone must be specified')

        balancer.subnets = subnet_objects

        return sub_zone_list.items()

    def modify_load_balancer_attributes(self, arn, attrs):
        """Update balancer attributes; every key must be one of
        FakeLoadBalancer.VALID_ATTRS."""
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        for key in attrs:
            if key not in FakeLoadBalancer.VALID_ATTRS:
                raise RESTError('InvalidConfigurationRequest',
                                'Key {0} not valid'.format(key))

        balancer.attrs.update(attrs)
        return balancer.attrs

    def describe_load_balancer_attributes(self, arn):
        """Return the balancer's attribute dict."""
        balancer = self.load_balancers.get(arn)
        if balancer is None:
            raise LoadBalancerNotFoundError()

        return balancer.attrs

    def modify_target_group(self, arn, health_check_proto=None,
                            health_check_port=None, health_check_path=None,
                            health_check_interval=None,
                            health_check_timeout=None,
                            healthy_threshold_count=None,
                            unhealthy_threshold_count=None, http_codes=None):
        """Update the health-check settings of a target group; only the
        parameters that are not None are applied."""
        target_group = self.target_groups.get(arn)
        if target_group is None:
            raise TargetGroupNotFoundError()

        if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(
                http_codes) is None:
            raise RESTError(
                'InvalidParameterValue',
                'HttpCode must be like 200 | 200-399 | 200,201 ...')

        if http_codes is not None:
            target_group.matcher['HttpCode'] = http_codes
        if health_check_interval is not None:
            target_group.healthcheck_interval_seconds = health_check_interval
        if health_check_path is not None:
            target_group.healthcheck_path = health_check_path
        if health_check_port is not None:
            target_group.healthcheck_port = health_check_port
        if health_check_proto is not None:
            target_group.healthcheck_protocol = health_check_proto
        if health_check_timeout is not None:
            target_group.healthcheck_timeout_seconds = health_check_timeout
        if healthy_threshold_count is not None:
            target_group.healthy_threshold_count = healthy_threshold_count
        if unhealthy_threshold_count is not None:
            target_group.unhealthy_threshold_count = unhealthy_threshold_count

        return target_group

    def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None,
                        certificates=None, default_actions=None):
        """Update a listener's port, protocol, SSL policy, certificates,
        and/or default actions; only non-None parameters are applied."""
        for load_balancer in self.load_balancers.values():
            if arn in load_balancer.listeners:
                break
        else:
            raise ListenerNotFoundError()

        listener = load_balancer.listeners[arn]

        if port is not None:
            for listener_arn, current_listener in load_balancer.listeners.items(
            ):
                if listener_arn == arn:
                    continue
                # NOTE(review): compares listener.port (the listener being
                # modified) instead of current_listener.port — this looks
                # like a bug that makes the duplicate-port check fire on
                # the wrong condition; confirm and fix separately.
                if listener.port == port:
                    raise DuplicateListenerError()

            listener.port = port

        if protocol is not None:
            if protocol not in ('HTTP', 'HTTPS', 'TCP'):
                raise RESTError(
                    'UnsupportedProtocol',
                    'Protocol {0} is not supported'.format(protocol))

            # HTTPS checks
            if protocol == 'HTTPS':
                # HTTPS
                # Might already be HTTPS so may not provide certs
                if certificates is None and listener.protocol != 'HTTPS':
                    raise RESTError('InvalidConfigurationRequest',
                                    'Certificates must be provided for HTTPS')

                # Check certificates exist
                if certificates is not None:
                    default_cert = None
                    all_certs = set()  # for SNI
                    for cert in certificates:
                        if cert['is_default'] == 'true':
                            default_cert = cert['certificate_arn']
                        try:
                            self.acm_backend.get_certificate(
                                cert['certificate_arn'])
                        except Exception:
                            raise RESTError(
                                'CertificateNotFound',
                                'Certificate {0} not found'.format(
                                    cert['certificate_arn']))
                        all_certs.add(cert['certificate_arn'])

                    if default_cert is None:
                        raise RESTError('InvalidConfigurationRequest',
                                        'No default certificate')

                    listener.certificate = default_cert
                    listener.certificates = list(all_certs)

            listener.protocol = protocol

        if ssl_policy is not None:
            # Its already validated in responses.py
            listener.ssl_policy = ssl_policy

        if default_actions is not None and default_actions != []:
            # Is currently not validated
            listener.default_actions = default_actions

        return listener

    def _any_listener_using(self, target_group_arn):
        """True when any rule on any listener forwards to the given
        target group ARN."""
        for load_balancer in self.load_balancers.values():
            for listener in load_balancer.listeners.values():
                for rule in listener.rules:
                    for action in rule.actions:
                        if action.get('target_group_arn') == target_group_arn:
                            return True
        return False
class CloudFormationBackend(BaseBackend):
    """In-memory CloudFormation backend tracking live stacks, deleted
    stacks, and cross-stack exports for a single region.

    NOTE(review): this is a second definition of CloudFormationBackend in
    the same file (an earlier copy passes cross_stack_resources to
    FakeStack); at import time this later definition wins — confirm the
    duplication is intentional.
    """

    def __init__(self):
        self.stacks = OrderedDict()          # stack_id -> FakeStack (live)
        self.deleted_stacks = {}             # stack_id -> FakeStack (deleted)
        self.exports = OrderedDict()         # export name -> export object

    def create_stack(self, name, template, parameters, region_name,
                     notification_arns=None, tags=None, role_arn=None):
        """Create a FakeStack, register it, and publish its exports.

        Raises ValidationError (via _validate_export_uniqueness) if any of
        the new stack's export names collides with an existing export.
        """
        stack_id = generate_stack_id(name)
        new_stack = FakeStack(
            stack_id=stack_id,
            name=name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
        )
        self.stacks[stack_id] = new_stack
        self._validate_export_uniqueness(new_stack)
        for export in new_stack.exports:
            self.exports[export.name] = export
        return new_stack

    def describe_stacks(self, name_or_stack_id):
        """Return a list of stacks matching name/id, or all live stacks.

        Deleted stacks are only matched by stack id, mirroring AWS.
        Raises ValidationError when a lookup key matches nothing.
        """
        stacks = self.stacks.values()
        if name_or_stack_id:
            for stack in stacks:
                if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
                    return [stack]
            if self.deleted_stacks:
                deleted_stacks = self.deleted_stacks.values()
                for stack in deleted_stacks:
                    if stack.stack_id == name_or_stack_id:
                        return [stack]
            raise ValidationError(name_or_stack_id)
        else:
            return list(stacks)

    def list_stacks(self):
        """Return a view of all live stacks."""
        return self.stacks.values()

    def get_stack(self, name_or_stack_id):
        """Look up a stack by id (deleted stacks included) or by name
        (live stacks only). Returns None when nothing matches."""
        all_stacks = dict(self.deleted_stacks, **self.stacks)
        if name_or_stack_id in all_stacks:
            # Lookup by stack id - deleted stacks included
            return all_stacks[name_or_stack_id]
        else:
            # Lookup by stack name - undeleted stacks only
            for stack in self.stacks.values():
                if stack.name == name_or_stack_id:
                    return stack

    def update_stack(self, name, template, role_arn=None, parameters=None,
                     tags=None):
        """Apply a new template/parameters/tags to an existing stack."""
        stack = self.get_stack(name)
        stack.update(template, role_arn, parameters=parameters, tags=tags)
        return stack

    def list_stack_resources(self, stack_name_or_id):
        """Return the resources of the given stack."""
        stack = self.get_stack(stack_name_or_id)
        return stack.stack_resources

    def delete_stack(self, name_or_stack_id):
        """Delete a stack by id or by name.

        Moves the stack into self.deleted_stacks, removes its exports,
        and returns the deleted stack for id lookups (None for names).
        """
        if name_or_stack_id in self.stacks:
            # Delete by stack id
            stack = self.stacks.pop(name_or_stack_id)
            stack.delete()
            self.deleted_stacks[stack.stack_id] = stack
            # Side-effecting loop (was a throwaway list comprehension).
            for export in stack.exports:
                self.exports.pop(export.name)
            # BUG FIX: the original re-popped the already-removed key and
            # therefore always returned None; return the deleted stack.
            return stack
        else:
            # Delete by stack name
            for stack in list(self.stacks.values()):
                if stack.name == name_or_stack_id:
                    self.delete_stack(stack.stack_id)

    def list_exports(self, token):
        """Return up to 100 exports per page plus the next page token
        (None when there are no further pages)."""
        all_exports = list(self.exports.values())
        if token is None:
            exports = all_exports[0:100]
            next_token = '100' if len(all_exports) > 100 else None
        else:
            token = int(token)
            exports = all_exports[token:token + 100]
            next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    def _validate_export_uniqueness(self, stack):
        """Raise ValidationError if any of the stack's export names is
        already exported in this region."""
        new_stack_export_names = [x.name for x in stack.exports]
        export_names = self.exports.keys()
        if not set(export_names).isdisjoint(new_stack_export_names):
            raise ValidationError(
                stack.stack_id,
                message='Export names must be unique across a given region')