def __init__(
    self,
    region,
    project_name,
    project_source,
    artifacts,
    environment,
    serviceRole="some_role",
):
    current_date = iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow())
    self.project_metadata = dict()

    self.project_metadata["name"] = project_name
    self.project_metadata["arn"] = "arn:aws:codebuild:{0}:{1}:project/{2}".format(
        region, get_account_id(), self.project_metadata["name"]
    )
    self.project_metadata["encryptionKey"] = "arn:aws:kms:{0}:{1}:alias/aws/s3".format(
        region, get_account_id()
    )
    self.project_metadata["serviceRole"] = "arn:aws:iam::{0}:role/service-role/{1}".format(
        get_account_id(), serviceRole
    )
    self.project_metadata["lastModifiedDate"] = current_date
    self.project_metadata["created"] = current_date
    self.project_metadata["badge"] = dict()
    # Must serialize as a JSON false; json.dumps turns Python False into exactly that.
    self.project_metadata["badge"]["badgeEnabled"] = False
    self.project_metadata["environment"] = environment
    self.project_metadata["artifacts"] = artifacts
    self.project_metadata["source"] = project_source
    self.project_metadata["cache"] = dict()
    self.project_metadata["cache"]["type"] = "NO_CACHE"
    self.project_metadata["timeoutInMinutes"] = ""
    self.project_metadata["queuedTimeoutInMinutes"] = ""
def get_registry_policy(self):
    if not self.registry_policy:
        raise RegistryPolicyNotFoundException(get_account_id())

    return {
        "registryId": get_account_id(),
        "policyText": self.registry_policy,
    }
def delete_registry_policy(self):
    policy = self.registry_policy
    if not policy:
        raise RegistryPolicyNotFoundException(get_account_id())

    self.registry_policy = None

    return {
        "registryId": get_account_id(),
        "policyText": policy,
    }
def __init__(
    self,
    region_name,
    creation_token,
    file_system_id,
    context,
    performance_mode,
    encrypted,
    kms_key_id,
    throughput_mode,
    provisioned_throughput_in_mibps,
    availability_zone_name,
    backup,
    lifecycle_policies=None,
    file_system_policy=None,
):
    if availability_zone_name:
        backup = True
    if kms_key_id and not encrypted:
        raise BadRequest('If kms_key_id given, "encrypted" must be True.')

    # Save given parameters
    self.creation_token = creation_token
    self.performance_mode = performance_mode or "generalPurpose"
    self.encrypted = encrypted or False
    self.kms_key_id = kms_key_id
    self.throughput_mode = throughput_mode or "bursting"
    self.provisioned_throughput_in_mibps = provisioned_throughput_in_mibps
    self.availability_zone_name = availability_zone_name
    self.availability_zone_id = None
    if self.availability_zone_name:
        self.availability_zone_id = _lookup_az_id(self.availability_zone_name)
    self._backup = backup
    self.lifecycle_policies = lifecycle_policies or []
    self.file_system_policy = file_system_policy
    self._context = context

    # Generate AWS-assigned parameters
    self.file_system_id = file_system_id
    self.file_system_arn = "arn:aws:elasticfilesystem:{region}:{user_id}:file-system/{file_system_id}".format(
        region=region_name,
        user_id=get_account_id(),
        file_system_id=self.file_system_id,
    )
    self.creation_time = time.time()
    self.owner_id = get_account_id()

    # Initialize some state parameters
    self.life_cycle_state = "available"
    self._mount_targets = {}
    self._size_value = 0
def __init__(self, region, **kwargs):
    self.region = region

    self.allow_external_principals = kwargs.get("allowExternalPrincipals", True)
    self.arn = "arn:aws:ram:{0}:{1}:resource-share/{2}".format(
        self.region, get_account_id(), uuid4()
    )
    self.creation_time = datetime.utcnow()
    self.feature_set = "STANDARD"
    self.last_updated_time = datetime.utcnow()
    self.name = kwargs["name"]
    self.owning_account_id = get_account_id()
    self.principals = []
    self.resource_arns = []
    self.status = "ACTIVE"
def __init__(
    self, policy, key_usage, key_spec, description, region, multi_region=False
):
    self.id = generate_key_id(multi_region)
    self.creation_date = unix_time()
    self.policy = policy or self.generate_default_policy()
    self.key_usage = key_usage
    self.key_state = "Enabled"
    self.description = description or ""
    self.enabled = True
    self.region = region
    self.multi_region = multi_region
    self.account_id = get_account_id()
    self.key_rotation_status = False
    self.deletion_date = None
    self.key_material = generate_master_key()
    self.private_key = generate_private_key()
    self.origin = "AWS_KMS"
    self.key_manager = "CUSTOMER"
    self.key_spec = key_spec or "SYMMETRIC_DEFAULT"
    self.grants = dict()
def deregister_delegated_administrator(self, **kwargs):
    account_id = kwargs["AccountId"]
    service = kwargs["ServicePrincipal"]

    if account_id == get_account_id():
        raise ConstraintViolationException(
            "You cannot register master account/yourself as delegated administrator for your organization."
        )

    admin = next(
        (admin for admin in self.admins if admin.account.id == account_id), None
    )
    if admin is None:
        account = next(
            (account for account in self.accounts if account.id == account_id),
            None,
        )
        if account:
            raise AccountNotRegisteredException()
        raise AccountNotFoundException()

    admin.remove_service_principal(service)

    # remove account, when no services attached
    if not admin.services:
        self.admins.remove(admin)
def to_short_dict(self):
    hsh = {"name": self.name, "status": self.status}
    if self.description:
        hsh["description"] = self.description
    hsh["arn"] = "arn:aws:swf:{0}:{1}:/domain/{2}".format(
        self.region_name, get_account_id(), self.name
    )
    return hsh
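# Hedged example of the dict shape returned above (domain name, region, and
# account id are illustrative). Note the ":/domain/" segment with the extra
# slash, which matches the real SWF domain ARN format:
#
#   {
#       "name": "my-domain",
#       "status": "REGISTERED",
#       "description": "test domain",
#       "arn": "arn:aws:swf:us-east-1:123456789012:/domain/my-domain",
#   }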
def create_pipeline(self, pipeline, tags):
    if pipeline["name"] in self.pipelines:
        raise InvalidStructureException(
            "A pipeline with the name '{0}' already exists in account '{1}'".format(
                pipeline["name"], get_account_id()
            )
        )

    try:
        role = self.iam_backend.get_role_by_arn(pipeline["roleArn"])
        service_principal = json.loads(role.assume_role_policy_document)[
            "Statement"
        ][0]["Principal"]["Service"]
        if "codepipeline.amazonaws.com" not in service_principal:
            raise IAMNotFoundException("")
    except IAMNotFoundException:
        raise InvalidStructureException(
            "CodePipeline is not authorized to perform AssumeRole on role {}".format(
                pipeline["roleArn"]
            )
        )

    if len(pipeline["stages"]) < 2:
        raise InvalidStructureException(
            "Pipeline has only 1 stage(s). There should be a minimum of 2 stages in a pipeline"
        )

    self.pipelines[pipeline["name"]] = CodePipeline(self.region, pipeline)

    if tags:
        self.pipelines[pipeline["name"]].validate_tags(tags)

        new_tags = {tag["key"]: tag["value"] for tag in tags}
        self.pipelines[pipeline["name"]].tags.update(new_tags)

    # Tolerate tags=None here; the guard above already treats it as "no tags".
    return pipeline, sorted(tags or [], key=lambda i: i["key"])
def __init__(
    self,
    region_name,
    client_token,
    file_system_id,
    name,
    posix_user,
    root_directory,
    context,
):
    self.access_point_id = get_random_hex(8)
    self.access_point_arn = "arn:aws:elasticfilesystem:{region}:{user_id}:access-point/fsap-{access_point_id}".format(
        region=region_name,
        user_id=get_account_id(),
        access_point_id=self.access_point_id,
    )
    self.client_token = client_token
    self.file_system_id = file_system_id
    self.name = name
    self.posix_user = posix_user

    if not root_directory:
        root_directory = {"Path": "/"}
    self.root_directory = root_directory

    self.context = context
def arn(self):
    return "arn:aws:redshift:{region}:{account_id}:{resource_type}:{resource_id}".format(
        region=self.region,
        account_id=get_account_id(),
        resource_type=self.resource_type,
        resource_id=self.resource_id,
    )
def sendToSns(self, region, sns_topic_arns):
    message = """StackId='{stack_id}'
Timestamp='{timestamp}'
EventId='{event_id}'
LogicalResourceId='{logical_resource_id}'
Namespace='{account_id}'
ResourceProperties='{resource_properties}'
ResourceStatus='{resource_status}'
ResourceStatusReason='{resource_status_reason}'
ResourceType='{resource_type}'
StackName='{stack_name}'
ClientRequestToken='{client_request_token}'""".format(
        stack_id=self.stack_id,
        timestamp=iso_8601_datetime_with_milliseconds(self.timestamp),
        event_id=self.event_id,
        logical_resource_id=self.logical_resource_id,
        account_id=get_account_id(),
        resource_properties=self.resource_properties,
        resource_status=self.resource_status,
        resource_status_reason=self.resource_status_reason,
        resource_type=self.resource_type,
        stack_name=self.stack_name,
        client_request_token=self.client_request_token,
    )

    for sns_topic_arn in sns_topic_arns:
        sns_backends[region].publish(
            message, subject="AWS CloudFormation Notification", arn=sns_topic_arn
        )
def __init__(
    self,
    region,
    name,
    input_bucket,
    output_bucket,
    role,
    content_config,
    thumbnail_config,
):
    # Pipeline IDs look like "1234567890123-abcdef": 13 digits, a dash, 6 lowercase letters.
    a = "".join(random.choice(string.digits) for _ in range(13))
    b = "".join(random.choice(string.ascii_lowercase) for _ in range(6))
    self.id = "{}-{}".format(a, b)
    self.name = name
    self.arn = "arn:aws:elastictranscoder:{}:{}:pipeline/{}".format(
        region, get_account_id(), self.id
    )
    self.status = "Active"
    self.input_bucket = input_bucket
    self.output_bucket = output_bucket or content_config["Bucket"]
    self.role = role
    self.content_config = content_config or {"Bucket": self.output_bucket}
    if "Permissions" not in self.content_config:
        self.content_config["Permissions"] = []
    self.thumbnail_config = thumbnail_config or {"Bucket": self.output_bucket}
    if "Permissions" not in self.thumbnail_config:
        self.thumbnail_config["Permissions"] = []
def __init__(
    self,
    stack_id,
    stack_name,
    parameters,
    tags,
    region_name,
    template,
    cross_stack_resources,
):
    self._template = template
    self._resource_json_map = template["Resources"] if template != {} else {}
    self._region_name = region_name
    self.input_parameters = parameters
    self.tags = copy.deepcopy(tags)
    self.resolved_parameters = {}
    self.cross_stack_resources = cross_stack_resources
    self.stack_id = stack_id

    # Create the default resources
    self._parsed_resources = {
        "AWS::AccountId": get_account_id(),
        "AWS::Region": self._region_name,
        "AWS::StackId": stack_id,
        "AWS::StackName": stack_name,
        "AWS::URLSuffix": "amazonaws.com",
        "AWS::NoValue": None,
        "AWS::Partition": "aws",
    }
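# Hedged illustration (values are examples): seeding _parsed_resources with the
# CloudFormation pseudo parameters means a template intrinsic such as
# {"Ref": "AWS::Region"} can resolve to "us-east-1", and {"Ref": "AWS::AccountId"}
# to the mock account id, through the same lookup path as a Ref to an actual
# parsed resource.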
def __init__(
    self,
    region_name,
    device_name,
    stream_name,
    media_type,
    kms_key_id,
    data_retention_in_hours,
    tags,
):
    self.region_name = region_name
    self.stream_name = stream_name
    self.device_name = device_name
    self.media_type = media_type
    self.kms_key_id = kms_key_id
    self.data_retention_in_hours = data_retention_in_hours
    self.tags = tags
    self.status = "ACTIVE"
    self.version = self._get_random_string()
    self.creation_time = datetime.utcnow()
    stream_arn = "arn:aws:kinesisvideo:{}:{}:stream/{}/1598784211076".format(
        self.region_name, get_account_id(), self.stream_name
    )
    self.data_endpoint_number = get_random_hex()
    self.arn = stream_arn
def create_image(
    self,
    instance_id,
    name=None,
    description=None,
    tag_specifications=None,
):
    # TODO: check that instance exists and pull info from it.
    ami_id = random_ami_id()
    instance = self.get_instance(instance_id)

    tags = []
    for tag_specification in tag_specifications or []:  # tolerate the None default
        resource_type = tag_specification["ResourceType"]
        if resource_type == "image":
            tags += tag_specification["Tag"]
        elif resource_type == "snapshot":
            raise NotImplementedError()
        else:
            raise InvalidTaggableResourceType(resource_type)

    ami = Ami(
        self,
        ami_id,
        instance=instance,
        source_ami=None,
        name=name,
        description=description,
        owner_id=get_account_id(),
        snapshot_description=f"Created by CreateImage({instance_id}) for {ami_id}",
    )
    for tag in tags:
        ami.add_tag(tag["Key"], tag["Value"])
    self.amis[ami_id] = ami
    return ami
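# Hedged usage sketch, derived from the loop above (the backend variable and all
# ids are illustrative):
#
#   backend.create_image(
#       "i-0123456789abcdef0",
#       name="my-ami",
#       tag_specifications=[
#           {"ResourceType": "image", "Tag": [{"Key": "Name", "Value": "my-ami"}]},
#       ],
#   )
#
# Only ResourceType "image" is tagged; "snapshot" raises NotImplementedError and
# any other resource type raises InvalidTaggableResourceType.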
def _send_safe_notification(source, event_name, region, resources, detail):
    from .models import events_backends

    event = None
    if source == "aws.s3" and event_name == "CreateBucket":
        event = _EVENT_S3_OBJECT_CREATED.copy()
        event["region"] = region
        event["resources"] = resources
        event["detail"] = detail

    if event is None:
        return

    account = events_backends[get_account_id()]
    for backend in account.values():
        applicable_targets = []
        for rule in backend.rules.values():
            if rule.state != "ENABLED":
                continue
            pattern = rule.event_pattern.get_pattern()
            if source in pattern.get("source", []):
                if event_name in pattern.get("detail", {}).get("eventName", []):
                    applicable_targets.extend(rule.targets)

        for target in applicable_targets:
            if target.get("Arn", "").startswith("arn:aws:lambda"):
                _invoke_lambda(target.get("Arn"), event=event)
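# Hedged example of a rule pattern the matching above would accept (the shape is
# derived from the pattern.get(...) lookups; values are illustrative):
#
#   {
#       "source": ["aws.s3"],
#       "detail": {"eventName": ["CreateBucket"]},
#   }
#
# Only ENABLED rules are considered, and only Lambda-ARN targets are invoked.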
def send_log_event(
    self,
    delivery_stream_arn,
    filter_name,
    log_group_name,
    log_stream_name,
    log_events,
):  # pylint: disable=too-many-arguments
    """Send log events to a S3 bucket after encoding and gzipping it."""
    data = {
        "logEvents": log_events,
        "logGroup": log_group_name,
        "logStream": log_stream_name,
        "messageType": "DATA_MESSAGE",
        "owner": get_account_id(),
        "subscriptionFilters": [filter_name],
    }

    output = io.BytesIO()
    with GzipFile(fileobj=output, mode="w") as fhandle:
        fhandle.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
    gzipped_payload = b64encode(output.getvalue()).decode("utf-8")

    delivery_stream = self.lookup_name_from_arn(delivery_stream_arn)
    self.put_s3_records(
        delivery_stream.delivery_stream_name,
        delivery_stream.version_id,
        delivery_stream.destinations[0]["S3"],
        [{"Data": gzipped_payload}],
    )
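# A minimal sketch of reversing the encoding above, assuming record_data holds
# the "Data" value handed to put_s3_records (the function name here is ours, not
# part of the codebase): base64-decode, gunzip, then parse the JSON envelope.
import base64
import gzip
import json

def decode_log_record(record_data):
    # gzip.decompress undoes the GzipFile wrapper; json.loads accepts bytes.
    return json.loads(gzip.decompress(base64.b64decode(record_data)))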
def subscribe(self, topic_arn, endpoint, protocol):
    if protocol == "sms":
        if re.search(r"[./-]{2,}", endpoint) or re.search(
            r"(^[./-]|[./-]$)", endpoint
        ):
            raise SNSInvalidParameter("Invalid SMS endpoint: {}".format(endpoint))

        reduced_endpoint = re.sub(r"[./-]", "", endpoint)

        if not is_e164(reduced_endpoint):
            raise SNSInvalidParameter("Invalid SMS endpoint: {}".format(endpoint))

    # AWS doesn't create duplicates
    old_subscription = self._find_subscription(topic_arn, endpoint, protocol)
    if old_subscription:
        return old_subscription

    topic = self.get_topic(topic_arn)
    subscription = Subscription(topic, endpoint, protocol)
    attributes = {
        "PendingConfirmation": "false",
        "ConfirmationWasAuthenticated": "true",
        "Endpoint": endpoint,
        "TopicArn": topic_arn,
        "Protocol": protocol,
        "SubscriptionArn": subscription.arn,
        "Owner": get_account_id(),
        "RawMessageDelivery": "false",
    }
    if protocol in ["http", "https"]:
        attributes["EffectiveDeliveryPolicy"] = topic.effective_delivery_policy
    subscription.attributes = attributes
    self.subscriptions[subscription.arn] = subscription
    return subscription
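# Hedged walk-through of the SMS validation above (numbers are illustrative):
# "+1-555-123-4567" has no doubled or leading/trailing separators, reduces to
# "+15551234567", and is then checked against E.164; "+1--555-123-4567" (doubled
# dash) and "555-123-4567." (trailing dot) are both rejected before the E.164
# check ever runs.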
def arn(self):
    return "arn:aws:sns:{region}:{AccountId}:app/{platform}/{name}".format(
        region=self.region,
        platform=self.platform,
        name=self.name,
        AccountId=get_account_id(),
    )
def describe_vpc_endpoints(self):
    vpc_end_points_ids = self._get_multi_param("VpcEndpointId")
    filters = self._filters_from_querystring()
    vpc_end_points = self.ec2_backend.describe_vpc_endpoints(
        vpc_end_point_ids=vpc_end_points_ids, filters=filters
    )
    template = self.response_template(DESCRIBE_VPC_ENDPOINT_RESPONSE)
    return template.render(
        vpc_end_points=vpc_end_points, account_id=get_account_id()
    )
def arn(self):
    return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
        account_id=get_account_id(),
        role_name=self.role_arn.split("/")[-1],
        session_name=self.session_name,
    )
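# Hedged example (values illustrative): for role_arn
# "arn:aws:iam::123456789012:role/my-role" and session_name "my-session", the
# property above yields
# "arn:aws:sts::123456789012:assumed-role/my-role/my-session".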
def arn(self):
    return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
        account_id=get_account_id(),
        role_name=self._owner_role_name,
        session_name=self._session_name,
    )
def get_pipeline(self, name):
    codepipeline = self.pipelines.get(name)

    if not codepipeline:
        raise PipelineNotFoundException(
            "Account '{0}' does not have a pipeline with name '{1}'".format(
                get_account_id(), name
            )
        )

    return codepipeline.pipeline, codepipeline.metadata
def get_public_access_block(self, account_id):
    # The account ID should equal the account id that is set for Moto:
    if account_id != get_account_id():
        raise WrongPublicAccessBlockAccountIdError()

    if not self.public_access_block:
        raise NoSuchPublicAccessBlockConfiguration()

    return self.public_access_block
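# Hedged usage sketch via the S3 Control client (account id illustrative):
#
#   client = boto3.client("s3control", region_name="us-east-1")
#   client.get_public_access_block(AccountId="123456789012")
#
# A mismatched AccountId surfaces as the wrong-account error raised above.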
def get_cfn_attribute(self, attribute_name):
    from moto.cloudformation.exceptions import UnformattedGetAttTemplateException

    if attribute_name == "StreamArn":
        # The region and stream timestamp are fixed placeholders in this mock.
        region = "us-east-1"
        time = "2000-01-01T00:00:00.000"
        return "arn:aws:dynamodb:{0}:{1}:table/{2}/stream/{3}".format(
            region, get_account_id(), self.name, time
        )
    raise UnformattedGetAttTemplateException()
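# Hedged template snippet (resource name illustrative) that reaches the
# StreamArn branch above via Fn::GetAtt:
#
#   {"Fn::GetAtt": ["MyTable", "StreamArn"]}
#
# Any other attribute name falls through to UnformattedGetAttTemplateException.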
def make_arn_for_wacl(name, region_name, wacl_id, scope):
    """https://docs.aws.amazon.com/waf/latest/developerguide/how-aws-waf-works.html - explains --scope (cloudfront vs regional)"""

    if scope == "REGIONAL":
        scope = "regional"
    elif scope == "CLOUDFRONT":
        scope = "global"
    return "arn:aws:wafv2:{}:{}:{}/webacl/{}/{}".format(
        region_name, get_account_id(), scope, name, wacl_id
    )
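# Hedged example (name, id, and account id illustrative):
#
#   make_arn_for_wacl("my-wacl", "us-east-1", "c04b7f83", "REGIONAL")
#
# returns "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/my-wacl/c04b7f83",
# assuming the default mock account id.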
def __init__(self, region, repository_description, repository_name):
    current_date = iso_8601_datetime_with_milliseconds(datetime.utcnow())
    self.repository_metadata = dict()
    self.repository_metadata["repositoryName"] = repository_name
    self.repository_metadata[
        "cloneUrlSsh"
    ] = "ssh://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format(
        region, repository_name
    )
    self.repository_metadata[
        "cloneUrlHttp"
    ] = "https://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format(
        region, repository_name
    )
    self.repository_metadata["creationDate"] = current_date
    self.repository_metadata["lastModifiedDate"] = current_date
    self.repository_metadata["repositoryDescription"] = repository_description
    self.repository_metadata["repositoryId"] = str(uuid.uuid4())
    self.repository_metadata["Arn"] = "arn:aws:codecommit:{0}:{1}:{2}".format(
        region, get_account_id(), repository_name
    )
    self.repository_metadata["accountId"] = get_account_id()
def get_caller_identity(self):
    template = self.response_template(GET_CALLER_IDENTITY_RESPONSE)

    # Default values in case the request does not use valid credentials generated by moto
    user_id = "AKIAIOSFODNN7EXAMPLE"
    arn = "arn:aws:sts::{account_id}:user/moto".format(account_id=get_account_id())

    access_key_id = self.get_current_user()
    assumed_role = self.backend.get_assumed_role_from_access_key(access_key_id)
    if assumed_role:
        user_id = assumed_role.user_id
        arn = assumed_role.arn

    user = iam_backends["global"].get_user_from_access_key_id(access_key_id)
    if user:
        user_id = user.id
        arn = user.arn

    return template.render(account_id=get_account_id(), user_id=user_id, arn=arn)
def arn(self):
    return "arn:aws:sns:{region}:{AccountId}:endpoint/{platform}/{name}/{id}".format(
        region=self.region,
        AccountId=get_account_id(),
        platform=self.application.platform,
        name=self.application.name,
        id=self.id,
    )