def _add_custom_cookbook_policies_to_role(self, role_ref: str, name: str):
    """Grant *role_ref* read access to the custom Chef cookbook stored in S3."""
    cookbook_url_info = parse_bucket_url(
        self._config.dev_settings.cookbook.chef_cookbook)
    cookbook_bucket = cookbook_url_info.get("bucket_name")
    cookbook_key = cookbook_url_info.get("object_key")

    # S3 ARNs carry no region/account component, hence the empty strings.
    get_object_statement = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["s3:GetObject"],
        resources=[
            self._format_arn(
                service="s3",
                region="",
                account="",
                resource=cookbook_bucket,
                resource_name=cookbook_key,
            )
        ],
    )
    get_bucket_location_statement = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["s3:GetBucketLocation"],
        resources=[
            self._format_arn(
                service="s3", region="", account="", resource=cookbook_bucket)
        ],
    )

    iam.CfnPolicy(
        Stack.of(self),
        name,
        policy_name="CustomCookbookS3Url",
        policy_document=iam.PolicyDocument(
            statements=[get_object_statement, get_bucket_location_statement]),
        roles=[role_ref],
    )
def _add_policies_to_head_node_role(self, node_name, role):
    """Attach the head-node Slurm inline policy (cluster DynamoDB table access) to *role*."""
    hash_suffix = create_hash_suffix(node_name)

    # Head node reads/writes the per-cluster DynamoDB table.
    dynamodb_statement = iam.PolicyStatement(
        sid="DynamoDBTable",
        effect=iam.Effect.ALLOW,
        actions=[
            "dynamodb:PutItem",
            "dynamodb:BatchWriteItem",
            "dynamodb:GetItem",
        ],
        resources=[
            self._format_arn(
                service="dynamodb",
                resource=f"table/{PCLUSTER_DYNAMODB_PREFIX}{self.stack_name}")
        ],
    )

    iam.CfnPolicy(
        self.stack_scope,
        f"SlurmPolicies{hash_suffix}",
        policy_name="parallelcluster-slurm-head-node",
        policy_document=iam.PolicyDocument(statements=[dynamodb_statement]),
        roles=[role],
    )
def _add_pcluster_policies_to_role(self, role_ref: str, name: str):
    """Attach the standard ParallelCluster inline policy to the given role."""
    # Statements are produced by the shared policy builder.
    pcluster_document = iam.PolicyDocument(statements=self._build_policy())
    iam.CfnPolicy(
        Stack.of(self),
        name,
        policy_name="parallelcluster",
        policy_document=pcluster_document,
        roles=[role_ref],
    )
def _add_code_build_policy(self):
    """Create and return the inline policy for the CodeBuild service role."""
    statements = []

    # Push image layers to the project's ECR repository only.
    statements.append(
        iam.PolicyStatement(
            sid="ECRRepoPolicy",
            effect=iam.Effect.ALLOW,
            actions=[
                "ecr:BatchCheckLayerAvailability",
                "ecr:CompleteLayerUpload",
                "ecr:InitiateLayerUpload",
                "ecr:PutImage",
                "ecr:UploadLayerPart",
            ],
            resources=[self._docker_images_repo.attr_arn],
        ))
    # GetAuthorizationToken is not resource-scoped by ECR, so "*" is required.
    statements.append(
        iam.PolicyStatement(
            sid="ECRPolicy",
            effect=iam.Effect.ALLOW,
            actions=["ecr:GetAuthorizationToken"],
            resources=["*"],
        ))
    statements.append(
        get_cloud_watch_logs_policy_statement(
            resource=self._format_arn(
                service="logs", account="*", region="*", resource="*")))
    # Read build artifacts out of the cluster bucket's artifact directory.
    statements.append(
        iam.PolicyStatement(
            sid="S3GetObjectPolicy",
            effect=iam.Effect.ALLOW,
            actions=["s3:GetObject", "s3:GetObjectVersion"],
            resources=[
                self._format_arn(
                    service="s3",
                    region="",
                    account="",
                    resource=f"{self.bucket.name}/{self.bucket.artifact_directory}/*",
                )
            ],
        ))

    return iam.CfnPolicy(
        self.stack_scope,
        "CodeBuildPolicy",
        policy_name="CodeBuildPolicy",
        policy_document=iam.PolicyDocument(statements=statements),
        roles=[self._code_build_role.ref],
    )
def _add_batch_head_node_policies_to_role(self):
    """Allow the head node to pass cluster-scoped IAM roles to AWS Batch."""
    # IAM ARNs are global, so the region component is empty.
    pass_role_arn = self._format_arn(
        service="iam",
        region="",
        resource=f"role{self._cluster_scoped_iam_path()}*",
    )
    pass_role_statement = iam.PolicyStatement(
        sid="BatchJobPassRole",
        effect=iam.Effect.ALLOW,
        actions=["iam:PassRole"],
        resources=[pass_role_arn],
    )
    iam.CfnPolicy(
        self,
        "ParallelClusterBatchPoliciesHeadNode",
        policy_name="parallelcluster-awsbatch-head-node",
        policy_document=iam.PolicyDocument(statements=[pass_role_statement]),
        roles=[self.head_node_instance_role.ref],
    )
def _add_s3_access_policies_to_role(self, node: Union[HeadNode, BaseQueue],
                                    role_ref: str, name: str):
    """Attach S3 policies to given role."""
    read_only_arns = []
    read_write_arns = []
    for s3_access in node.iam.s3_access:
        for resource_regex in s3_access.resource_regex:
            arn = self._format_arn(
                service="s3", resource=resource_regex, region="", account="")
            # Route each ARN to the matching bucket list by access mode.
            bucket_list = (read_write_arns
                           if s3_access.enable_write_access else read_only_arns)
            bucket_list.append(arn)

    # Policy starts empty; statements are added only for non-empty groups.
    s3_access_policy = iam.CfnPolicy(
        Stack.of(self),
        name,
        policy_document=iam.PolicyDocument(statements=[]),
        roles=[role_ref],
        policy_name="S3Access",
    )
    if read_only_arns:
        s3_access_policy.policy_document.add_statements(
            iam.PolicyStatement(
                sid="S3Read",
                effect=iam.Effect.ALLOW,
                actions=["s3:Get*", "s3:List*"],
                resources=read_only_arns,
            ))
    if read_write_arns:
        s3_access_policy.policy_document.add_statements(
            iam.PolicyStatement(
                sid="S3ReadWrite",
                effect=iam.Effect.ALLOW,
                actions=["s3:*"],
                resources=read_write_arns,
            ))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision a Kinesis Firehose delivery stream with Lambda transform,
    S3 destination/backup, plus the role and policy used by the pipeline.
    """
    super().__init__(scope, id, **kwargs)

    role01 = iam.CfnRole(
        self,
        id="firehose01_role",
        assume_role_policy_document={
            "Statement": [{
                "Action": "sts:AssumeRole",
                "Effect": "Allow",
                # NOTE(review): the trusted principal is lambda.amazonaws.com,
                # yet the delivery stream below uses a different hard-coded
                # role ARN — confirm whether firehose.amazonaws.com should
                # also (or instead) be trusted here.
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                }
            }],
            "Version": "2012-10-17"
        },
        managed_policy_arns=[
            "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole"
        ])

    # S3 permissions Firehose needs to land data in the destination bucket.
    policy01 = iam.CfnPolicy(
        self,
        id="firehose01_policy",
        policy_name="firehose01_policy",
        policy_document={
            'Version': "2012-10-17",
            'Statement': [{
                "Action": [
                    's3:AbortMultipartUpload', 's3:GetBucketLocation',
                    's3:GetObject', 's3:ListBucket',
                    's3:ListBucketMultipartUploads', 's3:PutObject'
                ],
                "Resource": ['*'],
                "Effect": "Allow"
            }]
        },
        roles=[role01.ref])

    delivery_stream = kinesisfirehose.CfnDeliveryStream(
        self,
        id="firehose01",
        delivery_stream_name="firehose01",
        extended_s3_destination_configuration={
            # Destination bucket.
            'bucketArn': 'arn:aws:s3:::fluent-bit-s3',
            # Compression: gzip (legacy scheme; new scheme TBD).
            'compressionFormat': 'GZIP',
            # FIXED: these nested settings must be objects with camelCase
            # keys, not PascalCase keys mapping to bare strings — otherwise
            # CloudFormation rejects the template.
            # Format conversion (ORC/Parquet) disabled.
            'dataFormatConversionConfiguration': {
                'enabled': False
            },
            # No server-side encryption.
            'encryptionConfiguration': {
                'noEncryptionConfig': 'NoEncryption'
            },
            'bufferingHints': {
                'intervalInSeconds': 600,
                'sizeInMBs': 128
            },
            # FIXED: the property is a list named "processors", each entry
            # using lowercase "type"/"parameters" keys.
            'processingConfiguration': {
                'enabled': True,
                'processors': [{
                    'type': 'Lambda',
                    'parameters': [{
                        'parameterName': 'BufferIntervalInSeconds',
                        'parameterValue': '60'
                    }, {
                        'parameterName': 'BufferSizeInMBs',
                        'parameterValue': '3'
                    }, {
                        'parameterName': 'LambdaArn',
                        'parameterValue':
                        'arn:aws:lambda:ap-southeast-1:596030579944:function:firehose-test'
                    }]
                }]
            },
            'roleArn':
            'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role',
            # FIXED: backup config is ignored unless s3BackupMode is Enabled.
            's3BackupMode': 'Enabled',
            's3BackupConfiguration': {
                'bucketArn': 'arn:aws:s3:::fluent-bit-s3',
                'bufferingHints': {
                    'intervalInSeconds': 600,
                    'sizeInMBs': 128
                },
                'compressionFormat': 'GZIP',
                'encryptionConfiguration': {
                    'noEncryptionConfig': 'NoEncryption'
                },
                'prefix': "/backup",
                'roleArn':
                'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role'
            }
        },
    )
def _add_private_hosted_zone(self):
    """Create (or reference) the cluster's private Route53 hosted zone.

    Also wires up the head-node Route53 policy and the cleanup lambda /
    custom resource that delete DNS records on teardown. Returns the
    hosted-zone object (CustomDns or CfnHostedZone).
    """
    if self._condition_custom_cluster_dns():
        # A pre-existing hosted zone was supplied in the cluster config.
        hosted_zone_id = self.config.scheduling.settings.dns.hosted_zone_id
        cluster_hosted_zone = CustomDns(
            ref=hosted_zone_id, name=self.cluster_dns_domain.value_as_string)
    else:
        # Otherwise create a private hosted zone bound to the cluster VPC.
        cluster_hosted_zone = route53.CfnHostedZone(
            self.stack_scope,
            "Route53HostedZone",
            name=self.cluster_dns_domain.value_as_string,
            vpcs=[
                route53.CfnHostedZone.VPCProperty(
                    vpc_id=self.config.vpc_id, vpc_region=self._stack_region)
            ],
        )
    # If Headnode InstanceRole is created by ParallelCluster, add Route53 policy for InstanceRole
    head_node_role_info = self.instance_roles.get("HeadNode")
    if head_node_role_info:
        iam.CfnPolicy(
            self.stack_scope,
            "ParallelClusterSlurmRoute53Policies",
            policy_name="parallelcluster-slurm-route53",
            policy_document=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    sid="Route53Add",
                    effect=iam.Effect.ALLOW,
                    actions=["route53:ChangeResourceRecordSets"],
                    resources=[
                        # Route53 ARNs have no region/account component.
                        self._format_arn(
                            service="route53",
                            region="",
                            account="",
                            resource=f"hostedzone/{cluster_hosted_zone.ref}",
                        ),
                    ],
                ),
            ]),
            roles=[head_node_role_info.get("RoleRef")],
        )
    cleanup_route53_lambda_execution_role = None
    if self.cleanup_lambda_role:
        # ParallelCluster manages the lambda role: grant it record cleanup
        # plus CloudWatch Logs permissions.
        cleanup_route53_lambda_execution_role = add_lambda_cfn_role(
            scope=self.stack_scope,
            function_id="CleanupRoute53",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "route53:ListResourceRecordSets",
                        "route53:ChangeResourceRecordSets"
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        self._format_arn(
                            service="route53",
                            region="",
                            account="",
                            resource=f"hostedzone/{cluster_hosted_zone.ref}",
                        ),
                    ],
                    sid="Route53DeletePolicy",
                ),
                get_cloud_watch_logs_policy_statement(
                    resource=self._format_arn(
                        service="logs", account="*", region="*",
                        resource="*")),
            ],
        )
    # Falls back to the customer-supplied lambda role when cleanup_lambda_role
    # is unset.
    cleanup_route53_lambda = PclusterLambdaConstruct(
        scope=self.stack_scope,
        id="CleanupRoute53FunctionConstruct",
        function_id="CleanupRoute53",
        bucket=self.bucket,
        config=self.config,
        execution_role=cleanup_route53_lambda_execution_role.attr_arn
        if cleanup_route53_lambda_execution_role else
        self.config.iam.roles.custom_lambda_resources,
        handler_func="cleanup_resources",
    ).lambda_func
    # Custom resource triggers DNS record deletion when the stack is removed.
    self.cleanup_route53_custom_resource = CfnCustomResource(
        self.stack_scope,
        "CleanupRoute53CustomResource",
        service_token=cleanup_route53_lambda.attr_arn,
    )
    self.cleanup_route53_custom_resource.add_property_override(
        "ClusterHostedZone", cluster_hosted_zone.ref)
    self.cleanup_route53_custom_resource.add_property_override(
        "Action", "DELETE_DNS_RECORDS")
    self.cleanup_route53_custom_resource.add_property_override(
        "ClusterDNSDomain", cluster_hosted_zone.name)
    CfnOutput(
        self.stack_scope,
        "ClusterHostedZone",
        description=
        "Id of the private hosted zone created within the cluster",
        value=cluster_hosted_zone.ref,
    )
    return cluster_hosted_zone
def _add_policies_to_role(self, node_name, role):
    """Attach the Slurm inline policy to *role*.

    Head nodes get a broad policy (DynamoDB, EC2 run/terminate, PassRole,
    S3, CloudFormation, DCV license); compute nodes only get read access
    to the cluster DynamoDB table.
    """
    suffix = create_hash_suffix(node_name)
    if node_name == "HeadNode":
        policy_statements = [
            {
                # Full CRUD on the per-cluster DynamoDB table.
                "sid": "DynamoDBTable",
                "actions": [
                    "dynamodb:PutItem",
                    "dynamodb:BatchWriteItem",
                    "dynamodb:GetItem",
                    "dynamodb:DeleteItem",
                    "dynamodb:DescribeTable",
                ],
                "effect": iam.Effect.ALLOW,
                "resources": [
                    self._format_arn(
                        service="dynamodb",
                        resource=
                        f"table/{PCLUSTER_DYNAMODB_PREFIX}{self.stack_name}")
                ],
            },
            {
                # Termination restricted by tag to this cluster's instances.
                "sid": "EC2Terminate",
                "effect": iam.Effect.ALLOW,
                "actions": ["ec2:TerminateInstances"],
                "resources": ["*"],
                "conditions": {
                    "StringEquals": {
                        f"ec2:ResourceTag/{PCLUSTER_CLUSTER_NAME_TAG}":
                        self.stack_name
                    }
                },
            },
            {
                # RunInstances scoped to cluster subnets, the configured
                # key pair and the per-queue AMIs (image ARNs are account-less).
                "sid": "EC2RunInstances",
                "effect": iam.Effect.ALLOW,
                "actions": ["ec2:RunInstances"],
                "resources": [
                    self._format_arn(
                        service="ec2", resource=f"subnet/{subnet_id}")
                    for subnet_id in self.config.compute_subnet_ids
                ] + [
                    self._format_arn(
                        service="ec2", resource="network-interface/*"),
                    self._format_arn(service="ec2", resource="instance/*"),
                    self._format_arn(service="ec2", resource="volume/*"),
                    self._format_arn(
                        service="ec2",
                        resource=
                        f"key-pair/{self.config.head_node.ssh.key_name}"),
                    self._format_arn(
                        service="ec2", resource="security-group/*"),
                    self._format_arn(
                        service="ec2", resource="launch-template/*"),
                    self._format_arn(
                        service="ec2", resource="placement-group/*"),
                ] + [
                    self._format_arn(
                        service="ec2",
                        resource=f"image/{queue_ami}",
                        account="")
                    for _, queue_ami in self.config.image_dict.items()
                ],
            },
            {
                # PassRole limited to the resources computed for this cluster.
                "sid": "PassRole",
                "actions": ["iam:PassRole"],
                "effect": iam.Effect.ALLOW,
                "resources":
                self._generate_head_node_pass_role_resources(),
            },
            {
                # Describe/tag/attach actions are not resource-scoped by EC2.
                "sid": "EC2",
                "effect": iam.Effect.ALLOW,
                "actions": [
                    "ec2:DescribeInstances",
                    "ec2:DescribeInstanceStatus",
                    "ec2:CreateTags",
                    "ec2:DescribeVolumes",
                    "ec2:AttachVolume",
                ],
                "resources": ["*"],
            },
            {
                # Cluster bucket and its artifact directory.
                "sid": "ResourcesS3Bucket",
                "effect": iam.Effect.ALLOW,
                "actions": ["s3:*"],
                "resources": [
                    self._format_arn(
                        service="s3",
                        resource=self.bucket.name,
                        region="",
                        account=""),
                    self._format_arn(
                        service="s3",
                        resource=
                        f"{self.bucket.name}/{self.bucket.artifact_directory}/*",
                        region="",
                        account="",
                    ),
                ],
            },
            {
                "sid": "Cloudformation",
                "actions": [
                    "cloudformation:DescribeStackResource",
                    "cloudformation:SignalResource",
                ],
                "effect": iam.Effect.ALLOW,
                "resources": [
                    self._format_arn(
                        service="cloudformation",
                        resource=f"stack/{self.stack_name}/*"),
                    # ToDo: This resource is for substack. Check if this is necessary for pcluster3
                    self._format_arn(
                        service="cloudformation",
                        resource=f"stack/{self.stack_name}-*/*"),
                ],
            },
            {
                # Read access to the regional NICE DCV license bucket.
                "sid": "DcvLicense",
                "actions": ["s3:GetObject"],
                "effect": iam.Effect.ALLOW,
                "resources": [
                    self._format_arn(
                        service="s3",
                        resource="dcv-license.{0}/*".format(
                            self._stack_region),
                        region="",
                        account="",
                    )
                ],
            },
        ]
        policy_name = "parallelcluster-slurm-head-node"
    else:
        # Compute nodes only query the cluster table and its indexes.
        policy_statements = [
            {
                "sid": "DynamoDBTableQuery",
                "effect": iam.Effect.ALLOW,
                "actions": ["dynamodb:Query"],
                "resources": [
                    self._format_arn(
                        service="dynamodb",
                        resource=
                        f"table/{PCLUSTER_DYNAMODB_PREFIX}{self.stack_name}"),
                    self._format_arn(
                        service="dynamodb",
                        resource=
                        f"table/{PCLUSTER_DYNAMODB_PREFIX}{self.stack_name}/index/*",
                    ),
                ],
            },
        ]
        policy_name = "parallelcluster-slurm-compute"
    iam.CfnPolicy(
        self.stack_scope,
        f"SlurmPolicies{suffix}",
        policy_name=policy_name,
        policy_document=iam.PolicyDocument(statements=[
            iam.PolicyStatement(**statement)
            for statement in policy_statements
        ]),
        roles=[role],
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Exercise 5 # Set Parameters password = core.CfnParameter( self, "Password", no_echo=True, description="New account password", min_length=1, max_length=41, constraint_description= "the password must be between 1 and 41 characters", default="Password") # CFNUser cfn_user = iam.CfnUser( self, "CFNUser", user_name="EdXProjectUser", login_profile={"password": password.value_as_string}) # CFNUserGroup cfn_user_group = iam.CfnGroup( self, "CFNUserGroup", ) # Users users_to_group = iam.CfnUserToGroupAddition( self, "UserToGroupAddition", group_name=cfn_user_group.ref, users=[cfn_user.ref]) # CFNUserPolicies cfn_user_policies = iam.CfnPolicy( self, "CFNUserPolicies", policy_name="edXProjectPolicy", policy_document={ "Version": "2012-10-17", "Statement": [{ "Sid": "Sid1", "Effect": "Allow", "Action": [ "iam:*", "rds:*", "sns:*", "cloudformation:*", "rekognition:*", "ec2:*", "cognito-idp:*", "sqs:*", "xray:*", "s3:*", "elasticloadbalancing:*", "cloud9:*", "lambda:*", "tag:GetResources", "logs:*", "kms:ListRetirableGrants", "kms:GetKeyPolicy", "kms:ListResourceTags", "kms:ReEncryptFrom", "kms:ListGrants", "kms:GetParametersForImport", "kms:ListKeys", "kms:GetKeyRotationStatus", "kms:ListAliases", "kms:ReEncryptTo", "kms:DescribeKey" ], "Resource": "*", }] }, groups=[cfn_user_group.ref]) # CFNKeys cfn_keys = iam.CfnAccessKey(self, "CFNKeys", user_name=cfn_user.ref) # Output core.CfnOutput(self, "AccessKeyOutput", value=cfn_keys.ref, description="AWSAccessKeyId of new user", export_name="AccessKey") core.CfnOutput(self, "SecretKeyOutput", value=cfn_keys.attr_secret_access_key, description="AWSSecretKey of new user", export_name="SecretKey") core.CfnOutput(self, "edXProjectUser", value=cfn_user.attr_arn, description="edXProjectUser", export_name="edXProjectUser")
def __init__(self, scope: core.Construct, id: str, data, iam_vars) -> None:
    """Build a complete demo environment: VPC with public/private subnets,
    NAT, security groups, an IAM role, a Lambda function (zipped and
    uploaded to S3 as a side effect of synthesis) and an ALB fronting it.

    ``data`` supplies CIDRs ("vpc", "subnets.public", "subnets.private");
    ``iam_vars`` supplies "assume" and "policy" IAM statement documents.
    """
    super().__init__(scope, id)
    # VPC
    vpc = ec2.CfnVPC(self, "cdk-vpc", cidr_block=data["vpc"])
    igw = ec2.CfnInternetGateway(self, id="igw")
    ec2.CfnVPCGatewayAttachment(self,
                                id="igw-attach",
                                vpc_id=vpc.ref,
                                internet_gateway_id=igw.ref)
    public_route_table = ec2.CfnRouteTable(self,
                                           id="public_route_table",
                                           vpc_id=vpc.ref)
    ec2.CfnRoute(self,
                 id="public_route",
                 route_table_id=public_route_table.ref,
                 destination_cidr_block="0.0.0.0/0",
                 gateway_id=igw.ref)
    public_subnets = []
    # One public subnet per CIDR, spread across AZs by index.
    for i, s in enumerate(data["subnets"]["public"]):
        subnet = ec2.CfnSubnet(self,
                               id="public_{}".format(s),
                               cidr_block=s,
                               vpc_id=vpc.ref,
                               availability_zone=core.Fn.select(
                                   i, core.Fn.get_azs()),
                               map_public_ip_on_launch=True)
        public_subnets.append(subnet)
        ec2.CfnSubnetRouteTableAssociation(
            self,
            id="public_{}_association".format(s),
            route_table_id=public_route_table.ref,
            subnet_id=subnet.ref)
    # Single NAT gateway in the first public subnet serves all private subnets.
    eip = ec2.CfnEIP(self, id="natip")
    nat = ec2.CfnNatGateway(self,
                            id="nat",
                            allocation_id=eip.attr_allocation_id,
                            subnet_id=public_subnets[0].ref)
    private_route_table = ec2.CfnRouteTable(self,
                                            id="private_route_table",
                                            vpc_id=vpc.ref)
    ec2.CfnRoute(self,
                 id="private_route",
                 route_table_id=private_route_table.ref,
                 destination_cidr_block="0.0.0.0/0",
                 nat_gateway_id=nat.ref)
    private_subnets = []
    for i, s in enumerate(data["subnets"]["private"]):
        subnet = ec2.CfnSubnet(self,
                               id="private_{}".format(s),
                               cidr_block=s,
                               vpc_id=vpc.ref,
                               availability_zone=core.Fn.select(
                                   i, core.Fn.get_azs()),
                               map_public_ip_on_launch=False)
        private_subnets.append(subnet)
        ec2.CfnSubnetRouteTableAssociation(
            self,
            id="private_{}_association".format(s),
            route_table_id=private_route_table.ref,
            subnet_id=subnet.ref)
    # Security groups
    lb_sg = ec2.CfnSecurityGroup(self,
                                 id="lb",
                                 group_description="LB SG",
                                 vpc_id=vpc.ref)
    lambda_sg = ec2.CfnSecurityGroup(self,
                                     id="lambda",
                                     group_description="Lambda SG",
                                     vpc_id=vpc.ref)
    # 0.0.0.0/0 wrapped in a prefix list so rules can reference it by id.
    public_prefix = ec2.CfnPrefixList(self,
                                      id="cidr_prefix",
                                      address_family="IPv4",
                                      max_entries=1,
                                      prefix_list_name="public",
                                      entries=[{
                                          "cidr": "0.0.0.0/0",
                                          "description": "Public"
                                      }])
    # Declarative rule table: LB accepts HTTP from anywhere and talks to the
    # Lambda SG; Lambda accepts HTTP from the LB and egresses anywhere.
    _sg_rules = [{
        'sg': lb_sg.attr_group_id,
        'rules': [{
            "direction": "ingress",
            "description": "HTTP from Internet",
            "from_port": 80,
            "to_port": 80,
            "protocol": "tcp",
            "cidr_blocks": public_prefix.ref
        }, {
            "direction": "egress",
            "description": "LB to Lambda",
            "from_port": 80,
            "to_port": 80,
            "protocol": "tcp",
            "source_security_group_id": lambda_sg.attr_group_id
        }]
    }, {
        "sg": lambda_sg.attr_group_id,
        "rules": [{
            "direction": "ingress",
            "description": "HTTP from LB",
            "from_port": 80,
            "to_port": 80,
            "protocol": "tcp",
            "source_security_group_id": lb_sg.attr_group_id
        }, {
            "direction": "egress",
            "description": "All to Internet",
            "from_port": 0,
            "to_port": 65535,
            "protocol": "tcp",
            "cidr_blocks": public_prefix.ref
        }]
    }]
    for ruleset in _sg_rules:
        for rule in ruleset["rules"]:
            # Each rule carries either a prefix-list ref or a peer SG id;
            # the unused keyword is passed as None.
            if rule["direction"] == "ingress":
                ec2.CfnSecurityGroupIngress(
                    self,
                    id=rule["description"].replace(" ", "_"),
                    description=rule["description"],
                    to_port=rule["to_port"],
                    from_port=rule["from_port"],
                    ip_protocol=rule["protocol"],
                    group_id=ruleset["sg"],
                    source_prefix_list_id=rule["cidr_blocks"]
                    if "cidr_blocks" in rule else None,
                    source_security_group_id=rule["source_security_group_id"]
                    if "source_security_group_id" in rule else None)
            else:
                ec2.CfnSecurityGroupEgress(
                    self,
                    id=rule["description"].replace(" ", "_"),
                    description=rule["description"],
                    to_port=rule["to_port"],
                    from_port=rule["from_port"],
                    ip_protocol=rule["protocol"],
                    group_id=ruleset["sg"],
                    destination_prefix_list_id=rule["cidr_blocks"]
                    if "cidr_blocks" in rule else None,
                    destination_security_group_id=rule[
                        "source_security_group_id"]
                    if "source_security_group_id" in rule else None)
    # IAM
    assume_policy_doc = iam.PolicyDocument()
    for statement in iam_vars["assume"]["Statement"]:
        _statement = iam.PolicyStatement(actions=[statement["Action"]], )
        _statement.add_service_principal(statement["Principal"]["Service"])
        assume_policy_doc.add_statements(_statement)
    role = iam.CfnRole(self,
                       id="iam_role",
                       path="/",
                       assume_role_policy_document=assume_policy_doc)
    role_policy_doc = iam.PolicyDocument()
    for statement in iam_vars["policy"]["Statement"]:
        # NOTE(review): every policy statement is granted on Resource "*".
        _statement = iam.PolicyStatement(actions=statement["Action"],
                                         resources=["*"])
        role_policy_doc.add_statements(_statement)
    policy = iam.CfnPolicy(self,
                           id="iam_policy",
                           policy_document=role_policy_doc,
                           policy_name="cdkPolicy",
                           roles=[role.ref])
    # Lambda
    # Side effect at synth time: zip the lambda source and upload it to S3.
    shutil.make_archive("../lambda", 'zip', "../lambda/")
    s3_client = boto3.client('s3')
    s3_client.upload_file("../lambda.zip", "cloudevescops-zdays-demo",
                          "cdk.zip")
    function = lmd.CfnFunction(self,
                               id="lambda_function",
                               handler="lambda.lambda_handler",
                               role=role.attr_arn,
                               runtime="python3.7",
                               code={
                                   "s3Bucket": "cloudevescops-zdays-demo",
                                   "s3Key": "cdk.zip"
                               },
                               vpc_config={
                                   "securityGroupIds": [lambda_sg.ref],
                                   "subnetIds":
                                   [s.ref for s in private_subnets]
                               },
                               environment={"variables": {
                                   "TOOL": "CDK"
                               }})
    # LB
    lb = alb.CfnLoadBalancer(self,
                             id="alb",
                             name="lb-cdk",
                             scheme="internet-facing",
                             type="application",
                             subnets=[s.ref for s in public_subnets],
                             security_groups=[lb_sg.ref])
    # Allow the ALB service to invoke the function before targeting it.
    lmd.CfnPermission(self,
                      id="lambda_permis",
                      action="lambda:InvokeFunction",
                      function_name=function.ref,
                      principal="elasticloadbalancing.amazonaws.com")
    tg = alb.CfnTargetGroup(self,
                            id="alb_tg",
                            name="lambda-cdk",
                            target_type="lambda",
                            health_check_enabled=True,
                            health_check_interval_seconds=40,
                            health_check_path="/",
                            health_check_timeout_seconds=30,
                            targets=[{
                                "id":
                                function.get_att("Arn").to_string()
                            }],
                            matcher={"httpCode": "200"})
    alb.CfnListener(self,
                    id="listener",
                    default_actions=[{
                        "type": "forward",
                        "targetGroupArn": tg.ref
                    }],
                    load_balancer_arn=lb.ref,
                    port=80,
                    protocol="HTTP")
    core.CfnOutput(self, id="fqdn", value=lb.attr_dns_name)
def _add_batch_head_node_policies_to_role(self):
    """Attach the AWS Batch head-node inline policy to the head-node role.

    Grants the head node the CloudFormation, EC2, S3, PassRole and DCV
    license permissions it needs when the cluster scheduler is AWS Batch.
    """
    iam.CfnPolicy(
        self,
        "ParallelClusterBatchPoliciesHeadNode",
        policy_name="parallelcluster-awsbatch-head-node",
        policy_document=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                sid="Cloudformation",
                actions=[
                    "cloudformation:DescribeStacks",
                    "cloudformation:DescribeStackResource",
                    "cloudformation:SignalResource",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="cloudformation",
                        resource=f"stack/{self.stack_name}/*"),
                    # ToDo: This resource is for substack. Check if this is necessary for pcluster3
                    self._format_arn(
                        service="cloudformation",
                        resource=f"stack/{self.stack_name}-*/*"),
                ],
            ),
            # Describe/attach actions are not resource-scoped by EC2.
            iam.PolicyStatement(
                sid="EC2",
                effect=iam.Effect.ALLOW,
                actions=[
                    "ec2:DescribeInstances",
                    "ec2:DescribeInstanceStatus",
                    "ec2:DescribeVolumes",
                    "ec2:AttachVolume",
                ],
                resources=["*"],
            ),
            # Write access limited to the batch artifact directory.
            iam.PolicyStatement(
                sid="S3PutObj",
                actions=["s3:PutObject"],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="s3",
                        resource=
                        f"{self.bucket.name}/{self.bucket.artifact_directory}/batch/*",
                        region="",
                        account="",
                    )
                ],
            ),
            # Pass only cluster-scoped roles to Batch jobs (IAM ARNs are
            # global, hence empty region).
            iam.PolicyStatement(
                sid="BatchJobPassRole",
                actions=["iam:PassRole"],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="iam",
                        region="",
                        resource=f"role{self._cluster_scoped_iam_path()}*",
                    )
                ],
            ),
            # Read access to the regional NICE DCV license bucket.
            iam.PolicyStatement(
                sid="DcvLicense",
                actions=["s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="s3",
                        resource="dcv-license.{0}/*".format(
                            self._stack_region),
                        region="",
                        account="",
                    )
                ],
            ),
        ]),
        roles=[self.instance_roles.get("HeadNode").get("RoleRef")],
    )