def _validate(self, url, retries=3, fail_on_https_error: bool = False, fail_on_s3_error: bool = False):
    """Validate that ``url`` is an https or s3 URL and that the target is reachable.

    :param url: the URL to validate.
    :param retries: number of times to retry after a ConnectionError (5s pause between tries).
    :param fail_on_https_error: escalate https reachability problems (forwarded to _validate_https_uri).
    :param fail_on_s3_error: escalate s3 access problems (forwarded to _validate_s3_uri).
    """
    scheme = get_url_scheme(url)
    if scheme in ["https", "s3"]:
        try:
            if scheme == "s3":
                self._validate_s3_uri(url, fail_on_error=fail_on_s3_error)
            else:
                self._validate_https_uri(url, fail_on_error=fail_on_https_error)
        except ConnectionError as e:
            if retries > 0:
                time.sleep(5)
                # BUG FIX: forward the fail_on_* flags on retry; previously the
                # recursive call omitted them, so they silently reset to their
                # defaults (False) after the first ConnectionError.
                self._validate(
                    url,
                    retries=retries - 1,
                    fail_on_https_error=fail_on_https_error,
                    fail_on_s3_error=fail_on_s3_error,
                )
            else:
                self._add_failure(f"The url '{url}' causes ConnectionError: {e}.", FailureLevel.WARNING)
    else:
        self._add_failure(
            f"The value '{url}' is not a valid URL, choose URL with 'https' or 's3' prefix.",
            FailureLevel.ERROR,
        )
def _validate(self, url):
    """Validate that ``url`` uses an https or s3 scheme and, for https, that it is reachable."""
    scheme = get_url_scheme(url)
    # Guard clause: reject anything that is not https/s3 up front.
    if scheme not in ["https", "s3"]:
        self._add_failure(
            f"The value '{url}' is not a valid URL, choose URL with 'https' or 's3' prefix.",
            FailureLevel.ERROR,
        )
        return
    # S3 URIs are delegated to the dedicated S3 validator.
    if scheme == "s3":
        self._validate_s3_uri(url)
        return
    # For https, probe the URL and downgrade reachability problems to warnings.
    try:
        with urlopen(url):  # nosec nosemgrep
            pass
    except HTTPError as e:
        self._add_failure(
            f"The url '{url}' causes HTTPError, the error code is '{e.code}',"
            f" the error reason is '{e.reason}'.",
            FailureLevel.WARNING,
        )
    except URLError as e:
        self._add_failure(
            f"The url '{url}' causes URLError, the error reason is '{e.reason}'.",
            FailureLevel.WARNING,
        )
    except ValueError:
        self._add_failure(
            f"The value '{url}' is not a valid URL.",
            FailureLevel.ERROR,
        )
def _validate(self, url):
    """Validate that ``url`` is an S3 URI pointing at a bucket this account can reach."""
    # Anything that is not an s3:// URI fails immediately.
    if get_url_scheme(url) != "s3":
        self._add_failure(f"The value '{url}' is not a valid S3 URI.", FailureLevel.ERROR)
        return
    try:
        bucket_name = get_bucket_name_from_s3_url(url)
        AWSApi.instance().s3.head_bucket(bucket_name=bucket_name)
    except AWSClientError as error:
        # Surface the AWS error message verbatim as a validation failure.
        self._add_failure(str(error), FailureLevel.ERROR)
def validate_component_value(self, data, **kwargs):
    """Validate component value format.

    :param data: deserialized component dict with "type" and "value" keys.
    :raises ValidationError: when an "arn" component value lacks the 'arn' prefix,
        or a "script" component value is not an https/s3 URL.
    """
    # Renamed from `type` to avoid shadowing the builtin.
    component_type = data.get("type")
    value = data.get("value")
    if component_type == "arn" and not value.startswith("arn"):
        raise ValidationError(
            message="The Type in Component is arn, the value '{0}' is invalid. "
            "Choose a value with 'arn' prefix.".format(value),
            field_name="Value",
        )
    if component_type == "script" and get_url_scheme(value) not in ["https", "s3"]:
        raise ValidationError(
            message="The Type in Component is script, the value '{0}' is invalid. "
            "Choose a value with 'https' or 's3' prefix url.".format(value),
            field_name="Value",
        )
def wrap_script_to_component(url):
    """Wrap script to custom component data property."""
    scheme = get_url_scheme(url)
    # Load the custom-script component template bundled with the package.
    template_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "resources",
        "imagebuilder",
        "custom_script.yaml",
    )
    with open(template_path, "r", encoding="utf-8") as template_file:
        template = yaml.safe_load(template_file)
    # Prepend the url action, then the scheme action, so the scheme action
    # ends up first in the step list.
    steps = template["phases"][0]["steps"]
    steps.insert(0, _generate_action("ScriptUrl", "set -v\necho {0}\n".format(url)))
    steps.insert(0, _generate_action("ScriptUrlScheme", "set -v\necho {0}\n".format(scheme)))
    return yaml.dump(template)
def _add_default_instance_role(self, cleanup_policy_statements, build_tags):
    """Set default instance role in imagebuilder cfn template."""
    # AWS-managed policies required for SSM-based management and Image Builder.
    managed_policy_arns = [
        Fn.sub("arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"),
        Fn.sub("arn:${AWS::Partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder"),
    ]
    if self.config.build.iam and self.config.build.iam.additional_iam_policies:
        # NOTE(review): the guard checks `additional_iam_policies` while the loop
        # reads `additional_iam_policy_arns` — presumably the latter is derived
        # from the former; confirm against the build Iam config model.
        for policy in self.config.build.iam.additional_iam_policy_arns:
            managed_policy_arns.append(policy)

    # Inline policy: let the build instance tag the produced AMI and modify its
    # image attributes.
    instancerole_policy_document = iam.PolicyDocument(
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[
                    self.format_arn(
                        service="ec2",
                        account="",
                        resource="image",
                        resource_name="*",
                    )
                ],
                actions=["ec2:CreateTags", "ec2:ModifyImageAttribute"],
            )
        ]
    )
    if self.config.build.components:
        for custom_component in self.config.build.components:
            # Check custom component is script, and the url is S3 url
            if custom_component.type == "script" and utils.get_url_scheme(custom_component.value) == "s3":
                bucket_info = parse_bucket_url(custom_component.value)
                bucket_name = bucket_info.get("bucket_name")
                object_key = bucket_info.get("object_key")
                # Grant read access to each S3-hosted component script.
                instancerole_policy_document.add_statements(
                    iam.PolicyStatement(
                        actions=["s3:GetObject"],
                        effect=iam.Effect.ALLOW,
                        resources=[
                            self.format_arn(
                                region="",
                                service="s3",
                                account="",
                                resource=bucket_name,
                                resource_name=object_key,
                            )
                        ],
                    ),
                )
    instancerole_policy = iam.CfnRole.PolicyProperty(
        policy_name="InstanceRoleInlinePolicy",
        policy_document=instancerole_policy_document,
    )
    instance_role_resource = iam.CfnRole(
        self,
        "InstanceRole",
        path=IAM_ROLE_PATH,
        managed_policy_arns=managed_policy_arns,
        assume_role_policy_document=get_assume_role_policy_document("ec2.{0}".format(self.url_suffix)),
        policies=[
            instancerole_policy,
        ],
        tags=build_tags,
        role_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
    )
    if not self.custom_cleanup_lambda_role:
        # Register a delete statement so the cleanup lambda can remove this role.
        self._add_resource_delete_policy(
            cleanup_policy_statements,
            ["iam:DeleteRole"],
            [
                self.format_arn(
                    service="iam",
                    region="",
                    resource="role",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )
    return instance_role_resource
def test_get_url_scheme(url, expect_output):
    """Verify get_url_scheme returns the expected scheme for each parametrized url."""
    actual_scheme = utils.get_url_scheme(url)
    assert_that(actual_scheme).is_equal_to(expect_output)
def _build_policy(self) -> List[iam.PolicyStatement]:
    """Build the IAM policy statements for the head node.

    Returns the base EC2/S3/CloudFormation/DCV statements, plus scheduler- and
    directory-service-specific statements when the configuration requires them.
    """
    policy = [
        # Describe/tag instances and attach volumes; these actions are granted
        # on all resources ("*").
        iam.PolicyStatement(
            sid="Ec2",
            actions=[
                "ec2:DescribeInstanceAttribute",
                "ec2:DescribeInstances",
                "ec2:DescribeInstanceStatus",
                "ec2:CreateTags",
                "ec2:DescribeVolumes",
                "ec2:AttachVolume",
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ),
        # Read access to the regional "<region>-aws-parallelcluster" bucket.
        iam.PolicyStatement(
            sid="S3GetObj",
            actions=["s3:GetObject"],
            effect=iam.Effect.ALLOW,
            resources=[
                self._format_arn(
                    service="s3",
                    resource="{0}-aws-parallelcluster/*".format(Stack.of(self).region),
                    region="",
                    account="",
                )
            ],
        ),
        # Full S3 access to the cluster bucket and its artifact directory.
        iam.PolicyStatement(
            sid="ResourcesS3Bucket",
            effect=iam.Effect.ALLOW,
            actions=["s3:*"],
            resources=[
                self._format_arn(service="s3", resource=self._cluster_bucket.name, region="", account=""),
                self._format_arn(
                    service="s3",
                    resource=f"{self._cluster_bucket.name}/{self._cluster_bucket.artifact_directory}/*",
                    region="",
                    account="",
                ),
            ],
        ),
        # Describe and signal the cluster's own stack (and "<stack>-*" substacks).
        iam.PolicyStatement(
            sid="CloudFormation",
            actions=[
                "cloudformation:DescribeStacks",
                "cloudformation:DescribeStackResource",
                "cloudformation:SignalResource",
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                self._format_arn(service="cloudformation", resource=f"stack/{Stack.of(self).stack_name}/*"),
                self._format_arn(service="cloudformation", resource=f"stack/{Stack.of(self).stack_name}-*/*"),
            ],
        ),
        # Read access to the regional "dcv-license.<region>" bucket.
        iam.PolicyStatement(
            sid="DcvLicense",
            actions=[
                "s3:GetObject",
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                self._format_arn(
                    service="s3",
                    resource="dcv-license.{0}/*".format(Stack.of(self).region),
                    region="",
                    account="",
                )
            ],
        ),
    ]
    if self._config.scheduling.scheduler != "awsbatch":
        # Non-Batch schedulers launch/terminate compute instances from the head node.
        policy.extend(
            [
                # Terminate only instances tagged with this cluster's name.
                iam.PolicyStatement(
                    sid="EC2Terminate",
                    actions=["ec2:TerminateInstances"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    conditions={
                        "StringEquals": {
                            f"ec2:ResourceTag/{PCLUSTER_CLUSTER_NAME_TAG}": Stack.of(self).stack_name
                        }
                    },
                ),
                # RunInstances scoped to the compute subnets, the configured key
                # pair, and the queue AMIs.
                iam.PolicyStatement(
                    sid="EC2RunInstances",
                    actions=["ec2:RunInstances"],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        self._format_arn(service="ec2", resource=f"subnet/{subnet_id}")
                        for subnet_id in self._config.compute_subnet_ids
                    ]
                    + [
                        self._format_arn(service="ec2", resource="network-interface/*"),
                        self._format_arn(service="ec2", resource="instance/*"),
                        self._format_arn(service="ec2", resource="volume/*"),
                        self._format_arn(service="ec2", resource=f"key-pair/{self._config.head_node.ssh.key_name}"),
                        self._format_arn(service="ec2", resource="security-group/*"),
                        self._format_arn(service="ec2", resource="launch-template/*"),
                        self._format_arn(service="ec2", resource="placement-group/*"),
                    ]
                    + [
                        self._format_arn(service="ec2", resource=f"image/{queue_ami}", account="")
                        for _, queue_ami in self._config.image_dict.items()
                    ],
                ),
                # Pass the compute-node role(s) produced by the helper.
                iam.PolicyStatement(
                    sid="PassRole",
                    actions=["iam:PassRole"],
                    effect=iam.Effect.ALLOW,
                    resources=self._generate_head_node_pass_role_resources(),
                ),
            ]
        )
    if self._config.scheduling.scheduler == "plugin":
        cluster_shared_artifacts = get_attr(
            self._config, "scheduling.settings.scheduler_definition.plugin_resources.cluster_shared_artifacts"
        )
        if cluster_shared_artifacts:
            # Grant read access to each S3-hosted scheduler-plugin artifact.
            for artifacts in cluster_shared_artifacts:
                if get_url_scheme(artifacts.source) == "s3":
                    bucket_info = parse_bucket_url(artifacts.source)
                    bucket_name = bucket_info.get("bucket_name")
                    object_key = bucket_info.get("object_key")
                    policy.extend(
                        [
                            iam.PolicyStatement(
                                actions=["s3:GetObject"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    self._format_arn(
                                        region="",
                                        service="s3",
                                        account="",
                                        resource=bucket_name,
                                        resource_name=object_key,
                                    )
                                ],
                            ),
                        ]
                    )
    if self._config.directory_service:
        # Read the directory-service password secret from Secrets Manager.
        policy.append(
            iam.PolicyStatement(
                actions=["secretsmanager:GetSecretValue"],
                effect=iam.Effect.ALLOW,
                resources=[self._config.directory_service.password_secret_arn],
            )
        )
    return policy