def get_version_of_artifact(self, s3_url):
    """
    Returns version information of the S3 object that is given as S3 URL

    The version id is taken from the ``VersionId`` field of the
    ``get_object_tagging`` response for the parsed bucket/key pair.
    """
    location = parse_s3_url(s3_url)
    tagging_info = self.s3.get_object_tagging(
        Bucket=location["Bucket"], Key=location["Key"]
    )
    LOG.debug("S3 Object (%s) tagging information %s", s3_url, tagging_info)
    return tagging_info["VersionId"]
def create_changeset(self, stack_name, cfn_template, parameter_values,
                     capabilities, role_arn, notification_arns, s3_uploader,
                     tags):
    """
    Call Cloudformation to create a changeset and wait for it to complete

    :param stack_name: Name or ID of stack
    :param cfn_template: CloudFormation template string
    :param parameter_values: Template parameters object
    :param capabilities: Array of capabilities passed to CloudFormation
    :param tags: Array of tags passed to CloudFormation
    :return:
    """
    if self.has_stack(stack_name):
        changeset_type = "UPDATE"
        # UsePreviousValue is only valid for parameters the stack already
        # has, so drop it from any parameter the stack has never seen.
        summary = self._client.get_template_summary(StackName=stack_name)
        known_keys = {
            parameter["ParameterKey"] for parameter in summary["Parameters"]
        }
        parameter_values = [
            p for p in parameter_values
            if not (p.get("UsePreviousValue", False)
                    and p["ParameterKey"] not in known_keys)
        ]
    else:
        changeset_type = "CREATE"
        # A brand-new stack has no previous values: parameters relying on
        # UsePreviousValue must be overridden with a new value or have a
        # Default in the template, so strip them here.
        parameter_values = [
            p for p in parameter_values
            if not p.get("UsePreviousValue", False)
        ]

    # Changeset name is made unique via the current epoch second; the
    # description records when (UTC) and how (SAM CLI) it was created.
    kwargs = {
        "ChangeSetName": self.changeset_prefix + str(int(time.time())),
        "StackName": stack_name,
        "TemplateBody": cfn_template,
        "ChangeSetType": changeset_type,
        "Parameters": parameter_values,
        "Capabilities": capabilities,
        "Description": "Created by SAM CLI at {0} UTC".format(
            datetime.utcnow().isoformat()),
        "Tags": tags,
    }

    # Large templates must be deployed via TemplateURL rather than
    # TemplateBody; when an S3 uploader is available, push the body to S3
    # and reference it by URL instead.
    if s3_uploader:
        with mktempfile() as temporary_file:
            temporary_file.write(kwargs.pop("TemplateBody"))
            temporary_file.flush()
            uploaded_url = s3_uploader.upload_with_dedup(
                temporary_file.name, "template")
            # TemplateUrl property requires S3 URL to be in path-style format
            parts = parse_s3_url(uploaded_url, version_property="Version")
            kwargs["TemplateURL"] = s3_uploader.to_path_style_s3_url(
                parts["Key"], parts.get("Version", None))

    # Omit these entirely when unset so CloudFormation keeps existing values.
    if role_arn is not None:
        kwargs["RoleARN"] = role_arn
    if notification_arns is not None:
        kwargs["NotificationARNs"] = notification_arns

    return self._create_change_set(
        stack_name=stack_name, changeset_type=changeset_type, **kwargs)
def sign_package(self, resource_id, s3_url, s3_version):
    """
    Signs artifact which is named with resource_id, its location is s3_url
    and its s3 object version is s3_version
    """
    # extract code signing config for the resource
    signing_config = self.signing_profiles[resource_id]
    profile_name = signing_config["profile_name"]
    profile_owner = signing_config["profile_owner"]

    # parse given s3 url, and extract bucket and object key
    parsed = parse_s3_url(s3_url)
    s3_bucket = parsed["Bucket"]
    object_key = parsed["Key"]
    # signed artifact is placed next to the original, with a "signed_" prefix
    target_prefix = object_key.rsplit("/", 1)[0] + "/signed_"

    LOG.debug(
        "Initiating signing job with bucket:%s key:%s version:%s prefix:%s profile name:%s profile owner:%s",
        s3_bucket,
        object_key,
        s3_version,
        target_prefix,
        profile_name,
        profile_owner,
    )

    # initiate and wait for signing job to finish
    code_sign_job_id = self._initiate_code_signing(
        profile_name, profile_owner, s3_bucket, object_key,
        target_prefix, s3_version)
    self._wait_for_signing_job_to_complete(code_sign_job_id)

    try:
        job_result = self.signer_client.describe_signing_job(
            jobId=code_sign_job_id)
    except Exception as e:
        LOG.error("Checking the result of the code signing job failed %s",
                  code_sign_job_id, exc_info=e)
        raise CodeSigningJobFailureException(
            f"Signing job has failed status {code_sign_job_id}") from e

    # check if code sign job result status is Succeeded, fail otherwise
    if job_result and job_result.get("status") == "Succeeded":
        signed_object = job_result.get("signedObject", {}).get("s3", {})
        LOG.info(
            "Package has successfully signed into the location %s/%s",
            signed_object.get("bucketName"),
            signed_object.get("key"),
        )
        signed_package_location = job_result["signedObject"]["s3"]["key"]
        return f"s3://{s3_bucket}/{signed_package_location}"

    LOG.error("Failed to sign the package, result: %s", job_result)
    raise CodeSigningJobFailureException(
        f"Signing job not succeeded {code_sign_job_id}")