def wait_for_execute(self, stack_name, changeset_type):
    """
    Block until the executed changeset has finished creating or updating
    the stack, streaming stack events while waiting and printing the
    stack outputs (if any) on success.

    :param stack_name: Name of the CloudFormation stack being deployed
    :param changeset_type: Either "CREATE" or "UPDATE"; selects the waiter
    :raises RuntimeError: if changeset_type is neither CREATE nor UPDATE
    :raises deploy_exceptions.DeployFailedError: if the waiter gives up or
        the stack lands in a failure state
    """
    started_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    sys.stdout.write("\n{} - Waiting for stack create/update to complete\n".format(started_at))
    sys.stdout.flush()

    # Show events from the point the last known event was recorded.
    self.describe_stack_events(stack_name, self.get_last_event_time(stack_name))

    # Map changeset type to the matching boto3 waiter; anything else is a bug.
    waiter_by_type = {
        "CREATE": "stack_create_complete",
        "UPDATE": "stack_update_complete",
    }
    if changeset_type not in waiter_by_type:
        raise RuntimeError("Invalid changeset type {0}".format(changeset_type))
    waiter = self._client.get_waiter(waiter_by_type[changeset_type])

    # Poll every 5 seconds. Optimizing for the case when the stack has only
    # minimal changes, such the Code for Lambda Function.
    # 720 attempts * 5s keeps the overall timeout at one hour.
    poll_config = {"Delay": 5, "MaxAttempts": 720}

    try:
        waiter.wait(StackName=stack_name, WaiterConfig=poll_config)
    except botocore.exceptions.WaiterError as ex:
        LOG.debug("Execute changeset waiter exception", exc_info=ex)
        raise deploy_exceptions.DeployFailedError(stack_name=stack_name, msg=str(ex))

    stack_outputs = self.get_stack_outputs(stack_name=stack_name, echo=False)
    if stack_outputs:
        self._display_stack_outputs(stack_outputs)
def wait_for_execute(self, stack_name, changeset_type):
    """
    Wait until the stack create/update triggered by the executed changeset
    completes, echoing stack events in the meantime and displaying the
    stack outputs once the stack reaches a terminal success state.

    NOTE(review): this file contains another wait_for_execute definition
    with a different polling cadence; if both sit in the same class the
    later one shadows the earlier — confirm which is intended.

    :param stack_name: Name of the CloudFormation stack being deployed
    :param changeset_type: "CREATE" or "UPDATE"; picks the waiter to use
    :raises RuntimeError: on an unrecognized changeset type
    :raises deploy_exceptions.DeployFailedError: when waiting fails
    """
    sys.stdout.write(
        "\n{} - Waiting for stack create/update to complete\n".format(
            datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        )
    )
    sys.stdout.flush()

    # Replay events newer than the most recent one we have already seen.
    self.describe_stack_events(stack_name, self.get_last_event_time(stack_name))

    # Resolve the waiter name first, then fetch the waiter once.
    if changeset_type == "CREATE":
        waiter_name = "stack_create_complete"
    elif changeset_type == "UPDATE":
        waiter_name = "stack_update_complete"
    else:
        raise RuntimeError("Invalid changeset type {0}".format(changeset_type))
    stack_waiter = self._client.get_waiter(waiter_name)

    # Poll every 30 seconds. Polling too frequently risks hitting rate limits
    # on CloudFormation's DescribeStacks API (120 attempts * 30s = one hour).
    wait_settings = {"Delay": 30, "MaxAttempts": 120}

    try:
        stack_waiter.wait(StackName=stack_name, WaiterConfig=wait_settings)
    except botocore.exceptions.WaiterError as ex:
        LOG.debug("Execute changeset waiter exception", exc_info=ex)
        raise deploy_exceptions.DeployFailedError(stack_name=stack_name, msg=str(ex))

    outputs = self.get_stack_outputs(stack_name=stack_name, echo=False)
    if outputs:
        self._display_stack_outputs(outputs)
def run(self):
    """
    Execute the deploy command: parse and validate the template, merge
    parameter overrides, construct the CloudFormation (and optionally S3)
    clients, print the resolved deploy arguments, and hand off to
    self.deploy().

    :raises deploy_exceptions.DeployFailedError: when the template does not
        parse to a mapping
    :raises deploy_exceptions.DeployBucketRequiredError: when the template
        exceeds the 51200-byte inline limit and no S3 bucket was supplied
    """
    # Parse parameters
    with open(self.template_file, "r") as template_handle:
        template_text = template_handle.read()

    parsed_template = yaml_parse(template_text)
    if not isinstance(parsed_template, dict):
        raise deploy_exceptions.DeployFailedError(
            stack_name=self.stack_name,
            msg="{} not in required format".format(self.template_file),
        )

    merged_parameters = self.merge_parameters(parsed_template, self.parameter_overrides)

    # Templates larger than 51200 bytes cannot be passed inline to
    # CloudFormation and must be uploaded, which requires a bucket.
    if os.path.getsize(self.template_file) > 51200 and not self.s3_bucket:
        raise deploy_exceptions.DeployBucketRequiredError()

    client_config = get_boto_config_with_user_agent()
    effective_region = self.region if self.region else None
    cfn_client = boto3.client("cloudformation", region_name=effective_region, config=client_config)

    s3_client = None
    if self.s3_bucket:
        s3_client = boto3.client("s3", region_name=effective_region, config=client_config)
        self.s3_uploader = S3Uploader(
            s3_client,
            self.s3_bucket,
            self.s3_prefix,
            self.kms_key_id,
            self.force_upload,
            self.no_progressbar,
        )

    self.deployer = Deployer(cfn_client)

    # Prefer the region the S3 client actually resolved to, if one exists.
    region = s3_client._client_config.region_name if s3_client else self.region  # pylint: disable=W0212

    print_deploy_args(
        self.stack_name,
        self.s3_bucket,
        self.image_repository,
        region,
        self.capabilities,
        self.parameter_overrides,
        self.confirm_changeset,
        self.signing_profiles,
    )

    tag_list = [{"Key": key, "Value": value} for key, value in self.tags.items()] if self.tags else []

    return self.deploy(
        self.stack_name,
        template_text,
        merged_parameters,
        self.capabilities,
        self.no_execute_changeset,
        self.role_arn,
        self.notification_arns,
        self.s3_uploader,
        tag_list,
        region,
        self.fail_on_empty_changeset,
        self.confirm_changeset,
    )