def rrset(self):
    """Get rrset information"""
    rrset = self.provider().client.get_rrsets(self.zone, {"owner": self.domain}, limit=500)
    if rrset.get("errorMessage"):
        raise BespinError("Failed to get record set", error=rrset["errorMessage"], error_code=rrset["errorCode"])

    found = rrset["rrSets"]
    if len(found) > 1:
        found = []
        for record in rrset["rrSets"]:
            if record["ownerName"] == self.domain and record["rrtype"].startswith("{0} ".format(self.record_type)):
                found.append(record)

    if len(found) > 1:
        raise BespinError("Got multiple record sets", domain=self.domain, got=found)

    if not found:
        raise BespinError("Found no record sets", domain=self.domain)

    return found[0]
def switched_to(self, environment):
    rtype, current_val = self.current_value
    wanted_val = self.environments[environment]
    if set(current_val) != set(wanted_val):
        raise BespinError("The current value in ultradns is different than the specified value for this environment"
            , environment=environment
            , ultradns_has=current_val
            , specified=wanted_val
            )

    log.info("Seeing if %s has switched to %s(%s)", self.domain, environment, current_val)

    if rtype == "A":
        # getaddrinfo returns (family, type, proto, canonname, sockaddr) tuples; sockaddr[0] is the IP
        info = socket.getaddrinfo(self.domain, 80)
        found = [sockaddr[0] for _, _, _, _, sockaddr in info]
        if set(found) == set(current_val):
            return True
        else:
            log.info("Current value is %s", list(set(found)))

    if rtype == "CNAME":
        answer = DNSRecord.parse(DNSRecord(q=DNSQuestion(self.domain, QTYPE.CNAME)).send("8.8.8.8", 53)).short()
        if not answer:
            raise BespinError("couldn't resolve the domain", domain=self.domain)
        if answer == current_val[0]:
            return True
        else:
            log.info("Current value is %s", answer)

    return False
def sync_netscaler_config(collector, stack, **kwargs):
    """Sync netscaler configuration with the specified netscaler"""
    logging.captureWarnings(True)
    logging.getLogger("py.warnings").setLevel(logging.ERROR)

    configuration = collector.configuration
    if stack.netscaler is NotSpecified or stack.netscaler.configuration is NotSpecified:
        raise BespinError("Please configure {netscaler.configuration}")

    if stack.netscaler.syncable_environments is not NotSpecified:
        if configuration["environment"] not in stack.netscaler.syncable_environments:
            raise BespinError("Sorry, can only sync netscaler config for particular environments", wanted=configuration["environment"], available=list(stack.netscaler.syncable_environments))

    for_layers = []
    all_configuration = {}
    for vkey, value in stack.netscaler.configuration.items():
        for key, thing in value.items():
            if thing.environments is NotSpecified or configuration["environment"] in thing.environments:
                for_layers.append(thing.long_name)
                all_configuration[thing.long_name] = thing
                if vkey not in all_configuration:
                    all_configuration[vkey] = {}
                all_configuration[vkey][key] = thing

    layers = Layers(for_layers, all_stacks=all_configuration)
    layers.add_all_to_layers()

    stack.netscaler.syncing_configuration = True
    with stack.netscaler as netscaler:
        for layer in layers.layered:
            for _, thing in layer:
                netscaler.sync(all_configuration, configuration["environment"], thing)
def scale_instances(collector, stack, artifact, **kwargs):
    """Change the number of instances in the stack's auto_scaling_group"""
    if isinstance(artifact, int) or artifact.isdigit():
        artifact = int(artifact)
    else:
        raise BespinError("The number of instances must be an integer")

    if artifact > stack.scaling_options.instance_count_limit:
        raise BespinError("The instance_count_limit is smaller than the specified number of instances", limit=stack.scaling_options.instance_count_limit, wanted=artifact)

    group = stack.auto_scaling_group
    current_count = group.desired_capacity
    log.info("Changing the number of instances in the %s stack from %s to %s", stack.stack_name, current_count, artifact)

    if group.min_size > artifact:
        log.info("Changing min_size from %s to %s", group.min_size, artifact)
        group.min_size = artifact
    if group.max_size < artifact:
        log.info("Changing max_size from %s to %s", group.max_size, artifact)
        group.max_size = artifact
    if group.min_size < artifact:
        group.min_size = artifact
    if group.min_size > stack.scaling_options.highest_min:
        group.min_size = stack.scaling_options.highest_min

    group.desired_capacity = artifact
    group.update()
def command_on_instances(collector, stack, artifact, **kwargs):
    """Run a shell command on all the instances in the stack"""
    if stack.command is NotSpecified:
        raise BespinError("No command was found to run")
    log.info("Running '%s' on all instances for the %s stack", stack.command, stack.stack_name)

    if artifact:
        ips = [artifact]
    else:
        ips = list(stack.ssh.find_ips(stack))

    if not ips:
        raise BespinError("Didn't find any instances to run the command on")
    log.info("Running command on the following ips: %s", ips)

    if collector.configuration["bespin"].dry_run:
        log.warning("Dry-run, only gonna run hostname on the boxes")
        command = "hostname"
    else:
        command = stack.command

    bastion_key_path, instance_key_path = stack.ssh.chmod_keys()
    proxy = stack.ssh.proxy_options(bastion_key_path)

    extra_kwargs = {}
    if proxy:
        extra_kwargs = {"proxy": stack.ssh.bastion, "proxy_ssh_key": bastion_key_path, "proxy_ssh_user": stack.ssh.user}

    SSH(ips, command, stack.ssh.user, instance_key_path, **extra_kwargs).run()
def verify_creds(self):
    """Make sure our current credentials are for this account and set self.connection"""
    if getattr(self, "_verified", None):
        return

    if self.assume_role is not NotSpecified:
        self.assume()
        self._verified = True
        return

    log.info("Verifying amazon credentials")
    try:
        self.session = boto3.session.Session(region_name=self.region)
        amazon_account_id = self.session.client('sts').get_caller_identity().get('Account')
        if int(self.account_id) != int(amazon_account_id):
            raise BespinError("Please use credentials for the right account", expect=self.account_id, got=amazon_account_id)
        self._verified = True
    except botocore.exceptions.NoCredentialsError:
        raise BespinError("Export AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY before running this script (your aws credentials)")
    except botocore.exceptions.ClientError as error:
        # botocore exceptions have no .message attribute on python3
        raise BespinError("Couldn't determine what account your credentials are from", error=str(error))

    if self.session is None or self.session.region_name != self.region:
        raise ProgrammerError("botocore.session created in incorrect region")
def get_from_env(wanted):
    """Get environment variables from the env"""
    env = sb.listof(env_spec()).normalise(Meta({}, []), wanted)
    missing = [e.env_name for e in env if e.missing]
    if missing:
        raise BespinError("Missing environment variables", missing=missing)
    return dict(e.pair for e in env)
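# Usage sketch (hypothetical values): get_from_env returns a plain dict of the
# requested environment variables, raising BespinError (with the missing names)
# if any of them aren't set.
#
#   os.environ["MESSAGE"] = "hello"
#   os.environ["SENT_BY"] = "alice"
#   env = get_from_env(["MESSAGE", "SENT_BY"])
#   env == {"MESSAGE": "hello", "SENT_BY": "alice"}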
def convert_passwords(path, val):
    # Note: this is a nested converter; `configuration` and `bespin_spec` come from
    # the enclosing scope
    log.info("Converting %s", path)
    password = str(path)[len("passwords."):]
    configuration.converters.started(path)

    environment = configuration['bespin'].environment
    val_as_dict = configuration["passwords"][password].as_dict()
    if not environment:
        raise BespinError("No environment was provided", available=list(configuration["environments"].keys()))

    password_environment_as_dict = {}
    if ["passwords", password, environment] in configuration:
        password_environment_as_dict = configuration[["passwords", password, environment]].as_dict()

    base = MergedOptions(dont_prefix=path.configuration.dont_prefix, converters=path.configuration.converters)
    everything = path.configuration.root().wrapped()

    base.update(val_as_dict)
    everything[path] = val_as_dict

    base.update(password_environment_as_dict)
    everything[path].update(password_environment_as_dict)

    for thing in (base, everything):
        thing["__password__"] = val
        thing["__environment__"] = configuration["environments"][environment]

    meta = Meta(everything, [("passwords", ""), (password, "")])
    return bespin_spec.password_spec.normalise(meta, base)
def make_api_key(self):
    api_key_url = "{0}/account/generate_api_key".format(self.host)
    cookies = requests.get(api_key_url, headers={"Referer": self.host}).cookies
    data = {'rattic_tfa_generate_api_key-current_step': 'auth', 'csrfmiddlewaretoken': cookies['csrftoken']}

    # NOTE: the credential prompts were redacted in the source; this is a
    # reasonable reconstruction using input() and getpass (assumed to be imported)
    sys.stderr.write("username: ")
    sys.stderr.flush()
    username = input()
    password = getpass.getpass("password: ")

    data['auth-username'] = username
    data['auth-password'] = password
    res = requests.post(api_key_url, data=data, cookies=cookies, headers={"Referer": self.host})

    if res.status_code not in (200, 400):
        print(res.content.decode('utf-8'))
        raise BespinError("Failed to generate an api token from rattic")

    if res.status_code == 400:
        data['rattic_tfa_generate_api_key-current_step'] = 'token'
        sys.stderr.write("token: ")
        sys.stderr.flush()
        token = input()
        data['token-otp_token'] = token
        res = requests.post(api_key_url, data=data, cookies=cookies, headers={"Referer": self.host})

    return "ApiKey {0}:{1}".format(username, res.content.decode('utf-8'))
def become(collector, stack, artifact, **kwargs):
    """Print export statements for assuming an amazon iam role"""
    configuration = collector.configuration
    bespin = configuration['bespin']
    environment = bespin.environment

    if not environment:
        raise BadOption("Please specify an environment")

    # Only look up the region once we know an environment was actually given
    region = configuration['environments'][environment].region

    if all(thing in ("", None, NotSpecified) for thing in (stack, artifact)):
        raise BespinError("Please specify your desired role as an artifact")

    if artifact:
        role = artifact
    else:
        role = stack

    credentials = Credentials(region, configuration["environments"][environment].account_id, role)
    credentials.verify_creds()

    print("export AWS_ACCESS_KEY_ID={0}".format(os.environ['AWS_ACCESS_KEY_ID']))
    print("export AWS_SECRET_ACCESS_KEY={0}".format(os.environ['AWS_SECRET_ACCESS_KEY']))
    print("export AWS_SECURITY_TOKEN={0}".format(os.environ['AWS_SECURITY_TOKEN']))
    print("export AWS_SESSION_TOKEN={0}".format(os.environ['AWS_SESSION_TOKEN']))
def find_instance_ids(self, stack):
    if self.auto_scaling_group_name is not NotSpecified:
        instances = None
        asg_physical_id = stack.cloudformation.map_logical_to_physical_resource_id(self.auto_scaling_group_name)
    elif self.instance is not NotSpecified:
        instances = [stack.cloudformation.map_logical_to_physical_resource_id(inst) for inst in self.instance]
        asg_physical_id = None
    else:
        raise BespinError("Please specify either ssh.instance or ssh.auto_scaling_group_name", stack=stack)

    log.info("Finding instances")
    instance_ids = []
    if asg_physical_id:
        instance_ids = stack.ec2.instance_ids_in_autoscaling_group(asg_physical_id)
    elif instances:
        instance_ids = instances

    return instance_ids
def wait_for_dns_switch(collector, stack, artifact, site=NotSpecified, **kwargs):
    """Periodically check dns until all our sites point to where they should be pointing to for the specified environment"""
    if stack.dns is NotSpecified:
        raise BespinError("No dns options are specified!")

    if site is NotSpecified and artifact not in ("", None, NotSpecified):
        site = artifact
    if site is NotSpecified or not site:
        site = None

    all_sites = stack.dns.sites()
    available = list(all_sites.keys())

    if site:
        if site not in available:
            raise BespinError("Have no dns options for specified site", available=available, wanted=site)
        sites = [site]
    else:
        sites = available
    sites = [all_sites[s] for s in sites]

    environment = collector.configuration["bespin"].environment

    errors = []
    for site in sorted(sites):
        if environment not in site.environments:
            errors.append(BespinError("Site doesn't have specified environment", site=site.name, wanted=environment, available=list(site.environments.keys())))

        try:
            rtype, rdata = site.current_value
            if rtype != site.record_type:
                errors.append(BespinError("Site is a different record type!", recorded_as=rtype, wanted=site.record_type))
            log.info("%s is currently %s (%s)", site.domain, rdata, rtype)
        except BespinError as error:
            errors.append(error)
            continue

    if errors:
        raise BespinError("Prechecks failed", _errors=errors)

    log.info("Waiting for traffic to switch to %s\tsites=%s", environment, [site.domain for site in sites])
    for _ in hp.until(timeout=600, step=5):
        if all(site.switched_to(environment) for site in sites):
            log.info("Finished switching!")
            break
        else:
            log.info("Waiting for sites to switch")
def outputs(collector, stack, artifact, **kwargs):
    """Print out the outputs"""
    outputs = stack.cloudformation.outputs
    if artifact not in (None, NotSpecified):
        if artifact not in outputs:
            raise BespinError("Couldn't find output", wanted=artifact, available=list(outputs.keys()))
        print(outputs[artifact])
    else:
        print(json.dumps(outputs, indent=4))
def execute(collector, **kwargs):
    """Exec a command using assumed credentials"""
    configuration = collector.configuration
    parts = shlex.split(configuration["$@"])
    configuration["bespin"].credentials.verify_creds()

    if not parts:
        suggestion = " ".join(sys.argv) + " -- /bin/command_to_run"
        msg = "No command was provided. Try something like:\n\t\t{0}".format(suggestion)
        raise BespinError(msg)

    os.execvpe(parts[0], parts, os.environ)
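# Example invocation (hypothetical arguments): everything after `--` ends up in
# configuration["$@"] and is exec'd via os.execvpe with the assumed-role
# credentials already exported into os.environ, e.g.
#
#   bespin execute -- aws s3 ls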
def convert_stack(path, val):
    # Note: like convert_passwords, this is a nested converter; `configuration`,
    # `bespin_spec` and `stack` come from the enclosing scope
    log.info("Converting %s", path)
    configuration.converters.started(path)
    environment = configuration['bespin'].environment

    config_as_dict = configuration.as_dict(ignore=["stacks"])
    val_as_dict = val.as_dict(ignore=["stacks"])
    if not environment or environment is NotSpecified:
        raise BespinError("No environment was provided", available=list(configuration["environments"].keys()))

    env = configuration[["environments", environment]]
    if isinstance(env, six.string_types):
        environment_as_dict = configuration[["environments", env]].as_dict()
        env = configuration[["environments", env]]
    else:
        environment_as_dict = configuration[["environments", environment]].as_dict()

    stack_environment = {}
    stack_environment_as_dict = {}
    if ["stacks", stack, environment] in configuration:
        stack_environment = configuration[["stacks", stack, environment]]
        stack_environment_as_dict = stack_environment.as_dict()

    # `base` is used for the majority of the values
    base = path.configuration.root().wrapped()
    base.update(config_as_dict)
    base.update(val_as_dict)
    base.update(environment_as_dict)
    base.update(stack_environment_as_dict)

    # `everything` is used for formatting options
    # Ideally it matches base
    # The difference here is that we want to maintain source information
    everything = path.configuration.root().wrapped()
    everything[path] = MergedOptions.using(configuration, val, env, stack_environment)

    for thing in (base, everything):
        thing["bespin"] = configuration["bespin"]
        thing["environment"] = environment
        thing["configuration"] = configuration
        thing["__stack__"] = val
        thing["__environment__"] = configuration["environments"][environment]
        thing["__stack_name__"] = stack

    meta = Meta(everything, [("stacks", ""), (stack, "")])
    return bespin_spec.stack_spec.normalise(meta, base)
def switch_dns_traffic_to(collector, stack, artifact, site=NotSpecified, **kwargs):
    """Switch dns traffic to some environment"""
    if stack.dns is NotSpecified:
        raise BespinError("No dns options are specified!")

    if site is NotSpecified and artifact not in ("", None, NotSpecified):
        site = artifact
    if site is NotSpecified or not site:
        site = None

    all_sites = stack.dns.sites()
    available = list(all_sites.keys())

    if site:
        if site not in available:
            raise BespinError("Have no dns options for specified site", available=available, wanted=site)
        sites = [site]
    else:
        sites = available
    sites = [all_sites[s] for s in sites]

    environment = collector.configuration["bespin"].environment

    errors = []
    for site in sorted(sites):
        if environment not in site.environments:
            errors.append(BespinError("Site doesn't have specified environment", site=site.name, wanted=environment, available=list(site.environments.keys())))

        try:
            rtype, rdata = site.current_value
            if rtype != site.record_type:
                errors.append(BespinError("Site is a different record type!", recorded_as=rtype, wanted=site.record_type))
            log.info("%s is currently %s (%s)", site.domain, rdata, rtype)
        except BespinError as error:
            errors.append(error)
            continue

    if errors:
        raise BespinError("Prechecks failed", _errors=errors)

    log.info("Switching traffic to %s\tsites=%s", environment, [site.domain for site in sites])
    for site in sorted(sites):
        site.switch_to(collector.configuration["bespin"].environment, dry_run=collector.configuration["bespin"].dry_run)
def downtime(collector, stack, method="downtime", **kwargs):
    """Downtime this stack in alerting systems"""
    if stack.downtimer_options is NotSpecified:
        raise BespinError("Nothing to downtime!")

    env = sb.listof(env_spec()).normalise(Meta({}, []), ["USER", "DURATION", "COMMENT"])
    missing = [e.env_name for e in env if e.missing]
    if missing:
        raise BespinError("Missing environment variables", missing=missing)
    provided_env = dict(e.pair for e in env)

    author = provided_env["USER"]
    comment = provided_env["COMMENT"]
    duration = provided_env["DURATION"]

    downtimer = Downtimer(stack.downtimer_options, dry_run=collector.configuration["bespin"].dry_run)
    for system, options in stack.alerting_systems.items():
        downtimer.register_system(system, options)

    getattr(downtimer, method)(duration, author, comment)
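# The downtime action reads its inputs from the environment rather than from
# arguments; a hypothetical invocation might look like:
#
#   USER=alice DURATION=30m COMMENT="rolling deploy" bespin downtime my-stack
#
# (the exact DURATION format depends on what the configured alerting systems accept)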
def assume(self):
    assumed_role = self.account_role_arn(self.assume_role)
    log.info("Assuming role as %s", assumed_role)

    for name in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']:
        if name in os.environ and not os.environ[name]:
            del os.environ[name]

    try:
        conn = boto3.client('sts', region_name=self.region)

        # Role session names may only contain word characters and +=,.@-
        session_user = re.sub(r'[^\w+=,.@-]+', '', os.environ.get("USER", ""))
        if session_user:
            session_name = "{0}@bespin{1}".format(session_user, VERSION)
        else:
            session_name = "bespin{0}".format(VERSION)

        response = conn.assume_role(RoleArn=assumed_role, RoleSessionName=session_name)
        role = response['AssumedRoleUser']
        creds = response['Credentials']
        log.info("Assumed role (%s)", role['Arn'])

        self.session = boto3.session.Session(
              aws_access_key_id=creds['AccessKeyId']
            , aws_secret_access_key=creds['SecretAccessKey']
            , aws_session_token=creds['SessionToken']
            , region_name=self.region
            )

        os.environ['AWS_ACCESS_KEY_ID'] = creds["AccessKeyId"]
        os.environ['AWS_SECRET_ACCESS_KEY'] = creds["SecretAccessKey"]
        os.environ['AWS_SECURITY_TOKEN'] = creds["SessionToken"]
        os.environ['AWS_SESSION_TOKEN'] = creds["SessionToken"]
    except botocore.exceptions.NoCredentialsError:
        raise BespinError("Export AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY before running this script (your aws credentials)")
    except botocore.exceptions.ClientError as error:
        # botocore exceptions have no .message attribute on python3
        raise BespinError("Unable to assume role", error=str(error))
def chmod_keys(self):
    error = False

    bastion_key_path = None
    if self.bastion is not NotSpecified:
        try:
            bastion_key_path = self.chmod_bastion_key_path()
        except MissingSSHKey:
            error = True

    try:
        instance_key_path = self.chmod_instance_key_path()
    except MissingSSHKey:
        error = True

    if error:
        raise BespinError("Couldn't find ssh keys")

    return bastion_key_path, instance_key_path
def create_event(self, message, sent_by):
    log.info("Making an event in stackdriver!\tmessage=%s\tannotation=%s", message, sent_by)

    api_key = self.api_key
    if callable(api_key):
        api_key = api_key()

    headers = {
          "content-type": "application/json"
        , "x-stackdriver-apikey": api_key
        }

    url = "https://event-gateway.stackdriver.com/v1/annotationevent"

    event = {
          "message": message
        , "annotated_by": sent_by
        , "level": "INFO"
        , "event_epoch": time.time()
        }

    res = requests.post(url, headers=headers, data=json.dumps(event))
    if res.status_code != 201 or res.content != b'Published':
        raise BespinError("Failed to send stackdriver event", status_code=res.status_code, error=res.content)
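# Usage sketch (hypothetical names): because create_event accepts either a string
# or a zero-argument callable for api_key, fetching the key can be deferred until
# an event is actually sent:
#
#   stackdriver = Stackdriver(api_key=lambda: fetch_key_from_credstore())  # hypothetical helper
#   stackdriver.create_event("Deployed app 1.2.3", "bespin=={0}".format(VERSION))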
def wait_for(self, bucket, key, timeout, start=None):
    if start is None:
        start = datetime.utcnow()

    log.info("Looking for key with last_modified greater than %s", start)
    for _ in hp.until(timeout=timeout, step=5):
        try:
            bucket_obj = self.get_bucket(bucket)
        except BadS3Bucket as error:
            log.error(error)
            continue

        if key == '/':
            log.info("The bucket exists! and that is all we are looking for")
            return

        k = Key(bucket_obj)
        k.key = key

        try:
            k.read()
        except boto.exception.S3ResponseError as error:
            if error.status == 404:
                log.info("Key doesn't exist yet\tbucket=%s\tkey=%s", bucket_obj.name, key)
                continue
            else:
                log.error(error)
                continue

        last_modified = k.last_modified
        log.info("Found key in the bucket\tbucket=%s\tkey=%s\tlast_modified=%s", bucket_obj.name, key, last_modified)

        date = datetime.strptime(last_modified, "%a, %d %b %Y %H:%M:%S GMT")
        if date > start:
            log.info("Found key and it's newer than our start time!")
            return
        else:
            log.info("Found key but it's older than our start time, hasn't been updated yet")

    raise BespinError("Couldn't find the s3 key with a newer last modified")
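# Usage sketch (hypothetical bucket/key): block until artifacts/app.tar.gz in
# my-bucket has a last_modified newer than when we started, polling every 5 seconds:
#
#   self.wait_for("my-bucket", "artifacts/app.tar.gz", timeout=600)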
def run(self):
    jb = None
    defaults = config.load_default_settings()
    original_paramiko_agent = paramiko.Agent

    with hp.a_temp_file() as fle:
        if self.proxy and self.proxy_ssh_key:
            fle.write("keyfile|{0}|{1}\n".format(self.proxy, self.proxy_ssh_key).encode('utf-8'))
        if self.ssh_key:
            fle.write("keyfile|*|{0}\n".format(self.ssh_key).encode('utf-8'))
        fle.close()

        auth_file = fle.name if (self.ssh_key or self.proxy_ssh_key) else None
        if self.proxy:
            jb = plugins.load_plugin(jumpbox.__file__)
            jb.init(auth=AuthManager(self.proxy_ssh_user, auth_file=auth_file), defaults=defaults)

        login = AuthManager(self.ssh_user, auth_file=auth_file, include_agent=True)

        # Wrap each agent key in an Identity object; a helper function is used so
        # every wrapper binds its own key (bare lambdas in the loop would all
        # late-bind to the last key)
        def make_identity(key):
            ident = str(uuid.uuid1())
            return type("Identity", (object, ), {
                  "__str__": lambda s: ident
                , "get_name": lambda s: key.get_name()
                , "asbytes": lambda s: key.asbytes()
                , "sign_ssh_data": lambda s, *args, **kwargs: key.sign_ssh_data(*args, **kwargs)
                })()

        keys = {}
        for key in login.agent_connection.get_keys():
            identity = make_identity(key)
            keys[identity] = key
            login.deferred_keys[identity] = key

        try:
            outputs = defaultdict(lambda: {"stdout": [], "stderr": []})

            class TwoQueue(object):
                """Queue wrapper that also records each line of output per host"""
                def __init__(self):
                    self.q = queue.Queue(300)

                def put(self, thing):
                    (host, is_stderr), line = thing
                    outputs[host][["stdout", "stderr"][is_stderr]].append(line)
                    self.q.put(thing)

                def __getattr__(self, key):
                    if key in ("q", "put"):
                        return object.__getattribute__(self, key)
                    else:
                        return getattr(self.q, key)

            console = RadSSHConsole(q=TwoQueue())

            connections = [(ip, None) for ip in self.ips]
            if jb:
                jb.add_jumpbox(self.proxy)
                connections = list((ip, socket) for _, ip, socket in jb.do_jumpbox_connections(self.proxy, self.ips))

            cluster = None
            try:
                log.info("Connecting")
                authenticated = False
                for _ in hp.until(timeout=120):
                    if authenticated:
                        break

                    cluster = Cluster(connections, login, console=console, defaults=defaults)

                    for _ in hp.until(timeout=10, step=0.5):
                        if not any(cluster.pending):
                            break
                    if cluster.pending:
                        raise BespinError("Timed out waiting to connect to some hosts", waiting_for=cluster.pending.keys())

                    for _ in hp.until(timeout=10, step=0.5):
                        connections = list(cluster.connections.values())
                        if any(isinstance(connection, socket.gaierror) for connection in connections):
                            raise BespinError("Some connections failed!", failures=[conn for conn in connections if isinstance(conn, socket.gaierror)])
                        if all(conn.authenticated for conn in connections):
                            break

                    authenticated = all(conn.authenticated for conn in cluster.connections.values())
                    if not authenticated:
                        unauthenticated = [host for host, conn in cluster.connections.items() if not conn.authenticated]
                        log.info("Failed to authenticate, will try to reconnect in 5 seconds\tunauthenticated=%s", unauthenticated)
                        time.sleep(5)

                # Try to reauth if not authenticated yet
                unauthenticated = [host for host, conn in cluster.connections.items() if not conn.authenticated]
                if unauthenticated:
                    for host in unauthenticated:
                        print('{0:14s} : {1}'.format(str(host), cluster.connections[host]))
                    raise BespinError("Timed out waiting to authenticate all the hosts, do you have an ssh-agent running?", unauthenticated=unauthenticated)

                failed = []
                for host, status in cluster.connections.items():
                    print('{0:14s} : {1}'.format(str(host), status))
                    if type(status) is socket.gaierror:
                        failed.append(host)
                if failed:
                    raise BespinError("Failed to connect to some hosts", failed=failed)

                cluster.run_command(self.command)

                error = False
                for host, job in cluster.last_result.items():
                    if not job.completed or job.result.return_code not in self.acceptable_return_codes:
                        log.error('%s - %s', host, cluster.connections[host])
                        log.error('%s, %s', job, job.result.status)
                        error = True

                if error:
                    raise BespinError("Failed to run the commands")

                return outputs
            finally:
                if cluster:
                    cluster.close_connections()
        finally:
            paramiko.Agent = original_paramiko_agent
def note_deployment_in_newrelic(collector, stack, **kwargs):
    """Note the deployment in newrelic"""
    if stack.newrelic is NotSpecified:
        raise BespinError("Please specify newrelic configuration for your stack")
    stack.newrelic.note_deployment()
    log.info("Great success!")
def create_stackdriver_event(collector, stack, **kwargs):
    """Create an event in stackdriver"""
    env = get_from_env(["MESSAGE", "SENT_BY"])
    if stack.stackdriver is NotSpecified:
        raise BespinError("Please specify stackdriver options for your stack")
    stack.stackdriver.create_event(env["MESSAGE"], env["SENT_BY"])
def deploy_stack(self, stack, stacks, made=None, ignore_deps=False, checked=None, start=None, is_dependency=False):
    """Deploy a stack and all its dependencies"""
    if start is None:
        start = datetime.utcnow()

    made = [] if made is None else made
    checked = [] if checked is None else checked

    if stack.name in made:
        return

    Builder().sanity_check(stack, stacks, ignore_deps=ignore_deps, checked=checked)
    made.append(stack.name)

    if stack.name not in stacks:
        raise NoSuchStack(looking_for=stack.name, available=stacks.keys())

    sent_by = "bespin=={0}({1})".format(VERSION, os.environ.get("USER", "<unknown_user>"))
    if not is_dependency and stack.notify_stackdriver:
        if stack.stackdriver is NotSpecified:
            raise BespinError("Need to specify stackdriver options when specifying notify_stackdriver")
        stack.stackdriver.create_event("{0}-{1} - Deploying cloudformation".format(stack.stack_name, stack.stackdriver.format_version(stack.env)), sent_by)

    if not ignore_deps and not stack.ignore_deps:
        for dependency in stack.dependencies(stacks):
            self.deploy_stack(stacks[dependency], stacks, made=made, ignore_deps=True, checked=checked, start=start, is_dependency=True)

    # Should have all our dependencies now
    log.info("Making stack for '%s' (%s)", stack.name, stack.stack_name)
    self.build_stack(stack)

    if any(stack.build_after):
        for dependency in stack.build_after:
            self.deploy_stack(stacks[dependency], stacks, made=made, ignore_deps=True, checked=checked, start=start, is_dependency=True)

    if not is_dependency and stack.notify_stackdriver:
        stack.stackdriver.create_event("{0}-{1} - Finished cloudformation".format(stack.stack_name, stack.stackdriver.format_version(stack.env)), sent_by)

    if stack.artifact_retention_after_deployment:
        Builder().clean_old_artifacts(stack)

    self.confirm_deployment(stack, start)
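# For example (hypothetical stack names): given an `app` stack that depends on
# `vpc` and `database`, deploy_stack(app, stacks) recurses depth-first over
# stack.dependencies to deploy `vpc` and `database` first (tracking them in
# `made` so nothing deploys twice), then builds `app` itself, then deploys
# anything listed in app.build_after.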