def subscribe(self, endpoint, protocol='email'):
    try:
        resp = self.client.subscribe(TopicArn=self.arn,
                                     Protocol=protocol,
                                     Endpoint=endpoint)
    except Exception as ex:
        console.warning("Could not subscribe address {} ({})".format(endpoint, ex))
def load(self, resource_type, filename, filters=[]):
    """Load previously exported data from disk

    Note: Loaded data is compared against the filter list and any filtered
          out items will produce a warning message

    Args:
        resource_type (str): One of - groups, roles, policies
        filename (str): Name of the file with data to load
        filters (list[str]): List of group/role/policy names that should be loaded

    Returns:
        list[objects]: List of filtered objects
    """
    with open(filename, 'r') as fh:
        data = fh.read()
        data = self.from_generic(data)  # Replace generic values
        data = json.loads(data)

    key = {
        'groups': 'GroupName',
        'roles': 'RoleName',
        'policies': 'PolicyName',
    }[resource_type]

    # Verify that the loaded data is valid
    objects = []
    for item in data:
        if item[key] not in filters:
            fmt = "{} {} not in whitelist, not importing"
            console.warning(fmt.format(resource_type, item[key]))
        else:
            objects.append(item)

    return objects
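# A minimal usage sketch for load(), assuming an importer object named `iam`
# that provides this method; the filename and group names below are
# hypothetical examples, not values from this repository.
#
#   groups = iam.load('groups', 'iam_export.json', filters=['admins', 'developers'])
#   # Any group in the file whose GroupName is not in the filter whitelist is
#   # skipped with a warning; only the whitelisted objects are returned and can
#   # then be passed to update_aws() as the desired state.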
def unsubscribe(self, endpoint):
    subs = self.client.list_subscriptions_by_topic(TopicArn=self.arn)['Subscriptions']
    for sub in subs:
        if sub['Endpoint'] == endpoint:
            try:
                resp = self.client.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
            except Exception as ex:
                console.warning("Could not unsubscribe address {} ({})".format(sub['Endpoint'], ex))
def update_aws(self, resource_type, current, desired):
    """Compare loaded data against the current data in IAM and create or
    update IAM to reflect the loaded data

    Args:
        resource_type (str): One of - groups, roles, policies
        current (list[object]): List of objects loaded from IAM
        desired (list[object]): List of objects loaded from disk
    """
    key = {
        'groups': 'GroupName',
        'roles': 'RoleName',
        'policies': 'PolicyName',
    }[resource_type]

    lookup = {resource[key]: resource for resource in current}

    for resource in desired:
        resource_ = lookup.get(resource[key])
        if resource_ is None:  # Doesn't exist currently, create
            console.info("Creating {} {}".format(key[:-4], resource[key]))
            try:
                if resource_type == 'groups':
                    self.group_create(resource)
                elif resource_type == 'roles':
                    self.role_create(resource)
                elif resource_type == 'policies':
                    self.policy_create(resource)
            except ClientError as ex:
                if ex.response['Error']['Code'] == 'EntityAlreadyExists':
                    console.error("{} {} already exists, cannot load again.".format(key, resource[key]))
                else:
                    console.error("Problem creating {}: {}".format(resource_type, resource[key]))
                    console.error("\tDetails: {}".format(str(ex)))
        else:  # Currently exists, compare and update
            console.info("Updating {} {}".format(key[:-4], resource[key]))

            if resource['Path'] != resource_['Path']:
                console.warning("Paths differ for {} {}: '{}' != '{}'".format(
                    key, resource[key], resource['Path'], resource_['Path']))
                console.info("You will need to manually delete the old resource for the Path to be changed")
                continue

            if resource_type == 'groups':
                self.group_update(resource, resource_)
            elif resource_type == 'roles':
                self.role_update(resource, resource_)
            elif resource_type == 'policies':
                self.policy_update(resource, resource_)
def pre_update(bosslet_config):
    # With version 2 the DNS records are now part of the CloudFormation template, so
    # remove the existing DNS record so that the update can happen
    console.warning("Removing existing Api public DNS entry, so CloudFormation can manage the DNS record")
    aws.route53_delete_records(bosslet_config.session,
                               bosslet_config.EXTERNAL_DOMAIN,
                               bosslet_config.names.public_dns('api'))
def pre_init(bosslet_config):
    # NOTE: In version 2 the public DNS records are managed by CloudFormation
    #       If the DNS record currently exists in Route53 the creation of the
    #       CloudFormation template will fail, so check to see if it exists
    #       due to previous launches of a Boss stack
    session = bosslet_config.session
    ext_domain = bosslet_config.EXTERNAL_DOMAIN
    ext_cname = bosslet_config.names.public_dns('auth')
    target = aws.get_dns_resource_for_domain_name(session, ext_cname, ext_domain)
    if target is not None:
        console.warning("Removing existing Auth public DNS entry, so CloudFormation can manage the DNS record")
        aws.route53_delete_records(session, ext_domain, ext_cname)
def policy_update(self, resource, resource_):
    """Compare and potentially update the referenced IAM Policy

    Args:
        resource (object): Desired IAM Policy definition
        resource_ (object): Current IAM Policy definition
    """
    policy = json_dumps(resource['PolicyDocument'])
    policy_ = json_dumps(resource_['PolicyDocument'])
    if policy != policy_:
        console.warning("Default policy differs")
        arn = self.policy_arn(resource)
        # IAM managed policies can only keep a limited number of versions,
        # so make room before creating the new default version
        self.delete_oldest_policy_version(resource['PolicyName'], arn)
        self.client.create_policy_version(PolicyArn=arn,
                                          PolicyDocument=policy,
                                          SetAsDefault=True)
def pre_update(bosslet_config):
    # Alert about the change that will happen
    if not console.confirm("This update will recreate the Vault cluster, proceed?", default=False):
        raise BossManageCanceled()

    # Save the existing data so we can rebuild Vault
    path = export_path(bosslet_config)
    with bosslet_config.call.vault() as vault:
        vault_data = vault.export("secret/")
        with open(path, 'w') as outfile:
            json.dump(vault_data, outfile, indent=3, sort_keys=True)
            print("Vault data exported to {}".format(path))

    # With version 2 the DNS records are now part of the CloudFormation template, so
    # remove the existing DNS record so that the update can happen
    console.warning("Removing existing Auth public DNS entry, so CloudFormation can manage the DNS record")
    aws.route53_delete_records(bosslet_config.session,
                               bosslet_config.EXTERNAL_DOMAIN,
                               bosslet_config.names.public_dns('auth'))
def load_tags(self):
    values = self.read_tags()
    for key in ('min', 'max', 'desired'):
        try:
            # Check to see if the loaded value is a number
            value = int(values[key])
        except ValueError:  # Not an int
            msg = '{} value for {} is not an integer'.format(key, self.name)
            console.warning(msg)
            value = 0
        except KeyError:  # Doesn't exist
            value = 0  # ??? Is zero a valid value? (use -1 instead?)

        # If the loaded value is invalid, warn and set to 1
        if key not in values or value == 0:
            if key not in values:
                fmt = 'No saved {} value for {}, setting to 1'
            else:
                fmt = 'Saved {} value is zero, setting to 1'
            msg = fmt.format(key, self.name)
            console.warning(msg)
            value = 1

        # Verify we won't override another value
        current = getattr(self, key)
        if current > 0 and current != value:
            fmt = 'Override current {} value ({}) with loaded value ({})?'
            msg = fmt.format(key, current, value)
            if not console.confirm(msg):
                value = current

        setattr(self, key, value)
def build_dependency_graph(action, bosslet_config, modules):
    """Given the list of bossDB modules (CloudFormation Stacks) to be installed
    and the action (Ex: create, update, delete), create a dependency graph and
    then order the modules correctly (the order is reversed for deletes)

    Args:
        action (str): The action that will be executed (create, update, delete, etc.)
        bosslet_config (BossConfiguration): bosslet_config based on a configuration
                                            file like hiderrt.boss or bossdb.boss
        modules (list[(str, module)]): List of tuples (cloudformation stack, module)

    Returns:
        list[(str, module)]: The input modules reordered so that dependencies
                             are processed correctly for the given action
    """
    class Node(object):
        """Directed Dependency Graph Node"""
        def __init__(self, name):
            self.name = name
            self.edges = []

        def __repr__(self):
            return "<Node: {}>".format(self.name)

        def depends_on(self, node):
            self.edges.append(node)

    def resolve(node, resolved, seen=None):
        """From a root node, add all sub elements and then the root"""
        if seen is None:
            seen = []
        seen.append(node)
        for edge in node.edges:
            if edge not in resolved:
                if edge in seen:
                    raise exceptions.CircularDependencyError(node.name, edge.name)
                resolve(edge, resolved, seen)
        resolved.append(node)

    nums = {}     # Mapping of config to index in modules list
    nodes = {}    # Mapping of config to Node
    no_deps = []  # List of configs that are not the target of a dependency,
                  # meaning that they are the root of a dependency tree

    # Populate variables
    for i in range(len(modules)):
        config = modules[i][0]
        nums[config] = i
        nodes[config] = Node(config)
        no_deps.append(config)

    # Look up the existing stacks so we can verify that all dependencies will
    # be satisfied (by either existing or being launched)
    existing = {
        k: v['StackStatus']
        for k, v in aws.get_existing_stacks(bosslet_config).items()
    }

    stop = False
    for name, status in existing.items():
        if status.endswith('_IN_PROGRESS'):
            console.warning("Config '{}' is in progress".format(name))
            stop = True
        elif status.endswith('_FAILED'):
            if name not in nodes:
                console.fail("Config '{}' is failed and should be acted upon".format(name))
                stop = True
            else:
                if action == 'delete':
                    console.info("Config '{}' is failed, deleting".format(name))
                elif status == 'UPDATE_ROLLBACK_FAILED':
                    console.fail("Config '{}' needs to be manually resolved in the AWS console".format(name))
                    stop = True
                else:  # CREATE, DELETE, or ROLLBACK FAILED
                    console.fail("Config '{}' is failed, needs to be deleted".format(name))
                    stop = True
    if stop:
        raise exceptions.BossManageError('Problems with existing stacks')

    # Create dependency graph and locate root nodes
    for config, module in modules:
        deps = module.__dict__.get('DEPENDENCIES')
        if deps is None:
            continue
        if isinstance(deps, str):
            deps = [deps]
        for dep in deps:
            if dep not in nodes:  # dependency not part of configs to launch
                if dep not in existing and action == "create":
                    raise exceptions.MissingDependencyError(config, dep)
            else:
                # If action is update, post-init, pre-init, verify that
                # the config already exists
                if action not in ('create', 'delete') and dep not in existing:
                    raise exceptions.MissingDependencyError(config, dep)

                nodes[config].depends_on(nodes[dep])
                try:
                    no_deps.remove(dep)
                except ValueError:
                    pass  # Doesn't exist in no_deps list

    # Resolve dependency graph
    resolved = []
    for no_dep in no_deps:  # Don't have any dependencies
        resolve(nodes[no_dep], resolved)

    # Reorder input
    reordered = [modules[nums[node.name]] for node in resolved]

    # Extra check
    if len(reordered) != len(modules):
        raise exceptions.CircularDependencyError()

    # Remove configs that don't need to be created
    if action == "create":
        for config, module in reordered[:]:
            if config in existing:
                # If the config already exists, don't try to create it again
                console.debug('Not executing create on {} as it already exists'.format(config))
                reordered.remove((config, module))
    # Remove configs that don't need to be deleted / updated / etc
    else:
        for config, module in reordered[:]:
            if config not in existing:
                # If the config doesn't exist, don't try to delete or update it
                console.debug("Not executing {} on {} as it doesn't exist".format(action, config))
                reordered.remove((config, module))

    if action == "delete":
        # Delete in reverse order
        reordered.reverse()

    # Make sure that the target configs are not currently being processed
    for config, module in reordered:
        if config in existing and existing[config].endswith("_IN_PROGRESS"):
            raise exceptions.DependencyInProgressError(config)

    return reordered
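# Hedged sketch of the DEPENDENCIES convention consumed above: a config module
# may declare the stacks it depends on as a string or a list of strings. The
# module and stack names here are hypothetical, for illustration only.
#
#   # cloud_formation/configs/example.py
#   DEPENDENCIES = ['core']  # or simply DEPENDENCIES = 'core'
#
# With modules = [('example', example), ('core', core)] and action 'create',
# build_dependency_graph() would return [('core', core), ('example', example)];
# for action 'delete' the resolved order is reversed.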
def role_update(self, resource, resource_):
    """Compare and potentially update the referenced IAM Role

    Args:
        resource (object): Desired IAM Role definition
        resource_ (object): Current IAM Role definition
    """
    policy = json_dumps(resource['AssumeRolePolicyDocument'])
    policy_ = json_dumps(resource_['AssumeRolePolicyDocument'])
    if policy != policy_:
        console.warning('Role policy document differs')
        self.iw.update_assume_role_policy(resource['RoleName'],
                                          resource['AssumeRolePolicyDocument'])

    lookup = {
        policy['PolicyName']: policy['PolicyDocument']
        for policy in resource_['RolePolicyList']
    }
    for policy in resource['RolePolicyList']:
        policy_ = lookup.get(policy['PolicyName'])
        if policy_ is None:
            self.iw.put_role_policy(resource['RoleName'],
                                    policy['PolicyName'],
                                    policy['PolicyDocument'])
        else:
            del lookup[policy['PolicyName']]

            document = json_dumps(policy['PolicyDocument'])
            document_ = json_dumps(policy_)
            if document != document_:
                self.iw.put_role_policy(resource['RoleName'],
                                        policy['PolicyName'],
                                        policy['PolicyDocument'])
    for policy in lookup.keys():
        # AWS has a policy that is not in the desired version, it should be deleted
        self.iw.delete_role_policy(resource['RoleName'], policy)

    for arn in resource['AttachedManagedPolicies']:
        if arn not in resource_['AttachedManagedPolicies']:
            self.iw.attach_role_policy(resource["RoleName"], arn)
    for arn in resource_['AttachedManagedPolicies']:
        if arn not in resource['AttachedManagedPolicies']:
            # AWS has a managed policy that is not in the desired version, it should be detached
            self.iw.detach_role_policy(resource["RoleName"], arn)

    lookup = {
        profile['InstanceProfileName']: profile
        for profile in resource_['InstanceProfileList']
    }
    for profile in resource['InstanceProfileList']:
        profile_ = lookup.get(profile['InstanceProfileName'])
        if profile_ is None:
            self.iw.create_instance_profile(profile['InstanceProfileName'],
                                            profile['Path'])
            self.iw.add_role_to_instance_profile(resource['RoleName'],
                                                 profile['InstanceProfileName'])
        else:
            del lookup[profile['InstanceProfileName']]

            if profile['Path'] != profile_['Path']:
                console.warning("Paths differ for {} Instance Profile {}: '{}' != '{}'".format(
                    resource['RoleName'], profile['InstanceProfileName'],
                    profile['Path'], profile_['Path']))
                console.info('You will need to manually delete the old instance profile for the Path to be changed')
    for profile in lookup.keys():
        # AWS has an instance profile that is not in the desired version, it should be deleted
        self.iw.remove_role_from_instance_profile(resource['RoleName'], profile)
        self.iw.delete_instance_profile(profile)
def load_lambdas_on_s3(bosslet_config, lambda_name=None, lambda_dir=None):
    """Package up the lambda files and send them through the lambda build process
    where the lambda code zip is produced and uploaded to S3

    NOTE: This function is also used to build lambda layer code zips. The only
          requirement for a layer is that the files in the resulting zip should
          be in the correct subdirectory (`python/` for Python libraries) so
          that when a lambda uses the layer the libraries included in the layer
          can be correctly loaded

    NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to
          'multi_lambda' for backwards compatibility

    Args:
        bosslet_config (BossConfiguration): Configuration object of the stack
                                            the lambda will be deployed into
        lambda_name (str): Name of the lambda, which will be mapped to the name
                           of the lambda directory that contains the lambda's code
        lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that
                          contains the `lambda.yml` configuration file for the lambda

    Raises:
        BossManageError: If there was a problem with building the lambda code zip
                         or uploading it to the given S3 bucket
    """
    # For backwards compatibility build the multi_lambda code zip
    if lambda_name is None and lambda_dir is None:
        lambda_dir = 'multi_lambda'

    # Map from lambda_name to lambda_dir if needed
    if lambda_dir is None:
        try:
            lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
        except KeyError:
            console.error("Cannot build a lambda that doesn't use a code zip file")
            return None

    # To prevent rebuilding a lambda code zip multiple times during an individual
    # execution, memoize what has been built
    if lambda_dir in BUILT_ZIPS:
        console.debug('Lambda code {} has already been built recently, skipping...'.format(lambda_dir))
        return
    BUILT_ZIPS.append(lambda_dir)

    lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir))
    lambda_config = lambda_dir / 'lambda.yml'
    with lambda_config.open() as fh:
        lambda_config = yaml.full_load(fh.read())

    if lambda_config.get('layers'):
        for layer in lambda_config['layers']:
            # Layer names should end with `layer`
            if not layer.endswith('layer'):
                console.warning("Layer '{}' doesn't conform to naming conventions".format(layer))

            load_lambdas_on_s3(bosslet_config, lambda_dir=layer)

    console.debug("Building {} lambda code zip".format(lambda_dir))

    domain = bosslet_config.INTERNAL_DOMAIN
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = pathlib.Path(tempname.name + '.zip')
    tempname.close()
    console.debug('Using temp zip file: {}'.format(zipname))

    cwd = os.getcwd()

    # Copy the lambda files into the zip
    for filename in lambda_dir.glob('*'):
        zip.write_to_zip(str(filename), zipname, arcname=filename.name)

    # Copy the other files that should be included
    if lambda_config.get('include'):
        for src in lambda_config['include']:
            dst = lambda_config['include'][src]
            src_path, src_file = src.rsplit('/', 1)

            os.chdir(const.repo_path(src_path))

            # Generate dynamic configuration files, as needed
            if src_file == 'ndingest.git':
                with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
                    # Generate settings.ini file for ndingest.
                    create_ndingest_settings(bosslet_config, tmpl)

            zip.write_to_zip(src_file, zipname, arcname=dst)
            os.chdir(cwd)

    # Currently any Docker CLI compatible container setup can be used (like podman)
    CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}'

    BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}'
    BUILD_ARGS = {
        'DOMAIN': domain,
        'BUCKET': bosslet_config.LAMBDA_BUCKET,
    }

    # DP NOTE: Not sure if this should be in the bosslet_config, as it is more
    #          about the local dev environment instead of the stack's environment.
    #          Different maintainers may have different container commands installed.
    container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER')
    lambda_build_server = bosslet_config.LAMBDA_SERVER
    if lambda_build_server is None:
        staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging'))
        if not staging_target.exists():
            staging_target.mkdir()

        console.debug("Copying build zip to {}".format(staging_target))
        staging_zip = staging_target / (domain + '.zip')
        try:
            zipname.rename(staging_zip)
        except OSError:
            # rename only works within the same filesystem
            # Using the shell version, as using copy + chmod doesn't always work depending on the filesystem
            utils.run('mv {} {}'.format(zipname, staging_zip), shell=True)

        # Provide the AWS Region and Credentials (for S3 upload) via environment variables
        env_extras = {
            'AWS_REGION': bosslet_config.REGION,
            'AWS_DEFAULT_REGION': bosslet_config.REGION,
        }

        if container_executable is None:
            BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files')
            CMD = BUILD_CMD.format(**BUILD_ARGS)

            if bosslet_config.PROFILE is not None:
                env_extras['AWS_PROFILE'] = bosslet_config.PROFILE

            console.info("calling build lambda on localhost")
        else:
            BUILD_ARGS['PREFIX'] = '/var/task'
            CMD = BUILD_CMD.format(**BUILD_ARGS)
            CMD = CONTAINER_CMD.format(EXECUTABLE=container_executable,
                                       HOST_DIR=const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'),
                                       RUNTIME=lambda_config['runtime'],
                                       CMD=CMD)

            if bosslet_config.PROFILE is not None:
                # Cannot set the profile as the container will not have the credentials file,
                # so extract the underlying keys and provide those instead
                creds = bosslet_config.session.get_credentials()
                env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key
                env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

            console.info("calling build lambda in {}".format(container_executable))

        try:
            utils.run(CMD, env_extras=env_extras)
        except Exception as ex:
            raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex))
        finally:
            os.remove(staging_zip)
    else:
        BUILD_ARGS['PREFIX'] = '~'
        CMD = BUILD_CMD.format(**BUILD_ARGS)

        lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY
        lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
        ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user')
        bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else []
        ssh = SSHConnection(ssh_target, bastions)

        console.debug("Copying build zip to lambda-build-server")
        target_file = '~/staging/{}.zip'.format(domain)
        ret = ssh.scp(zipname, target_file, upload=True)
        console.debug("scp return code: " + str(ret))

        os.remove(zipname)

        console.info("calling build lambda on lambda-build-server")
        ret = ssh.cmd(CMD)
        if ret != 0:
            raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret))
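# Hedged sketch of the lambda.yml structure that load_lambdas_on_s3() reads,
# based only on the keys accessed above ('runtime', 'layers', 'include'); the
# specific values are hypothetical. After yaml.full_load() the config would be
# a dict such as:
#
#   {
#       'runtime': 'python3.7',                      # container image tag suffix
#       'layers': ['example_layer'],                 # other lambda dirs built first
#       'include': {'lib/helper.py': 'helper.py'},   # repo path -> arcname in the zip
#   }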
def stopInstances(bosslet_config):
    """Method used to stop currently running instances"""
    filename = VAULT_FILE.format(bosslet_config.names.vault.dns)

    asg_problem = False
    asgs = load_aws(bosslet_config, 'off')
    with console.status_line(spin=True) as status:
        for asg in asgs:
            status('Working on ASG {}'.format(asg.name))

            ##############################
            # Pre-stop actions or checks #
            ##############################
            if 'Vault' in asg.name:
                if DRY_RUN:
                    print("Export Vault data into {}".format(filename))
                else:
                    print("Exporting current Vault data")
                    try:
                        with bosslet_config.call.vault() as vault:
                            # TODO: figure out what configuration information should be exported
                            data = vault.export("secret/")
                            with open(filename, 'w') as fh:
                                json.dump(data, fh, indent=3, sort_keys=True)
                            console.warning("Please protect {} as it contains personal passwords".format(filename))
                            console.green("Successful Vault export")
                    except Exception as ex:
                        console.fail("Unsuccessful vault export")
                        print(ex)
                        print("Cannot continue")
                        return
            elif 'Auth' in asg.name:
                if not bosslet_config.AUTH_RDS:
                    console.warning("Cannot turn off Auth ASG without an external RDS database")
                    continue

            ##################
            # Stop Instances #
            ##################
            print("Turning off {}".format(asg.name))
            try:
                asg.stop()
                console.green("{} is off".format(asg.name))
            except Exception as ex:
                asg_problem = True
                console.warning("{} is not off".format(asg.name))
                print(ex)
                # XXX: What to do?

            ###############################
            # Post-stop actions or checks #
            ###############################
            # None right now

    if asg_problem:
        # XXX: The problem is that if 'off' is re-run the ASGs that were previously
        #      turned off will have 0/0/0 saved to the DEFINITION_FILE and mess up
        #      turning ASGs back on
        console.warning("Problem turning off bosslet")
    else:
        console.blue("TheBoss is off")
def startInstances(bosslet_config):
    """Method used to start necessary instances"""
    # Verify Vault data exists before continuing
    filename = VAULT_FILE.format(bosslet_config.names.vault.dns)
    if not os.path.exists(filename):
        msg = "File {} doesn't exist, cannot reimport Vault data".format(filename)
        if DRY_RUN:
            console.warning(msg)
        else:
            console.fail(msg)
            return

    asg_problem = False
    asgs = load_aws(bosslet_config, 'on')
    with console.status_line(spin=True) as status:
        for asg in asgs:
            status('Working on {}'.format(asg.name))
            # TODO: Add error handling
            #       If Vault error, stop
            #       If error starting ASG, log error and continue

            ###############################
            # Pre-start actions or checks #
            ###############################
            if 'Auth' in asg.name:
                if not bosslet_config.AUTH_RDS:
                    console.warning("Skipping starting Auth ASG, as it was not stopped")
                    continue

            ###################
            # Start Instances #
            ###################
            print("Turning on {}".format(asg.name))
            try:
                asg.start()
                console.green("{} is on".format(asg.name))
            except Exception as ex:
                asg_problem = True
                console.warning("{} is not on".format(asg.name))
                print(ex)

            ################################
            # Post-start actions or checks #
            ################################
            if 'Vault' in asg.name:
                if DRY_RUN:
                    print("Waiting for Vault to start")
                    print("Vault import {}".format(filename))
                    continue

                print("Waiting for Vault to start")
                # XXX: May need to wait a little before creating call, so that
                #      vault instances are named and can be resolved
                bosslet_config.call.check_vault(constants.TIMEOUT_VAULT)

                with bosslet_config.call.vault() as vault:
                    print("Importing previous Vault data")
                    try:
                        with open(filename) as fh:
                            data = json.load(fh)
                            vault.import_(data)
                        console.green("Successful import")
                    except Exception as ex:
                        console.fail("Unsuccessful import")
                        print(ex)
                        print("Cannot continue restore")
                        return

    if asg_problem:
        console.warning("Problems turning on bosslet")
    else:
        console.blue("Bosslet is on")
import argparse
import sys
import os

import alter_path
from lib import aws
from lib import configuration
from lib import console

from pprint import pprint

try:
    import simpleeval
    eval = simpleeval.EvalWithCompoundTypes(functions={'range': range}).eval
except ImportError:
    console.warning("Library 'simpleeval' not available, using the default python implementation")

class SubscriptionList(object):
    def __init__(self, bosslet_config, topic):
        self.bosslet_config = bosslet_config
        self.client = bosslet_config.session.client('sns')
        self.topic = topic
        self.arn = self.to_arn(topic)

    def to_arn(self, topic):
        return 'arn:aws:sns:{}:{}:{}'.format(self.bosslet_config.REGION,
                                             self.bosslet_config.ACCOUNT_ID,
                                             topic)

    def create(self):
        console.info("Creating {} SNS topic".format(self.topic))
        # create_topic is idempotent and returns the existing topic's ARN if
        # a topic with this name already exists
        self.client.create_topic(Name=self.topic)
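# A minimal usage sketch combining SubscriptionList with the subscribe() and
# unsubscribe() methods defined elsewhere in this class; the topic name and
# email address are hypothetical examples.
#
#   subs = SubscriptionList(bosslet_config, 'example-alerts')
#   subs.create()
#   subs.subscribe('user@example.com')   # SNS sends a confirmation email
#   subs.unsubscribe('user@example.com')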
        if name is None:
            unlabled.append(inst)
        elif is_excluded(name):
            pass
        elif state == 'stopped':
            stopped[name] = inst
        elif state == 'running':
            running[name] = inst

    kwargs['NextToken'] = resp.get('NextToken')

if len(running) == 0:
    console.info("No unexpected running EC2 instances exist")

if not args.quiet:
    for inst in unlabled:
        console.warning('Instance {} is not labeled'.format(inst['InstanceId']))

    for name, inst in items(stopped):
        console.warning('Instance {} ({}) is stopped'.format(inst['InstanceId'], name))

if args.delete:
    for name, inst in items(running):
        console.debug('Deleting EC2 instance {} ({})'.format(inst['InstanceId'], name))
    if console.confirm('Are you sure', default=False):
        for inst in running.values():
            client.terminate_instances(InstanceIds=[inst['InstanceId']])
else:
    for name, inst in items(running):
        console.debug('Instance {} ({})'.format(inst['InstanceId'], name))