def main(prog_args):
    atexit.register(cleanup)
    try:
        create_pid()
        parser = get_arg_parser()
        args = parser.parse_args(prog_args[1:])
        if args.time:
            all_data = []
            iteration_interval = parse_time(args.interval)
            time_delta = parse_time(args.time)
            stop_loop_at_time = datetime.now() + time_delta
            while datetime.now() < stop_loop_at_time:
                start_iteration = datetime.now()
                time_stats = stats.load_time(args.url)
                all_data.append(time_stats)

                # Sleep only for whatever is left of the interval after the measurement
                iteration_time_left = iteration_interval - (datetime.now() - start_iteration)
                sleep_seconds = iteration_time_left.total_seconds()
                if sleep_seconds > 0:
                    sleep(sleep_seconds)

            if args.graph:
                create_line_chart(all_data, html_output_file=args.graph_file)
            console.log(all_data)
        else:
            time_stats = stats.load_time(args.url)
            console.log(time_stats)
    except ScriptAlreadyRunningException as e:
        console.error(e)
        # Exit the script without cleaning up, because the instance that is
        # already running should do its own cleanup.
        os._exit(0)
    except WebsiteException as e:
        console.error(e)
    except KeyboardInterrupt:
        pass


if __name__ == '__main__':
    sys.exit(main(sys.argv))
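# For clarity: the loop above sleeps only for the part of the polling interval not
# spent on the measurement itself. A small, self-contained illustration of that
# arithmetic with made-up numbers (60s interval, 2.5s page load):
from datetime import timedelta

iteration_interval = timedelta(seconds=60)       # hypothetical --interval value
measurement_duration = timedelta(seconds=2.5)    # hypothetical stats.load_time() cost

iteration_time_left = iteration_interval - measurement_duration
print(iteration_time_left.total_seconds())       # 57.5 -> slept before the next poll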
def get_thresholds(self):
    try:
        thresholds = eval(self.bosslet_config.BILLING_THRESHOLDS)
        console.info("Creating {} billing alarms".format(len(thresholds)))
    except AttributeError:
        # Assume BILLING_THRESHOLDS is not provided
        console.error("Bosslet value 'BILLING_THRESHOLDS' needs to be defined before creating alarms")
        thresholds = None
    return thresholds
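# get_thresholds() eval()s the raw bosslet setting, so BILLING_THRESHOLDS is assumed
# to be a string holding a Python expression that yields the threshold values.
# A hypothetical value, shown standalone:
BILLING_THRESHOLDS = "list(range(1000, 10001, 1000))"   # made-up example setting
thresholds = eval(BILLING_THRESHOLDS)
print(len(thresholds), thresholds[:3])                   # 10 [1000, 2000, 3000]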
def exists(self):
    if super().exists() is False:
        return False

    thresholds = self.get_thresholds()
    if thresholds is None:
        return False
    threshold_names = ['Billing_{}'.format(str(t)) for t in thresholds]

    # No more than 100 records will work here
    resp = self.client_cw.describe_alarms(AlarmNamePrefix='Billing_', MaxRecords=100)
    alarm_names = [a['AlarmName'] for a in resp['MetricAlarms']]

    missing_alarms = 0
    for threshold_name in threshold_names:
        if threshold_name not in alarm_names:
            missing_alarms += 1
    if missing_alarms > 0:
        console.error("Missing {} alarms".format(missing_alarms))
    return missing_alarms == 0
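# Illustration of the naming scheme exists() relies on; the threshold values are made
# up. Every generated name must appear as an AlarmName in the describe_alarms()
# response for the check to pass.
thresholds = [1000, 5000, 10000]
threshold_names = ['Billing_{}'.format(t) for t in thresholds]
print(threshold_names)   # ['Billing_1000', 'Billing_5000', 'Billing_10000']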
def update_aws(self, resource_type, current, desired):
    """Compare loaded data against the current data in IAM and create or update
    IAM to reflect the loaded data

    Args:
        resource_type (str): One of - groups, roles, policies
        current (list[object]): List of objects loaded from IAM
        desired (list[object]): List of objects loaded from disk
    """
    key = {
        'groups': 'GroupName',
        'roles': 'RoleName',
        'policies': 'PolicyName',
    }[resource_type]

    lookup = {resource[key]: resource for resource in current}
    for resource in desired:
        resource_ = lookup.get(resource[key])
        if resource_ is None:  # Doesn't exist currently, create
            console.info("Creating {} {}".format(key[:-4], resource[key]))
            try:
                if resource_type == 'groups':
                    self.group_create(resource)
                elif resource_type == 'roles':
                    self.role_create(resource)
                elif resource_type == 'policies':
                    self.policy_create(resource)
            except ClientError as ex:
                if ex.response['Error']['Code'] == 'EntityAlreadyExists':
                    console.error("{} {} already exists, cannot load again.".format(key, resource[key]))
                else:
                    console.error("Problem creating {}: {}".format(resource_type, resource[key]))
                    console.error("\tDetails: {}".format(str(ex)))
        else:  # Currently exists, compare and update
            console.info("Updating {} {}".format(key[:-4], resource[key]))

            if resource['Path'] != resource_['Path']:
                console.warning("Paths differ for {} {}: '{}' != '{}'".format(
                    key, resource[key], resource['Path'], resource_['Path']))
                console.info("You will need to manually delete the old resource for the Path to be changed")
                continue

            if resource_type == 'groups':
                self.group_update(resource, resource_)
            elif resource_type == 'roles':
                self.role_update(resource, resource_)
            elif resource_type == 'policies':
                self.policy_update(resource, resource_)
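# Standalone sketch of the create-vs-update decision update_aws() makes, using the
# 'roles' key; both role definitions below are invented for the example.
current = [{'RoleName': 'existing-role', 'Path': '/service/'}]        # as loaded from IAM
desired = [{'RoleName': 'existing-role', 'Path': '/service/'},
           {'RoleName': 'new-role', 'Path': '/service/'}]             # as loaded from disk

key = 'RoleName'
lookup = {resource[key]: resource for resource in current}
for resource in desired:
    if lookup.get(resource[key]) is None:
        print("would create", resource[key])   # new-role
    else:
        print("would update", resource[key])   # existing-role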
def load_lambdas_on_s3(bosslet_config, lambda_name=None, lambda_dir=None):
    """Package up the lambda files and send them through the lambda build process
    where the lambda code zip is produced and uploaded to S3

    NOTE: This function is also used to build lambda layer code zips, the only
          requirement for a layer is that the files in the resulting zip should
          be in the correct subdirectory (`python/` for Python libraries) so that
          when a lambda uses the layer the libraries included in the layer can be
          correctly loaded

    NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to
          'multi_lambda' for backwards compatibility

    Args:
        bosslet_config (BossConfiguration): Configuration object of the stack the
                                            lambda will be deployed into
        lambda_name (str): Name of the lambda, which will be mapped to the name of
                           the lambda directory that contains the lambda's code
        lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that
                          contains the `lambda.yml` configuration file for the lambda

    Raises:
        BossManageError: If there was a problem with building the lambda code zip
                         or uploading it to the given S3 bucket
    """
    # For backwards compatibility build the multi_lambda code zip
    if lambda_name is None and lambda_dir is None:
        lambda_dir = 'multi_lambda'

    # Map from lambda_name to lambda_dir if needed
    if lambda_dir is None:
        try:
            lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
        except KeyError:
            console.error("Cannot build a lambda that doesn't use a code zip file")
            return None

    # To prevent rebuilding a lambda code zip multiple times during an individual
    # execution, memoize what has been built
    if lambda_dir in BUILT_ZIPS:
        console.debug('Lambda code {} has already been built recently, skipping...'.format(lambda_dir))
        return
    BUILT_ZIPS.append(lambda_dir)

    lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir))
    lambda_config = lambda_dir / 'lambda.yml'
    with lambda_config.open() as fh:
        lambda_config = yaml.full_load(fh.read())

    if lambda_config.get('layers'):
        for layer in lambda_config['layers']:
            # Layer names should end with `layer`
            if not layer.endswith('layer'):
                console.warning("Layer '{}' doesn't conform to naming conventions".format(layer))

            load_lambdas_on_s3(bosslet_config, lambda_dir=layer)

    console.debug("Building {} lambda code zip".format(lambda_dir))

    domain = bosslet_config.INTERNAL_DOMAIN
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = pathlib.Path(tempname.name + '.zip')
    tempname.close()
    console.debug('Using temp zip file: {}'.format(zipname))

    cwd = os.getcwd()

    # Copy the lambda files into the zip
    for filename in lambda_dir.glob('*'):
        zip.write_to_zip(str(filename), zipname, arcname=filename.name)

    # Copy the other files that should be included
    if lambda_config.get('include'):
        for src in lambda_config['include']:
            dst = lambda_config['include'][src]
            src_path, src_file = src.rsplit('/', 1)
            os.chdir(const.repo_path(src_path))

            # Generate dynamic configuration files, as needed
            if src_file == 'ndingest.git':
                with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
                    # Generate settings.ini file for ndingest
                    create_ndingest_settings(bosslet_config, tmpl)

            zip.write_to_zip(src_file, zipname, arcname=dst)
            os.chdir(cwd)

    # Currently any Docker CLI compatible container setup can be used (like podman)
    CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}'

    BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}'
    BUILD_ARGS = {
        'DOMAIN': domain,
        'BUCKET': bosslet_config.LAMBDA_BUCKET,
    }

    # DP NOTE: not sure if this should be in the bosslet_config, as it is more about
    #          the local dev environment instead of the stack's environment. Different
    #          maintainers may have different container commands installed.
    container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER')
    lambda_build_server = bosslet_config.LAMBDA_SERVER
    if lambda_build_server is None:
        staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging'))
        if not staging_target.exists():
            staging_target.mkdir()

        console.debug("Copying build zip to {}".format(staging_target))
        staging_zip = staging_target / (domain + '.zip')
        try:
            zipname.rename(staging_zip)
        except OSError:
            # rename only works within the same filesystem
            # Using the shell version, as using copy + chmod doesn't always work depending on the filesystem
            utils.run('mv {} {}'.format(zipname, staging_zip), shell=True)

        # Provide the AWS Region and Credentials (for S3 upload) via environmental variables
        env_extras = {
            'AWS_REGION': bosslet_config.REGION,
            'AWS_DEFAULT_REGION': bosslet_config.REGION,
        }

        if container_executable is None:
            BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files')
            CMD = BUILD_CMD.format(**BUILD_ARGS)

            if bosslet_config.PROFILE is not None:
                env_extras['AWS_PROFILE'] = bosslet_config.PROFILE

            console.info("calling build lambda on localhost")
        else:
            BUILD_ARGS['PREFIX'] = '/var/task'
            CMD = BUILD_CMD.format(**BUILD_ARGS)
            CMD = CONTAINER_CMD.format(EXECUTABLE=container_executable,
                                       HOST_DIR=const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'),
                                       RUNTIME=lambda_config['runtime'],
                                       CMD=CMD)

            if bosslet_config.PROFILE is not None:
                # Cannot set the profile as the container will not have the credentials file
                # So extract the underlying keys and provide those instead
                creds = bosslet_config.session.get_credentials()
                env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key
                env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

            console.info("calling build lambda in {}".format(container_executable))

        try:
            utils.run(CMD, env_extras=env_extras)
        except Exception as ex:
            raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex))
        finally:
            os.remove(staging_zip)
    else:
        BUILD_ARGS['PREFIX'] = '~'
        CMD = BUILD_CMD.format(**BUILD_ARGS)

        lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY
        lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
        ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user')
        bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else []
        ssh = SSHConnection(ssh_target, bastions)

        console.debug("Copying build zip to lambda-build-server")
        target_file = '~/staging/{}.zip'.format(domain)
        ret = ssh.scp(zipname, target_file, upload=True)
        console.debug("scp return code: " + str(ret))

        os.remove(zipname)

        console.info("calling build lambda on lambda-build-server")
        ret = ssh.cmd(CMD)
        if ret != 0:
            raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret))
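# Sketch of the lambda.yml shape load_lambdas_on_s3() consumes, based only on the keys
# it reads above ('runtime', 'layers', 'include'); the values are hypothetical.
import yaml

example_lambda_yml = """
runtime: python3.7
layers:
  - example_layer
include:
  lib/common.py: common.py
"""

lambda_config = yaml.full_load(example_lambda_yml)
print(lambda_config['runtime'])        # selects the lambci/lambda:build-<runtime> image
print(lambda_config.get('layers'))     # each layer is built recursively via load_lambdas_on_s3()
print(lambda_config.get('include'))    # maps repo-relative source path -> arcname inside the zip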
if args.command == 'billing':
    list = BillingList(args.bosslet_config)

    if args.delete_billing_alarms:
        list.delete_billing_alarms()
        sys.exit(0)
elif args.command == 'alerts':
    list = AlertList(args.bosslet_config)

    if args.create:
        if list.exists():
            console.warning("List already exists, not creating")
        else:
            if list.create() is False:
                sys.exit(1)
    elif not list.exists():
        console.error("List doesn't exist, create it first")
        sys.exit(2)

    if args.add:
        for address in args.add:
            list.subscribe(address)

    if args.rem:
        for address in args.rem:
            list.unsubscribe(address)

    if args.ls:
        list.list()