def load_lambdas_on_s3(session, domain, bucket):
    """Zip up spdb, bossutils, lambda and lambda_utils. Upload to S3.

    Uses the lambda build server (an Amazon Linux AMI) to compile C code and
    prepare the virtualenv that's ultimately contained in the zip file placed
    in S3.

    Args:
        session (Session): boto3.Session
        domain (string): The VPC's domain name such as integration.boss.
        bucket (string): Name of the S3 bucket passed to the remote
            makedomainenv script for the final upload.
    """
    # NamedTemporaryFile is used only to obtain a unique path; the actual
    # archive is created at <name>.zip by zip.write_to_zip below.
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = tempname.name + '.zip'
    tempname.close()
    print('Using temp zip file: ' + zipname)

    cwd = os.getcwd()
    try:
        os.chdir(const.repo_path("salt_stack", "salt", "spdb", "files"))
        zip.write_to_zip('spdb.git', zipname, False)
        os.chdir(cwd)

        os.chdir(const.repo_path("salt_stack", "salt", "boss-tools", "files", "boss-tools.git"))
        zip.write_to_zip('bossutils', zipname)
        zip.write_to_zip('lambda', zipname)
        zip.write_to_zip('lambdautils', zipname)
        os.chdir(cwd)

        with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
            # Generate settings.ini file for ndingest.
            create_ndingest_settings(domain, tmpl)

        os.chdir(const.repo_path("salt_stack", "salt", "ndingest", "files"))
        zip.write_to_zip('ndingest.git', zipname)
    finally:
        # Restore the working directory even if one of the zip/template steps
        # raises, so the caller's process isn't left in a repo subdirectory.
        os.chdir(cwd)

    print("Copying local modules to lambda-build-server")

    # Copy the zip file to the lambda build server
    lambda_build_server = aws.get_lambda_server(session)
    lambda_build_server_key = aws.get_lambda_server_key(session)
    lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
    ssh = SSHConnection(lambda_build_server_key, (lambda_build_server, 22, 'ec2-user'))
    target_file = "sitezips/{}.zip".format(domain)
    ret = ssh.scp(zipname, target_file, upload=True)
    print("scp return code: " + str(ret))

    os.remove(zipname)

    # This section will run makedomainenv on lambda-build-server
    print("calling makedomainenv on lambda-build-server")
    cmd = 'source /etc/profile && source ~/.bash_profile && /home/ec2-user/makedomainenv {} {}'.format(domain, bucket)
    ssh.cmd(cmd)
def load_lambdas_on_s3(session, domain, bucket):
    """Zip up spdb, bossutils, lambda and lambda_utils. Upload to S3.

    Uses the lambda build server (an Amazon Linux AMI) to compile C code and
    prepare the virtualenv that's ultimately contained in the zip file placed
    in S3.

    Args:
        session (Session): boto3.Session
        domain (string): The VPC's domain name such as integration.boss.
        bucket (string): Name of the S3 bucket passed to the remote
            makedomainenv script for the final upload.
    """
    # NamedTemporaryFile is used only to obtain a unique path; the actual
    # archive is created at <name>.zip by zip.write_to_zip below.
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = tempname.name + '.zip'
    tempname.close()
    print('Using temp zip file: ' + zipname)

    cwd = os.getcwd()
    try:
        os.chdir(const.repo_path("salt_stack", "salt", "spdb", "files"))
        zip.write_to_zip('spdb.git', zipname, False)
        os.chdir(cwd)

        os.chdir(const.repo_path("salt_stack", "salt", "boss-tools", "files", "boss-tools.git"))
        zip.write_to_zip('bossutils', zipname)
        zip.write_to_zip('lambda', zipname)
        zip.write_to_zip('lambdautils', zipname)
        os.chdir(cwd)

        with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
            # Generate settings.ini file for ndingest.
            create_ndingest_settings(domain, tmpl)

        os.chdir(const.repo_path("salt_stack", "salt", "ndingest", "files"))
        zip.write_to_zip('ndingest.git', zipname)
        os.chdir(cwd)

        os.chdir(const.repo_path("lib"))
        zip.write_to_zip('heaviside.git', zipname)

        # Let lambdas look up names by creating a bossnames module.
        zip.write_to_zip('names.py', zipname, arcname='bossnames/names.py')
        zip.write_to_zip('hosts.py', zipname, arcname='bossnames/hosts.py')
        zip.write_to_zip('bucket_object_tags.py', zipname, arcname='bossnames/bucket_object_tags.py')
        zip.write_to_zip('__init__.py', zipname, arcname='bossnames/__init__.py')
    finally:
        # Restore the working directory even if one of the zip/template steps
        # raises, so the caller's process isn't left in a repo subdirectory.
        os.chdir(cwd)

    print("Copying local modules to lambda-build-server")

    # Copy the zip file to the lambda build server
    lambda_build_server = aws.get_lambda_server(session)
    lambda_build_server_key = aws.get_lambda_server_key(session)
    lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
    ssh = SSHConnection(lambda_build_server_key, (lambda_build_server, 22, 'ec2-user'))
    target_file = "sitezips/{}.zip".format(domain)
    ret = ssh.scp(zipname, target_file, upload=True)
    print("scp return code: " + str(ret))

    os.remove(zipname)

    # This section will run makedomainenv on lambda-build-server
    print("calling makedomainenv on lambda-build-server")
    cmd = 'source /etc/profile && source ~/.bash_profile && /home/ec2-user/makedomainenv {} {}'.format(domain, bucket)
    ssh.cmd(cmd)
def load_lambdas_on_s3(bosslet_config, lambda_name = None, lambda_dir = None):
    """Package up the lambda files and send them through the lambda build process
    where the lambda code zip is produced and uploaded to S3

    NOTE: This function is also used to build lambda layer code zips, the only
          requirement for a layer is that the files in the resulting zip should
          be in the correct subdirectory (`python/` for Python libraries) so
          that when a lambda uses the layer the libraries included in the layer
          can be correctly loaded

    NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to
          'multi_lambda' for backwards compatibility

    Args:
        bosslet_config (BossConfiguration): Configuration object of the stack the
                                            lambda will be deployed into
        lambda_name (str): Name of the lambda, which will be mapped to the name of
                           the lambda directory that contains the lambda's code
        lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that
                          contains the `lambda.yml` configuration file for the lambda

    Raises:
        BossManageError: If there was a problem with building the lambda code zip
                         or uploading it to the given S3 bucket
    """
    # For backwards compatibility build the multi_lambda code zip
    if lambda_name is None and lambda_dir is None:
        lambda_dir = 'multi_lambda'

    # Map from lambda_name to lambda_dir if needed
    if lambda_dir is None:
        try:
            lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
        except KeyError:
            console.error("Cannot build a lambda that doesn't use a code zip file")
            return None

    # To prevent rebuilding a lambda code zip multiple times during an individual
    # execution, memoize what has been built
    if lambda_dir in BUILT_ZIPS:
        console.debug('Lambda code {} has already be build recently, skipping...'.format(lambda_dir))
        return
    BUILT_ZIPS.append(lambda_dir)

    lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir))
    lambda_config = lambda_dir / 'lambda.yml'
    with lambda_config.open() as fh:
        lambda_config = yaml.full_load(fh.read())

    # Recursively build any lambda layers this lambda depends on first
    if lambda_config.get('layers'):
        for layer in lambda_config['layers']:
            # Layer names should end with `layer`
            if not layer.endswith('layer'):
                console.warning("Layer '{}' doesn't conform to naming conventions".format(layer))

            load_lambdas_on_s3(bosslet_config, lambda_dir=layer)

    console.debug("Building {} lambda code zip".format(lambda_dir))

    domain = bosslet_config.INTERNAL_DOMAIN
    # NamedTemporaryFile is used only to obtain a unique path; the actual
    # archive is created at <name>.zip by zip.write_to_zip below.
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = pathlib.Path(tempname.name + '.zip')
    tempname.close()
    console.debug('Using temp zip file: {}'.format(zipname))

    cwd = os.getcwd()

    # Copy the lambda files into the zip
    for filename in lambda_dir.glob('*'):
        zip.write_to_zip(str(filename), zipname, arcname=filename.name)

    # Copy the other files that should be included
    if lambda_config.get('include'):
        try:
            for src in lambda_config['include']:
                dst = lambda_config['include'][src]
                src_path, src_file = src.rsplit('/', 1)

                os.chdir(const.repo_path(src_path))

                # Generate dynamic configuration files, as needed
                if src_file == 'ndingest.git':
                    with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
                        # Generate settings.ini file for ndingest.
                        create_ndingest_settings(bosslet_config, tmpl)

                zip.write_to_zip(src_file, zipname, arcname=dst)
        finally:
            # Restore the working directory even if a zip/template step raises,
            # so the process isn't left in a repo subdirectory.
            os.chdir(cwd)

    # Currently any Docker CLI compatible container setup can be used (like podman)
    CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}'

    BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}'
    BUILD_ARGS = {
        'DOMAIN': domain,
        'BUCKET': bosslet_config.LAMBDA_BUCKET,
    }

    # DP NOTE: not sure if this should be in the bosslet_config, as it is more
    #          about the local dev environment instead of the stack's environment.
    #          Different maintainer may have different container commands installed.
    container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER')
    lambda_build_server = bosslet_config.LAMBDA_SERVER
    if lambda_build_server is None:
        # No build server configured: build locally (directly or in a container)
        staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging'))
        if not staging_target.exists():
            staging_target.mkdir()

        console.debug("Copying build zip to {}".format(staging_target))
        staging_zip = staging_target / (domain + '.zip')
        try:
            zipname.rename(staging_zip)
        except OSError:
            # rename only works within the same filesystem
            # Using the shell version, as using copy + chmod doesn't always work
            # depending on the filesystem
            utils.run('mv {} {}'.format(zipname, staging_zip), shell=True)

        # Provide the AWS Region and Credentials (for S3 upload) via environmental variables
        env_extras = {
            'AWS_REGION': bosslet_config.REGION,
            'AWS_DEFAULT_REGION': bosslet_config.REGION,
        }

        if container_executable is None:
            BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files')
            CMD = BUILD_CMD.format(**BUILD_ARGS)

            if bosslet_config.PROFILE is not None:
                env_extras['AWS_PROFILE'] = bosslet_config.PROFILE

            console.info("calling build lambda on localhost")
        else:
            BUILD_ARGS['PREFIX'] = '/var/task'
            CMD = BUILD_CMD.format(**BUILD_ARGS)
            CMD = CONTAINER_CMD.format(EXECUTABLE = container_executable,
                                       HOST_DIR = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'),
                                       RUNTIME = lambda_config['runtime'],
                                       CMD = CMD)

            if bosslet_config.PROFILE is not None:
                # Cannot set the profile as the container will not have the credentials file
                # So extract the underlying keys and provide those instead
                creds = bosslet_config.session.get_credentials()
                env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key
                env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

            console.info("calling build lambda in {}".format(container_executable))

        try:
            utils.run(CMD, env_extras=env_extras)
        except Exception as ex:
            # Chain the original exception so the build failure's cause is preserved
            raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex)) from ex
        finally:
            os.remove(staging_zip)
    else:
        # Build remotely on the configured lambda build server
        BUILD_ARGS['PREFIX'] = '~'
        CMD = BUILD_CMD.format(**BUILD_ARGS)

        lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY
        lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
        ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user')
        bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else []
        ssh = SSHConnection(ssh_target, bastions)

        console.debug("Copying build zip to lambda-build-server")
        target_file = '~/staging/{}.zip'.format(domain)
        ret = ssh.scp(zipname, target_file, upload=True)
        console.debug("scp return code: " + str(ret))

        os.remove(zipname)

        console.info("calling build lambda on lambda-build-server")
        ret = ssh.cmd(CMD)
        if ret != 0:
            raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret))
".", boss_position)[boss_position] bastion = aws.machine_lookup(session, bastion_host) if args.private_ip: private = args.internal else: private = aws.machine_lookup(session, args.internal, public_ip=False) ssh = SSHConnection(args.ssh_key, private, bastion) if args.command in ("ssh", ): ssh.shell() elif args.command in ("scp", ): ret = ssh.scp(*args.arguments) sys.exit(ret) elif args.command in ("ssh-cmd", ): ret = ssh.cmd(*args.arguments) sys.exit(ret) elif args.command in ("ssh-tunnel", ): ssh.external_tunnel(*args.arguments) elif args.command in ("ssh-all", ): addrs = aws.machine_lookup_all(session, args.internal, public_ip=False) for addr in addrs: print("{} at {}".format(args.internal, addr)) ssh = SSHConnection(args.ssh_key, addr, bastion) ssh.cmd(*args.arguments) print() elif args.command in vault.COMMANDS: with vault_tunnel(args.ssh_key, bastion): vault.COMMANDS[args.command](Vault(args.internal, private), *args.arguments) else:
# the bastion server (being an AWS AMI) has a differnt username if args.user is None: user = "******" if args.hostname.startswith("bastion") else "ubuntu" else: user = args.user if args.bosslet_config.outbound_bastion: bastions = [args.bosslet_config.outbound_bastion] else: bastions = [] ssh_key = keypair_to_file( args.key) if args.key else args.bosslet_config.ssh_key ssh_target = SSHTarget(ssh_key, args.ip, 22, user) ssh = SSHConnection(ssh_target, bastions) if args.cmd: ret = ssh.cmd(args.cmd) if args.scp: a, b = args.scp.split() t_a, a = a.split(":") t_b, b = b.split(":") scp_args = { t_a.lower() + '_file': a, t_b.lower() + '_file': b, 'upload': t_a.lower() == "local" } ret = ssh.scp(**scp_args) else: ret = ssh.shell() sys.exit(ret)