def compute(file, yaml_file, stack, section, client, bucket, key):
    t0 = time()
    img_fn = os.path.join(os.environ['ROOT_DIR'], file)
    setup_download_from_s3(file, recursive=False)
    run('python {0}/extractPatches.py {1} {2}'.format(os.environ['REPO_DIR'], img_fn, yaml_file))

    params = configuration(yaml_file).getParams()
    size_thresholds = params['normalization']['size_thresholds']

    # Derive the local cell directory from the image filename (basename, no extension)
    dot = img_fn.rfind('.')
    slash = img_fn.rfind('/')
    local_dir = 'cells/' + img_fn[slash + 1:dot] + '_cells/'

    for size in size_thresholds:
        key_item = 'size_of_' + str(size)
        local_fp = local_dir + str(size) + '.bin'
        s3_fp = stack + '/cells/' + str(section) + '_cells/' + str(size) + '.bin'

        def s3_exist(s3_fp):
            try:
                client.stat_object(bucket, s3_fp)
                return True
            except Exception:
                return False

        # Retry the upload until the object is visible in S3
        while not s3_exist(s3_fp):
            setup_upload_from_s3(s3_fp, local_fp, recursive=False)
        report = client.stat_object(bucket, s3_fp)
        key[key_item] = int(report.size / 1000)
        os.remove(os.path.join(os.environ['ROOT_DIR'], local_fp))

    print(file, 'finished in', time() - t0, 'seconds')
    os.remove(img_fn)
    return key
def prepare(self):
    """Prepare directory structure for System C."""
    self.mount_tmpfs()

    if not self.chroot:
        # Create + mount a disk for QEMU to use
        disk_path = os.path.join(self.tmp_dir, 'disk.img')
        self.dev_name = create_disk(disk_path, "msdos", "ext4", '8G')
        self.rootfs_dir = os.path.join(self.tmp_dir, 'mnt')
        os.mkdir(self.rootfs_dir)
        mount(self.dev_name + "p1", self.rootfs_dir, 'ext4')

        # Use chown to allow the executing user to access it
        run('sudo', 'chown', getpass.getuser(), self.dev_name)
        run('sudo', 'chown', getpass.getuser(), self.rootfs_dir)
    else:
        self.rootfs_dir = self.tmp_dir

    self.get_packages()

    copytree(self.sys_dir, self.rootfs_dir, ignore=shutil.ignore_patterns("tmp"))

    # Unmount tmp/mnt if it exists
    if not self.chroot:
        umount(self.rootfs_dir)
def vscode(settings):
    config_path = settings['extensions']
    log.info("Updating Visual Studio Code extensions")

    # Show currently installed extensions
    cmd = ['code', '--list-extensions']
    current_extensions = set(map(str.strip, run(cmd, cap='stdout').splitlines()))
    with open(config_path) as fh:
        expected_extensions = set(map(str.strip, fh))

    fmt = lambda s: ', '.join(sorted(s, key=str.lower))
    log.debug(f"Current extensions are: {fmt(current_extensions)}")
    log.debug(f"Expected extensions are: {fmt(expected_extensions)}")

    # Install any missing extensions
    missing = expected_extensions - current_extensions
    for package in sorted(missing):
        log.info(f"Installing missing package: {package}")
        run(['code', '--install-extension', package])

    # Report any installed extensions that aren't in source control
    unexpected = current_extensions - expected_extensions
    if unexpected:
        log.info(f"The following extensions are installed but not in source control: {fmt(unexpected)}")
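# A hypothetical extensions file for vscode() above -- it is assumed to hold one
# Visual Studio Code marketplace identifier per line, matching the format that
# `code --list-extensions` prints:
#
#   ms-python.python
#   rust-lang.rust-analyzer
#   vscodevim.vim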
def __del__(self):
    if not self.preserve_tmp:
        if not self.chroot:
            print(f"Deleting {self.dev_name}")
            run('sudo', 'losetup', '-d', self.dev_name)
        print(f"Unmounting tmpfs from {self.tmp_dir}")
        umount(self.tmp_dir)
        os.rmdir(self.tmp_dir)
def setup_upload_from_s3(rel_fp, recursive=True):
    s3_fp = 's3://mousebrainatlas-data/' + rel_fp
    local_fp = os.environ['ROOT_DIR'] + rel_fp

    if recursive:
        run('aws s3 cp --recursive {0} {1}'.format(local_fp, s3_fp))
    else:
        run('aws s3 cp {0} {1}'.format(local_fp, s3_fp))
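# Hypothetical usage of the helper above, assuming ROOT_DIR ends with a trailing
# slash (the helper concatenates it directly onto the relative path). A single
# relative path is mirrored between the local tree and the S3 bucket:
#
#   os.environ['ROOT_DIR'] = '/data/'
#   setup_upload_from_s3('cells/section1_cells/')                # recursive directory copy
#   setup_upload_from_s3('cells/section1.bin', recursive=False)  # single file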
def test_run_call_cmd():
    cmd = ['echo', 'hello']
    with patch('lib.utils.subprocess.run') as run:
        utils.run(cmd)
        run.assert_called_with(
            cmd,
            check=True,
            shell=False,
            stdout=None,
            stderr=None,
            executable=None,
            input=None,
            cwd=None,
            env=None,
        )
def test_run_call_shell_output():
    cmd = 'echo "hello"'
    with patch('lib.utils.subprocess.run') as run:
        utils.run(cmd, cap=True)
        run.assert_called_with(
            cmd,
            check=True,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            executable=EXECUTABLE,
            input=None,
            cwd=None,
            env=None,
        )
def test_run_call_cmd_cap_input():
    cmd = ['cat']
    input = 'hello'
    with patch('lib.utils.subprocess.run') as run:
        utils.run(cmd, cap=True, input=input)
        run.assert_called_with(
            cmd,
            check=True,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            executable=None,
            input=input.encode(),
            cwd=None,
            env=None,
        )
def test_run_call_shell_input():
    cmd = 'cat'
    input = 'hello'
    with patch('lib.utils.subprocess.run') as run:
        utils.run(cmd, input=input)
        run.assert_called_with(
            cmd,
            check=True,
            shell=True,
            stdout=None,
            stderr=None,
            executable=EXECUTABLE,
            input=input.encode(),
            cwd=None,
            env=None,
        )
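# A minimal sketch of the lib.utils.run wrapper that the four tests above appear
# to pin down; this is an inference from the asserted subprocess.run calls, not
# the actual implementation. A str command runs through the shell (with the
# assumed EXECUTABLE as interpreter), a list does not; cap pipes and decodes
# output; input is encoded before being handed to subprocess.
import subprocess

EXECUTABLE = '/bin/bash'  # assumed value of the EXECUTABLE constant in the tests

def run(cmd, cap=False, input=None, cwd=None, env=None):
    shell = isinstance(cmd, str)
    result = subprocess.run(
        cmd,
        check=True,
        shell=shell,
        stdout=subprocess.PIPE if cap else None,
        stderr=subprocess.PIPE if cap else None,
        executable=EXECUTABLE if shell else None,
        input=input.encode() if input is not None else None,
        cwd=cwd,
        env=env,
    )
    return result.stdout.decode() if cap else None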
def bootstrap(args, tmp_dir, initramfs_path):
    """Kick off bootstrap process."""
    print("Bootstrapping %s" % (args.arch))

    if args.chroot:
        find_chroot = """
import shutil
print(shutil.which('chroot'))
"""
        chroot_binary = run('sudo', 'python3', '-c', find_chroot,
                            capture_output=True).stdout.decode().strip()
        init = os.path.join(os.sep, 'bootstrap-seeds', 'POSIX', args.arch,
                            'kaem-optional-seed')
        run('sudo', 'env', '-i', 'PATH=/bin', chroot_binary, tmp_dir, init)
        return

    if args.minikernel:
        if os.path.isdir('kritis-linux'):
            shutil.rmtree('kritis-linux')

        run('git', 'clone', '--depth', '1', '--branch', 'v0.7',
            'https://github.com/bittorf/kritis-linux.git')
        run('kritis-linux/ci_helper.sh',
            '--private',
            '--multi', '1',
            '--repeat', '1',
            '--arch', args.arch,
            '--qemucpu', '486',
            '--kernel', '3.18.140',
            '--features', 'kflock,highrestimers',
            '--ramsize', str(args.qemu_ram) + 'M',
            '--initrd', initramfs_path,
            '--log', '/tmp/bootstrap.log')
        return

    run(args.qemu_cmd,
        '-enable-kvm',
        '-m', str(args.qemu_ram) + 'M',
        '-nographic',
        '-no-reboot',
        '-kernel', args.kernel,
        '-initrd', initramfs_path,
        '-append', "console=ttyS0")
def setup_download_from_s3(rel_fp, recursive=True):
    s3_fp = 's3://mousebrainatlas-data/' + rel_fp
    local_fp = os.environ['ROOT_DIR'] + rel_fp

    if os.path.exists(local_fp):
        print('ALREADY DOWNLOADED FILE')
        return

    if recursive:
        run('aws s3 cp --recursive {0} {1}'.format(s3_fp, local_fp))
    else:
        run('aws s3 cp {0} {1}'.format(s3_fp, local_fp))
def bootstrap(args, system_a, system_b, system_c):
    """Kick off bootstrap process."""
    print(f"Bootstrapping {args.arch} -- SysA")

    if args.chroot:
        find_chroot = """
import shutil
print(shutil.which('chroot'))
"""
        chroot_binary = run('sudo', 'python3', '-c', find_chroot,
                            capture_output=True).stdout.decode().strip()

        # sysa
        arch = stage0_arch_map.get(args.arch, args.arch)
        init = os.path.join(os.sep, 'bootstrap-seeds', 'POSIX', arch,
                            'kaem-optional-seed')
        run('sudo', 'env', '-i', 'PATH=/bin', chroot_binary, system_a.tmp_dir, init)

    elif args.minikernel:
        if os.path.isdir('kritis-linux'):
            shutil.rmtree('kritis-linux')

        run('git', 'clone', '--depth', '1', '--branch', 'v0.7',
            'https://github.com/bittorf/kritis-linux.git')
        run('kritis-linux/ci_helper.sh',
            '--private',
            '--multi', '1',
            '--repeat', '1',
            '--arch', args.arch,
            '--qemucpu', '486',
            '--kernel', '3.18.140',
            '--features', 'kflock,highrestimers',
            # Hack to add -hda /dev/blah
            '--ramsize', str(args.qemu_ram) + 'M -hda ' + system_b.dev_name,
            '--initrd', system_a.initramfs_path,
            '--log', '/tmp/bootstrap.log')

    elif args.bare_metal:
        print("Please:")
        print("  1. Take sysa/tmp/initramfs and your kernel, boot using this.")
        print("  2. Take sysc/tmp/disk.img and put this on a writable storage medium.")

    else:
        run(args.qemu_cmd,
            '-enable-kvm',
            '-m', str(args.qemu_ram) + 'M',
            '-no-reboot',
            '-hda', system_c.dev_name,
            '-kernel', args.kernel,
            '-initrd', system_a.initramfs_path,
            '-nographic',
            '-append', 'console=ttyS0')
def install_package(name, settings):
    log.info(f"Installing packages for: {name}")
    module = globals()
    if name in module:
        # If the name matches a function in this module, call it and pass settings
        log.debug(f"Found package function for {name}")
        module[name](settings)
    else:
        # Otherwise, expect a cmd to run
        cmd = settings['cmd']
        log.debug(f"Executing: {cmd}")
        run(cmd)

    # post_install should be a list of shell commands passed directly to 'run'
    post_install = settings.get('post_install')
    if post_install:
        log.info("Running post-install operations")
        for cmd in post_install:
            run(cmd)
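# A hypothetical settings mapping that install_package() could consume -- an
# illustration of the dispatch above, not the real config format. 'vscode'
# matches the function of the same name in this module, while 'ripgrep' has no
# matching function and falls back to running its 'cmd'.
SAMPLE_PACKAGES = {
    'vscode': {'extensions': 'configs/vscode-extensions.txt'},
    'ripgrep': {
        'cmd': ['brew', 'install', 'ripgrep'],
        'post_install': [['rg', '--version']],
    },
}

def install_all(packages=SAMPLE_PACKAGES):
    for name, settings in packages.items():
        install_package(name, settings)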
def test_run_shell_output():
    output = utils.run('echo "hello"', cap=True)
    expected_output = 'hello\n'
    assert output == expected_output
def test_run_shell_input():
    output = utils.run(['cat'], cap=True, input='hello')
    expected_output = 'hello'
    assert output == expected_output
def load_lambdas_on_s3(bosslet_config, lambda_name=None, lambda_dir=None):
    """Package up the lambda files and send them through the lambda build process
    where the lambda code zip is produced and uploaded to S3

    NOTE: This function is also used to build lambda layer code zips, the only
          requirement for a layer is that the files in the resulting zip should
          be in the correct subdirectory (`python/` for Python libraries) so
          that when a lambda uses the layer the libraries included in the layer
          can be correctly loaded

    NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to
          'multi_lambda' for backwards compatibility

    Args:
        bosslet_config (BossConfiguration): Configuration object of the stack
                                            the lambda will be deployed into
        lambda_name (str): Name of the lambda, which will be mapped to the name
                           of the lambda directory that contains the lambda's code
        lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that
                          contains the `lambda.yml` configuration file for the lambda

    Raises:
        BossManageError: If there was a problem with building the lambda code zip
                         or uploading it to the given S3 bucket
    """
    # For backwards compatibility build the multi_lambda code zip
    if lambda_name is None and lambda_dir is None:
        lambda_dir = 'multi_lambda'

    # Map from lambda_name to lambda_dir if needed
    if lambda_dir is None:
        try:
            lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
        except KeyError:
            console.error("Cannot build a lambda that doesn't use a code zip file")
            return None

    # To prevent rebuilding a lambda code zip multiple times during an individual
    # execution, memoize what has been built
    if lambda_dir in BUILT_ZIPS:
        console.debug('Lambda code {} has already been built recently, skipping...'.format(lambda_dir))
        return
    BUILT_ZIPS.append(lambda_dir)

    lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir))
    lambda_config = lambda_dir / 'lambda.yml'
    with lambda_config.open() as fh:
        lambda_config = yaml.full_load(fh.read())

    if lambda_config.get('layers'):
        for layer in lambda_config['layers']:
            # Layer names should end with `layer`
            if not layer.endswith('layer'):
                console.warning("Layer '{}' doesn't conform to naming conventions".format(layer))

            load_lambdas_on_s3(bosslet_config, lambda_dir=layer)

    console.debug("Building {} lambda code zip".format(lambda_dir))

    domain = bosslet_config.INTERNAL_DOMAIN
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = pathlib.Path(tempname.name + '.zip')
    tempname.close()
    console.debug('Using temp zip file: {}'.format(zipname))

    cwd = os.getcwd()

    # Copy the lambda files into the zip
    for filename in lambda_dir.glob('*'):
        zip.write_to_zip(str(filename), zipname, arcname=filename.name)

    # Copy the other files that should be included
    if lambda_config.get('include'):
        for src in lambda_config['include']:
            dst = lambda_config['include'][src]
            src_path, src_file = src.rsplit('/', 1)

            os.chdir(const.repo_path(src_path))

            # Generate dynamic configuration files, as needed
            if src_file == 'ndingest.git':
                with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
                    # Generate settings.ini file for ndingest.
                    create_ndingest_settings(bosslet_config, tmpl)

            zip.write_to_zip(src_file, zipname, arcname=dst)
        os.chdir(cwd)

    # Currently any Docker CLI compatible container setup can be used (like podman)
    CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}'

    BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}'
    BUILD_ARGS = {
        'DOMAIN': domain,
        'BUCKET': bosslet_config.LAMBDA_BUCKET,
    }

    # DP NOTE: not sure if this should be in the bosslet_config, as it is more
    #          about the local dev environment instead of the stack's environment.
    #          Different maintainers may have different container commands installed.
    container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER')
    lambda_build_server = bosslet_config.LAMBDA_SERVER
    if lambda_build_server is None:
        staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging'))
        if not staging_target.exists():
            staging_target.mkdir()

        console.debug("Copying build zip to {}".format(staging_target))
        staging_zip = staging_target / (domain + '.zip')
        try:
            zipname.rename(staging_zip)
        except OSError:
            # rename only works within the same filesystem
            # Using the shell version, as using copy + chmod doesn't always work
            # depending on the filesystem
            utils.run('mv {} {}'.format(zipname, staging_zip), shell=True)

        # Provide the AWS Region and Credentials (for S3 upload) via environment variables
        env_extras = {
            'AWS_REGION': bosslet_config.REGION,
            'AWS_DEFAULT_REGION': bosslet_config.REGION,
        }

        if container_executable is None:
            BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files')
            CMD = BUILD_CMD.format(**BUILD_ARGS)

            if bosslet_config.PROFILE is not None:
                env_extras['AWS_PROFILE'] = bosslet_config.PROFILE

            console.info("calling build lambda on localhost")
        else:
            BUILD_ARGS['PREFIX'] = '/var/task'
            CMD = BUILD_CMD.format(**BUILD_ARGS)
            CMD = CONTAINER_CMD.format(EXECUTABLE=container_executable,
                                       HOST_DIR=const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'),
                                       RUNTIME=lambda_config['runtime'],
                                       CMD=CMD)

            if bosslet_config.PROFILE is not None:
                # Cannot set the profile as the container will not have the credentials file
                # So extract the underlying keys and provide those instead
                creds = bosslet_config.session.get_credentials()
                env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key
                env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

            console.info("calling build lambda in {}".format(container_executable))

        try:
            utils.run(CMD, env_extras=env_extras)
        except Exception as ex:
            raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex))
        finally:
            os.remove(staging_zip)
    else:
        BUILD_ARGS['PREFIX'] = '~'
        CMD = BUILD_CMD.format(**BUILD_ARGS)

        lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY
        lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
        ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user')
        bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else []
        ssh = SSHConnection(ssh_target, bastions)

        console.debug("Copying build zip to lambda-build-server")
        target_file = '~/staging/{}.zip'.format(domain)
        ret = ssh.scp(zipname, target_file, upload=True)
        console.debug("scp return code: " + str(ret))

        os.remove(zipname)

        console.info("calling build lambda on lambda-build-server")
        ret = ssh.cmd(CMD)
        if ret != 0:
            raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret))
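# Hypothetical invocations of load_lambdas_on_s3(), assuming a BossConfiguration
# object is in scope; the lambda and layer names here are illustrative, not
# actual directories under cloud_formation/lambda/:
#
#   load_lambdas_on_s3(bosslet_config)                              # legacy multi_lambda zip
#   load_lambdas_on_s3(bosslet_config, lambda_name='ingest_lambda') # named lambda
#   load_lambdas_on_s3(bosslet_config, lambda_dir='pillow_layer')   # build a layer zip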
if __name__ == "__main__":
    import argparse
    from time import time

    parser = argparse.ArgumentParser()
    parser.add_argument("--yaml", type=str,
                        default=os.environ['REPO_DIR'] + 'shape_params.yaml',
                        help="Path to YAML file with parameters")
    parser.add_argument("image_dir", type=str,
                        help="Path to directory containing the input images")
    parser.add_argument("--save_dir", type=str,
                        default=os.path.join(os.environ['ROOT_DIR'], 'cells/'),
                        help="Path to directory where extracted cells are saved")
    args = parser.parse_args()

    yamlfile = args.yaml
    img_dir = args.image_dir
    cell_dir = args.save_dir

    t0 = time()
    for img in glob(img_dir + '/*'):
        run('python {0}/extractPatches.py {1} {2} --save_dir {3}'.format(
            os.environ['REPO_DIR'], img, yamlfile, cell_dir))
    print('Cell extraction finished in', time() - t0, 'seconds')