def cmd_build(args):
    """Build a rule through the foreman script and optionally release it.

    Returns 0 on success; a failing build presumably raises from
    ``scripts.run`` -- TODO confirm.
    """
    LOG.info('build: %s %s', args.rule, args.version)
    scripts.run([
        REPO_ROOT_PATH / 'shipyard2' / 'scripts' / 'foreman.sh',
        'build',
        # Forward the debug flag to the foreman subprocess.
        *(('--debug', ) if shipyard2.is_debug() else ()),
        *_read_args_file(args.args_file or ()),
        *('--parameter', '//bases:inside-builder-pod=false'),
        *(
            '--parameter',
            # Set the rule's sibling "version" parameter to the
            # requested version.
            '//%s:%s=%s' % (
                args.rule.path,
                args.rule.name.with_name('version'),
                args.version,
            ),
        ),
        args.rule,
    ])
    if args.also_release:
        # Pick the releaser matching the rule type; plain image rules
        # are built but not released from here.
        if _look_like_pod_rule(args.rule):
            release = _get_envs_dir(args).release_pod
        elif _look_like_xar_rule(args.rule):
            release = _get_envs_dir(args).release_xar
        else:
            ASSERT.predicate(args.rule, _look_like_image_rule)
            release = None
        if release:
            label = _guess_label_from_rule(args.rule)
            LOG.info('release: %s %s to %s', label, args.version, args.env)
            release(args.env, label, args.version)
    return 0
def uninstall(self):
    """Tear down this pod ops dir: units, pods, tokens, images, volumes.

    Returns False when the metadata file is already gone (treated as
    already uninstalled); True after a full teardown.  The step order
    matters: metadata is removed last so a crash mid-way leaves the
    directory re-uninstallable.
    """
    if not self.metadata_path.exists():
        LOG.info('skip: pods uninstall: metadata was removed')
        # Without metadata the directory must already be empty.
        ASSERT.predicate(self.path, g1.files.is_empty_dir)
        return False
    log_args = (self.label, self.version)
    LOG.debug('pods uninstall: systemd units: %s %s', *log_args)
    for config in self.metadata.systemd_unit_configs:
        systemds.uninstall(config)
    systemds.daemon_reload()
    LOG.debug('pods uninstall: pods: %s %s', *log_args)
    g1.files.remove(self.refs_dir_path)
    for pod_id in self._get_pod_id_set(self.metadata):
        ctr_scripts.ctr_remove_pod(pod_id)
    LOG.debug('pods uninstall: tokens: %s %s', *log_args)
    with tokens.make_tokens_database().writing() as active_tokens:
        for config in self.metadata.systemd_unit_configs:
            active_tokens.unassign_all(config.pod_id)
    LOG.debug('pods uninstall: images: %s %s', *log_args)
    for image in self.metadata.images:
        # skip_active: an image may still be in use by other pods.
        ctr_scripts.ctr_remove_image(image, skip_active=True)
    LOG.debug('pods uninstall: volumes: %s %s', *log_args)
    g1.files.remove(self.volumes_dir_path)
    LOG.debug('pods uninstall: metadata: %s %s', *log_args)
    g1.files.remove(self.metadata_path)  # Remove metadata last.
    ASSERT.predicate(self.path, g1.files.is_empty_dir)
    return True
def _fixup(parameters, src_path):
    """Fix up installed paths.

    Custom-built Python sometimes creates a pythonX.Ym rather than a
    pythonX.Y header directory (and likewise for libpython).
    """
    del src_path  # Unused.
    include_path = _add_version(parameters, 'include/python{}.{}')
    if not include_path.exists():
        # Only the "m"-suffixed variant exists; link the plain name to it.
        m_include_path = ASSERT.predicate(
            _add_version(parameters, 'include/python{}.{}m'),
            Path.is_dir,
        )
        LOG.info('symlink cpython headers')
        with scripts.using_sudo():
            scripts.ln(m_include_path.name, include_path)
    solib_path = _add_version(parameters, 'lib/libpython{}.{}.so')
    if parameters['shared'] and not solib_path.exists():
        m_solib_path = ASSERT.predicate(
            _add_version(parameters, 'lib/libpython{}.{}m.so'),
            Path.is_file,
        )
        LOG.info('symlink cpython library')
        with scripts.using_sudo():
            scripts.ln(m_solib_path.name, solib_path)
def cmd_build_image(name, version, rootfs_path, output_path):
    """Build an image from an on-disk rootfs directory."""
    # Although root privilege is not required, most likely you need it
    # to finish this.
    ASSERT.predicate(rootfs_path, Path.is_dir)

    def copy_rootfs(dst_path):
        # Copy the rootfs into the image staging directory.
        return bases.rsync_copy(rootfs_path, dst_path)

    build_image(
        ImageMetadata(name=name, version=version),
        copy_rootfs,
        output_path,
    )
def __post_init__(self):
    """Validate source and target paths after dataclass init."""
    ASSERT.not_empty(self.sources)
    last_index = len(self.sources) - 1
    for index, source in enumerate(self.sources):
        if not source:
            # Empty source path means host's /var/tmp; it is only
            # allowed in the last position.
            ASSERT.equal(index, last_index)
        else:
            ASSERT.predicate(Path(source), Path.is_absolute)
    ASSERT.predicate(Path(self.target), Path.is_absolute)
def git_clone(parameters):
    """Clone the repository into the drydock area, skipping when present."""
    repo_path = parameters['//bases:drydock'] / foreman.get_relpath()
    repo_path /= repo_path.name
    git_dir_path = repo_path / '.git'
    if not git_dir_path.is_dir():
        LOG.info('git clone: %s', repo_url)
        scripts.git_clone(repo_url, repo_path=repo_path, treeish=treeish)
        # A successful clone must have produced a .git directory.
        ASSERT.predicate(git_dir_path, Path.is_dir)
    else:
        LOG.info('skip: git clone: %s', repo_url)
def using_ops_dir(self, label, version):
    """Yield the ops dir object for (label, version) under its lock.

    Lock hand-off: the shared lock on the active dir is held only while
    acquiring the per-ops-dir exclusive lock, then released; the
    exclusive lock is held for the duration of the ``with`` body and
    released afterwards.
    """
    ops_dir_path = self._get_ops_dir_path(label, version)
    with locks.acquiring_shared(self.active_dir_path):
        ASSERT.predicate(ops_dir_path, Path.is_dir)
        ops_dir_lock = locks.acquire_exclusive(ops_dir_path)
    try:
        yield self.ops_dir_type(ops_dir_path)
    finally:
        ops_dir_lock.release()
        ops_dir_lock.close()
def __init__(self, top_path, path):
    """Validate the pod dir layout, then delegate to the base class."""
    ASSERT.predicate(path, Path.is_dir)
    required_entries = (
        (shipyard2.POD_DIR_RELEASE_METADATA_FILENAME, Path.is_file),
        (shipyard2.POD_DIR_DEPLOY_INSTRUCTION_FILENAME, Path.is_file),
        (shipyard2.POD_DIR_IMAGES_DIR_NAME, Path.is_dir),
        (shipyard2.POD_DIR_VOLUMES_DIR_NAME, Path.is_dir),
    )
    for entry_name, check in required_entries:
        ASSERT.predicate(path / entry_name, check)
    super().__init__(top_path, path)
def cmd_unassign(args):
    """Unassign a token from a pod that is no longer active."""
    oses.assert_root_privilege()
    # You can only unassign a token from a removed pod (in this case,
    # we treat the host as removed).
    pod_ids = tokens.load_active_pod_ids(pod_ops_dirs.make_ops_dirs())
    pod_ids.remove(ctr_models.read_host_pod_id())
    ASSERT.not_in(args.pod_id, pod_ids)
    with tokens.make_tokens_database().writing() as active_tokens:
        ASSERT.predicate(args.token_name, active_tokens.has_definition)
        active_tokens.unassign(args.token_name, args.pod_id, args.name)
    return 0
def cmd_assign(args):
    """Assign a token value to an active pod."""
    oses.assert_root_privilege()
    active_pod_ids = tokens.load_active_pod_ids(pod_ops_dirs.make_ops_dirs())
    ASSERT.in_(args.pod_id, active_pod_ids)
    with tokens.make_tokens_database().writing() as active_tokens:
        ASSERT.predicate(args.token_name, active_tokens.has_definition)
        active_tokens.assign(
            args.token_name, args.pod_id, args.name, args.value
        )
    return 0
def extract(parameters):
    """Extract the downloaded archive unless its output already exists."""
    archive = parameters[parameter_archive]
    archive_path = _archive_get_archive_path(parameters, archive)
    output_path = _archive_get_output_path(parameters, archive)
    if output_path.exists():
        LOG.info('skip: extract archive: %s', archive_path)
    else:
        LOG.info('extract archive: %s', archive_path)
        scripts.mkdir(output_path.parent)
        # The extractor creates output_path itself under its parent.
        scripts.extract(archive_path, directory=output_path.parent)
        ASSERT.predicate(output_path, Path.is_dir)
def make_relative_symlink(target_path, link_path):
    """Create link_path as a relative symlink that points at target_path."""
    # TODO: We require both paths being absolute for now as I am not
    # sure whether os.path.relpath will work correctly.
    target = ASSERT.predicate(Path(target_path), Path.is_absolute)
    link = ASSERT.predicate(Path(link_path), Path.is_absolute)
    # Use os.path.relpath because Path.relative_to cannot derive this
    # type of relative path.
    relpath = os.path.relpath(target, link.parent)
    mkdir(link.parent)
    with bases.using_cwd(link.parent):
        ln(relpath, link.name)
def function_caller(queue):
    """Actor that interprets messages as function calls."""
    LOG.debug('start')
    while True:
        try:
            request = ASSERT.isinstance(queue.get(), MethodCall)
        except queues.Closed:
            break
        with request.future.catching_exception(reraise=False):
            ASSERT.predicate(request.method, callable)
            result = request.method(*request.args, **request.kwargs)
            request.future.set_result(result)
        # Drop the reference so the message is collectable while this
        # actor blocks on the next queue.get.
        del request
    LOG.debug('exit')
def unassign(self, token_name, pod_id, name):
    """Unassign a token from a pod.

    NOTE: For now we do not guarantee uniqueness among assignment
    names, and this method will unassign **all** matched assignment
    names.
    """
    ASSERT.predicate(token_name, self.has_definition)
    ctr_models.validate_pod_id(pod_id)

    def matches(assigned_token_name, assignment):
        # Match on token name, pod id, and assignment name.
        return (
            assigned_token_name == token_name
            and assignment.pod_id == pod_id
            and assignment.name == name
        )

    self._remove('unassign', matches)
def _create_xar_dir(xar_dir_path, image_id, exec_relpath):
    """Create and populate a xar directory, cleaning up on any failure.

    Creates the dir and its deps dir, records an image ref, symlinks the
    executable, and installs the runner script.  If any step fails, the
    partially-created directory is removed and the exception re-raised.
    """
    LOG.info('create xar: %s', xar_dir_path)
    xar_name = _get_name(xar_dir_path)
    try:
        bases.make_dir(xar_dir_path, 0o750, bases.chown_app)
        bases.make_dir(_get_deps_path(xar_dir_path), 0o750, bases.chown_app)
        _add_ref_image_id(xar_dir_path, image_id)
        exec_path = _get_exec_path(xar_dir_path)
        exec_path.symlink_to(_get_exec_target(image_id, exec_relpath))
        ASSERT.predicate(exec_path, Path.exists)
        _create_xar_runner_script(xar_name)
    except BaseException:
        # Explicit BaseException (instead of a bare ``except:``) keeps
        # the same clean-up-then-re-raise behavior, including on
        # KeyboardInterrupt/SystemExit, while satisfying PEP 8.
        _remove_xar_dir(xar_dir_path)
        raise
def post_init(self):
    """Validate that the expected on-disk files for this entry exist."""
    ASSERT.predicate(self.path, Path.is_dir)
    ASSERT.predicate(self.deploy_instruction_path, Path.is_file)
    # The payload is either a zipapp or an image, per the instruction.
    payload_path = (
        self.zipapp_path
        if self.deploy_instruction.is_zipapp() else self.image_path
    )
    ASSERT.predicate(payload_path, Path.is_file)
def __init__(self, top_path, path):
    """Validate the xar dir layout, then delegate to the base class."""
    ASSERT.predicate(path, Path.is_dir)
    for filename in (
        shipyard2.XAR_DIR_RELEASE_METADATA_FILENAME,
        shipyard2.XAR_DIR_DEPLOY_INSTRUCTION_FILENAME,
    ):
        ASSERT.predicate(path / filename, Path.is_file)
    # At least one payload file (image or zipapp) must be present.
    ASSERT.any(
        (
            path / shipyard2.XAR_DIR_IMAGE_FILENAME,
            path / shipyard2.XAR_DIR_ZIPAPP_FILENAME,
        ),
        Path.is_file,
    )
    super().__init__(top_path, path)
def cmd_export_overlay(pod_id, output_path, filter_patterns, *, debug=False):
    """Export a pod's overlay upper dir to output_path via rsync.

    ``filter_patterns`` are (rule, pattern) pairs forwarded to rsync as
    ``--<rule>=<pattern>`` arguments.  The active-dir lock is held only
    while locking the pod dir; the copy runs under the pod dir lock.
    """
    oses.assert_root_privilege()
    ASSERT.not_predicate(output_path, g1.files.lexists)
    # Exclude pod-generated files.
    # TODO: Right now we hard-code the list, but this is fragile.
    filter_args = [
        '--exclude=/etc/machine-id',
        '--exclude=/var/lib/dbus/machine-id',
        '--exclude=/etc/hostname',
        '--exclude=/etc/hosts',
        '--exclude=/etc/systemd',
        '--exclude=/etc/.pwd.lock',
        '--exclude=/etc/mtab',
        # Remove distro unit files.
        '--exclude=/etc/systemd/system',
        '--exclude=/lib/systemd/system',
        '--exclude=/usr/lib/systemd/system',
    ]
    filter_args.extend('--%s=%s' % pair for pair in filter_patterns)
    if debug:
        # Log which files are included/excluded due to filter rules.
        filter_args.append('--debug=FILTER2')
    with locks.acquiring_exclusive(_get_active_path()):
        pod_dir_path = ASSERT.predicate(_get_pod_dir_path(pod_id), Path.is_dir)
        pod_dir_lock = ASSERT.true(locks.try_acquire_exclusive(pod_dir_path))
    try:
        upper_path = _get_upper_path(pod_dir_path)
        bases.rsync_copy(upper_path, output_path, filter_args)
    finally:
        pod_dir_lock.release()
        pod_dir_lock.close()
def add_ref(image_id, dst_path):
    """Hard-link the image's metadata file to dst_path as a ref."""
    metadata_path = ASSERT.predicate(
        _get_metadata_path(get_image_dir_path(image_id)),
        Path.is_file,
    )
    os.link(metadata_path, dst_path)
def uninstall(self):
    """Remove this xar's artifacts; return False when already gone."""
    if not self.metadata_path.exists():
        LOG.info('skip: xars uninstall: metadata was removed')
        # Without metadata the directory must already be empty.
        ASSERT.predicate(self.path, g1.files.is_empty_dir)
        return False
    label_and_version = (self.label, self.version)
    if self.metadata.is_zipapp():
        LOG.info('xars uninstall: zipapp: %s %s', *label_and_version)
        g1.files.remove(self.zipapp_target_path)
    else:
        LOG.info('xars uninstall: xar: %s %s', *label_and_version)
        ctr_scripts.ctr_uninstall_xar(self.metadata.name)
        ctr_scripts.ctr_remove_image(self.metadata.image)
    g1.files.remove(self.metadata_path)  # Remove metadata last.
    ASSERT.predicate(self.path, g1.files.is_empty_dir)
    return True
def assign(self, token_name, pod_id, name, value=None):
    """Assign a token to a pod.

    When ``value`` is None the next available value is picked from the
    token definition; otherwise the caller-provided value is validated
    against the currently assigned values.  Returns the assigned value.
    """
    ASSERT.predicate(token_name, self.has_definition)
    ctr_models.validate_pod_id(pod_id)
    definition = self.definitions[token_name]
    assignments = self.assignments.setdefault(token_name, [])
    assigned_values = [a.value for a in assignments]
    if value is None:
        value = definition.next_available(assigned_values)
    else:
        # NOTE(review): only explicitly-provided values are run through
        # validate_assigned_values; auto-picked values are presumed
        # valid by construction of next_available -- confirm.
        assigned_values.append(value)
        definition.validate_assigned_values(assigned_values)
    assignment = self.Assignment(pod_id=pod_id, name=name, value=value)
    assignments.append(assignment)
    LOG.info('tokens assign: %s %r', token_name, assignment)
    return value
def download(parameters):
    """Download the archive (validating its checksum) unless present."""
    archive = parameters[parameter_archive]
    archive_path = _archive_get_archive_path(parameters, archive)
    if archive_path.exists():
        LOG.info('skip: download archive: %s', archive.url)
        return
    LOG.info('download archive: %s', archive.url)
    scripts.mkdir(archive_path.parent)
    scripts.wget(archive.url, output_path=archive_path, headers=wget_headers)
    ASSERT.predicate(archive_path, Path.is_file)
    if archive.checksum:
        scripts.validate_checksum(archive_path, archive.checksum)
def cmd_add_ref(pod_id, target_path):
    """Add a ref to a pod dir; the target must not already exist."""
    oses.assert_root_privilege()
    with locks.acquiring_shared(_get_active_path()):
        pod_dir_path = ASSERT.predicate(_get_pod_dir_path(pod_id), Path.is_dir)
        ASSERT.not_predicate(target_path, g1.files.lexists)
        _add_ref(pod_dir_path, target_path)
    return 0
def cmd_exec(xar_name, xar_args):
    """Exec into the xar's executable, replacing the current process.

    NOTE: The ref file lock is acquired with ``close_on_exec=False``
    and deliberately never released: it survives ``os.execv`` and keeps
    the image pinned for the lifetime of the exec'ed process.
    """
    # Don't need root privilege here.
    with locks.acquiring_shared(_get_xars_repo_path()):
        xar_dir_path = ASSERT.predicate(
            _get_xar_dir_path(xar_name), Path.is_dir
        )
        exec_abspath = ASSERT.predicate(
            _get_exec_path(xar_dir_path), Path.exists
        ).resolve()
        lock = locks.FileLock(
            _get_ref_path(xar_dir_path, _get_image_id(exec_abspath)),
            close_on_exec=False,
        )
        lock.acquire_shared()
    # TODO: Or should argv[0] be exec_abspath.name?
    argv = [xar_name]
    argv.extend(xar_args)
    LOG.debug('exec: path=%s, argv=%s', exec_abspath, argv)
    os.execv(str(exec_abspath), argv)
def cmd_run_prepared(pod_id, *, debug=False):
    """Run a previously prepared pod, re-mounting its overlay if needed."""
    oses.assert_root_privilege()
    pod_dir_path = ASSERT.predicate(_get_pod_dir_path(pod_id), Path.is_dir)
    _lock_pod_dir_for_exec(pod_dir_path)
    rootfs_path = _get_rootfs_path(pod_dir_path)
    if g1.files.is_empty_dir(rootfs_path):
        # An empty rootfs means the overlay mount is gone, which
        # happens across reboots; mount it again from the config.
        LOG.warning('overlay is not mounted; system probably rebooted')
        _mount_overlay(pod_dir_path, _read_config(pod_dir_path))
    _run_pod(pod_id, debug=debug)
def build(parameters):
    """Load the build config and run the build and install steps."""
    config_path = ASSERT.predicate(_get_config_path(parameters), Path.is_file)
    config_data = json.loads(config_path.read_text())
    src_path = _get_src_path(parameters)
    with scripts.using_cwd(src_path):
        _build(parameters, src_path, config_data)
        _install()
def __init__(self, status, message, headers=None, content=b''):
    """HTTP error with status in [300, 600), string headers, and a body."""
    super().__init__(message)
    self.status = ASSERT.in_range(_cast_status(status), (300, 600))

    def _all_str_pairs(hdrs):
        # Every header name and value must be a str.
        return all(
            isinstance(k, str) and isinstance(v, str)
            for k, v in hdrs.items()
        )

    self.headers = ASSERT.predicate(
        {} if headers is None else dict(headers),
        _all_str_pairs,
    )
    self.content = ASSERT.isinstance(content, bytes)
def generate_unit_file(root_path, pod_name, pod_version, app):
    """Write the app's systemd unit file and its wants symlink."""
    LOG.info('create unit file: %s', app.name)
    pod_etc_path = ASSERT.predicate(_get_pod_etc_path(root_path), Path.is_dir)
    unit_path = ASSERT.not_predicate(
        _get_pod_unit_path(pod_etc_path, app),
        g1.files.lexists,
    )
    unit_path.write_text(
        _generate_unit_file_content(pod_name, pod_version, app)
    )
    wants_path = ASSERT.not_predicate(
        _get_pod_wants_path(pod_etc_path, app),
        g1.files.lexists,
    )
    wants_path.symlink_to(Path('..') / _get_pod_unit_filename(app))
def _get_var_path(name):
    """Query pkg-config for a capnp variable naming an existing directory."""
    with scripts.doing_capture_stdout():
        proc = scripts.run(['pkg-config', '--variable=%s' % name, 'capnp'])
    var_value = proc.stdout.decode('utf-8').strip()
    return ASSERT.predicate(Path(var_value), Path.is_dir)
def setup(parameters):
    """Generate the gradle wrapper in the root project if it is missing."""
    src_path = ASSERT.predicate(
        _find_project(parameters, foreman.get_relpath()),
        _is_root_project,
    )
    wrapper_path = src_path / 'gradlew'
    if wrapper_path.exists():
        LOG.info('skip: generate gradle wrapper')
        return
    LOG.info('generate gradle wrapper')
    with scripts.using_cwd(src_path):
        scripts.run(['gradle', 'wrapper'])