def create_host_folders(self):
    """
    Ensure that every configured shared folder exists on the host system.

    Missing folders are created (as the current user, via ``mkdir -p``).

    :raises FatalError: if a configured path exists but is not a directory,
                        or if creating a missing folder fails.
    """
    if self._suppress_shared_folders():
        return
    for folder in self._get_folder_list(
            'edi_current_user_host_home_directory', 'folder'):
        if not os.path.exists(folder):
            # Use the current user (not root) to create the folder!
            creation = run(['mkdir', '-p', folder],
                           check=False, stderr=subprocess.PIPE)
            if creation.returncode != 0:
                raise FatalError((
                    '''The creation of the folder '{}' failed with the message '{}'.'''
                ).format(folder, creation.stderr))
            logging.debug((
                '''Successfully created the shared folder '{}' on the host system.'''
            ).format(folder))
        elif os.path.isdir(folder):
            logging.debug((
                '''The shared folder '{}' on the host system has already been created.'''
            ).format(folder))
        else:
            # The path exists but cannot be shared (e.g. it is a plain file).
            raise FatalError(
                '''The location '{}' does '''
                '''exist on the host system but it is not a folder that '''
                '''can be shared to a container.'''.format(folder))
def run(self, project_name, config_template):
    """
    Initialize a new edi project configuration in the current working directory.

    Copies the project skeleton into the (empty) working directory, renders the
    chosen configuration template and injects the project name and edi version.

    :param project_name: name of the new project
    :param config_template: identifier of the configuration template to use
    :raises FatalError: when run as root, without write access, or in a
                        non-empty directory.
    """
    workdir = os.getcwd()
    # Refuse to run as root so all generated files belong to the real user.
    if os.getuid() == 0:
        raise FatalError('Do not initialize a configuration as root!')
    if not os.access(workdir, os.W_OK):
        raise FatalError('''No write access to '{}'.'''.format(workdir))
    # An empty folder is required so existing files never get clobbered.
    if os.listdir(workdir):
        raise FatalError(
            'Please initialize your new configuration within an empty folder!'
        )
    source = get_project_tree()
    copy_tree(source, workdir)
    template = ConfigurationTemplate(workdir)
    with open(get_template(config_template), encoding="UTF-8",
              mode="r") as template_file:
        t = Template(template_file.read())
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects - the template source is project-controlled here,
        # but consider yaml.safe_load; confirm against the template origin.
        template_dict = yaml.load(t.render(get_base_dictionary())).get(
            'parameters', {})
    template_dict['edi_project_name'] = project_name
    template_dict["edi_edi_version"] = get_stripped_version(
        get_edi_version())
    template.render(template_dict)
    print_success(
        '''Configuration for project '{}' generated in folder '{}'.'''.
        format(project_name, workdir))
def _get_commands(self):
    """
    Collect the ordered command nodes of the configured section and augment
    them with rendered command files and absolute output artifact paths.

    :return: list of tuples
             (filename, content, name, path, dictionary, raw_node, new_artifacts)
    :raises FatalError: if an output specification is not a dictionary or an
                        output artifact contains path separators.
    """
    artifactdir = get_artifact_dir()
    commands = self.config.get_ordered_path_items(self.config_section)
    augmented_commands = []
    artifacts = dict()
    if self.input_artifact:
        artifacts['edi_input_artifact'] = self.input_artifact
    for name, path, dictionary, raw_node in commands:
        output = raw_node.get('output')
        # isinstance instead of a strict type() comparison so that dict
        # subclasses (e.g. ordered mappings from a yaml loader) are accepted.
        if not isinstance(output, dict):
            raise FatalError(('''The output specification in command node '{}' '''
                              '''is not a key value dictionary.'''
                              ).format(name))
        new_artifacts = dict()
        for artifact_key, artifact_item in output.items():
            # Artifacts must be plain file/folder names within the artifact
            # directory - no sub paths allowed.
            if str(artifact_item) != os.path.basename(artifact_item):
                raise FatalError(('''The specified output artifact '{}' within the '''
                                  '''command node '{}' is invalid.\n'''
                                  '''The output shall be a file or a folder (no '/' in string).'''
                                  ).format(artifact_key, name))
            artifact_path = os.path.join(artifactdir, artifact_item)
            new_artifacts[artifact_key] = str(artifact_path)
        # Artifacts accumulate: later commands see the outputs of earlier ones.
        artifacts.update(new_artifacts)
        dictionary.update(artifacts)
        filename, content = self._render_command_file(path, dictionary)
        augmented_commands.append((filename, content, name, path, dictionary,
                                   raw_node, new_artifacts))
    return augmented_commands
def verify_container_mountpoints(self, container_name):
    """
    Verify that all mount points exist within the target system.
    If a target mount point is missing, raise a fatal error.
    Hint: It is assumed that the mount points within the target get
    created during the configuration phase.

    :param container_name: name of the lxc container to inspect
    :raises FatalError: if the container cannot be reached or a mount
                        point is missing.
    """
    if self._suppress_shared_folders():
        return
    # Probe basic connectivity first so mount point failures are not
    # misreported when the container itself is unreachable.
    test_cmd = ['lxc', 'exec', container_name, '--', 'true']
    result = run(test_cmd, check=False, stderr=subprocess.PIPE)
    if result.returncode != 0:
        # Fixed wording of the error message ("The communicate with" was broken).
        raise FatalError((
            '''The communication with the container '{}' failed with the message '{}'.'''
        ).format(container_name, result.stderr))
    mountpoints = self.get_mountpoints()
    for mountpoint in mountpoints:
        cmd = [
            'lxc', 'exec', container_name, '--', 'test', '-d', mountpoint
        ]
        if run(cmd, check=False).returncode != 0:
            # Fixed wording: "is a valid mount point".
            raise FatalError((
                '''Please make sure that '{}' is a valid mount point in the container '{}'.\n'''
                '''Hint: Use an appropriate playbook that generates those mount points\n'''
                '''      by using the variable 'edi_shared_folder_mountpoints'.'''
            ).format(mountpoint, container_name))
def _get_mandatory_item(folder_name, folder_config, item): result = folder_config.get(item, None) if not result: raise FatalError('''Missing mandatory item '{}' in shared folder '{}'.'''.format(item, folder_name)) if '/' in result: raise FatalError(('''The item '{}' in shared folder '{}' must not contain sub folders.''' ).format(item, folder_name)) return result
def __init__(self, repository=None, repository_key=None, architectures=None):
    """
    Set up a repository accessor.

    :param repository: apt source line of the repository (mandatory)
    :param repository_key: url of the repository signing key (optional)
    :param architectures: non-empty list of architectures (mandatory)
    :raises FatalError: if repository or architectures are missing.
    """
    if not repository:
        raise FatalError('''Missing argument 'repository'.''')
    if not architectures:
        raise FatalError('''Missing (non empty) list 'architectures'.''')
    self._repository, self._repository_key = repository, repository_key
    self._architectures = architectures
    self._source = SourceEntry(repository)
    # Normalize the uri so url joins do not produce double slashes.
    self._source.uri = self._source.uri.rstrip('/')
    self._compressions = ['gz', 'bz2', 'xz']
    # strongest first
    self._checksum_algorithms = ['SHA512', 'SHA256']
def _run(self):
    """
    Bootstrap the initial root file system image (debootstrap only).

    Skips the work if the result artifact already exists; otherwise runs
    debootstrap inside a temporary directory, post-processes and packs the
    root file system and moves the archive to the artifact directory.

    :return: path of the resulting image artifact
    :raises FatalError: if a bootstrap tool other than debootstrap is configured.
    """
    if os.path.isfile(self._result()):
        # Idempotent: an existing artifact is reused, not regenerated.
        logging.info(
            ("{0} is already there. "
             "Delete it to regenerate it.").format(self._result()))
        return self._result()
    self._require_sudo()
    qemu_executable = Fetch().run(self.config.get_base_config_file())
    print("Going to bootstrap initial image - be patient.")
    if self.config.get_bootstrap_tool() != "debootstrap":
        raise FatalError(("At the moment only debootstrap "
                          "is supported for bootstrapping!"))
    workdir = get_workdir()
    with tempfile.TemporaryDirectory(dir=workdir) as tempdir:
        # Hand the temp dir to the real user since we may run under sudo.
        chown_to_user(tempdir)
        key_data = fetch_repository_key(
            self.config.get_bootstrap_repository_key())
        keyring_file = build_keyring(tempdir, "temp_keyring.gpg", key_data)
        rootfs = self._run_debootstrap(tempdir, keyring_file,
                                       qemu_executable)
        self._postprocess_rootfs(rootfs, key_data)
        archive = self._pack_image(tempdir, rootfs)
        chown_to_user(archive)
        create_artifact_dir()
        # Move (not copy) before the temp dir gets cleaned up.
        shutil.move(archive, self._result())
    print_success("Bootstrapped initial image {}.".format(self._result()))
    return self._result()
def _verify_version_compatibility(self):
    """
    Abort if the configuration requires a newer edi than the one installed.

    :raises FatalError: if the installed edi version is older than the
                        configured 'edi_required_minimal_edi_version'.
    """
    current_version = get_edi_version()
    required_version = str(self._get_general_item(
        'edi_required_minimal_edi_version', current_version))
    required_stripped = get_stripped_version(required_version)
    if Version(get_stripped_version(current_version)) < Version(required_stripped):
        raise FatalError(('The current configuration requires a newer version of edi (>={}).\n'
                          'Please update your edi installation!'
                          ).format(required_stripped))
def _parse_release_file(self, release_file):
    """
    Parse a downloaded Release file and return the package index entries
    matching the configured components, architectures and compressions.

    :param release_file: path of the Release file
    :return: list of matching package file entries (name/checksum dicts)
    :raises FatalError: if no supported checksum section is present.
    """
    with open(release_file) as file:
        main_content = next(debian.deb822.Release.iter_paragraphs(file))
    # Pick the first (strongest) checksum section that is present.
    section = next(
        (main_content.get(algorithm)
         for algorithm in self._checksum_algorithms
         if main_content.get(algorithm)),
        None)
    if not section:
        raise FatalError((
            "No valid section ({}) found in release file downloaded from '{}'."
        ).format(' or '.join(a for a in self._checksum_algorithms),
                 self._get_release_file_url('')))
    packages_filter = [
        '{}/binary-{}/Packages.{}'.format(component, architecture, compression)
        for component in self._source.comps
        for architecture in self._architectures
        for compression in self._compressions
    ]
    return [entry for entry in section
            if entry.get('name') in packages_filter]
def _find_package_in_package_files(self, package_name, package_files):
    """
    Search the repository package indices for a given package.

    Downloads each distinct package index at most once, verifies its
    checksum, decompresses it and scans it for the requested package.

    :param package_name: name of the package to look up
    :param package_files: package index entries from the Release file
    :return: the deb822 paragraph of the package, or None if not found
    :raises FatalError: if a package index name can not be parsed.
    """
    downloaded_package_prefix = []
    for package_file in package_files:
        # Raw string avoids invalid escape sequence warnings for '\.'.
        match = re.match(r'^(.*)Packages\.*([a-z2]{1,3})$',
                         package_file['name'])
        # The pattern has exactly two groups, so a successful match is the
        # only condition that needs checking.
        if not match:
            raise FatalError(
                'Error parsing package name string {}.'.format(
                    package_file['name']))
        prefix = match.group(1).replace('/', '_')
        if prefix in downloaded_package_prefix:
            # Same index in a different compression - already processed.
            continue
        package_url = '{}/dists/{}/{}'.format(self._source.uri,
                                              self._source.dist,
                                              package_file['name'])
        package_file_data = self._try_fetch_archive_element(package_url)
        if package_file_data:
            self._verify_checksum(package_file_data, package_file)
            downloaded_package_prefix.append(prefix)
            decompressed_package_data = decompress(package_file_data)
            # Spooled temp file keeps small indices in memory.
            with tempfile.SpooledTemporaryFile() as f:
                f.write(decompressed_package_data)
                f.seek(0)
                for section in debian.deb822.Packages.iter_paragraphs(f):
                    if section['Package'] == package_name:
                        return section
    return None
def run(self, container_name, config_file):
    """
    Launch (or start) the lxc container for the given configuration.

    An existing container is reused (and started if stopped); otherwise the
    image and profiles get prepared and a new container is launched.

    :param container_name: name of the container (must be a valid host name)
    :param config_file: path of the edi configuration file
    :return: the container name (via self._result())
    :raises FatalError: if the container name is not a valid host name.
    """
    self._setup_parser(config_file)
    self.container_name = container_name
    # The container name doubles as its host name inside the network.
    if not is_valid_hostname(container_name):
        raise FatalError(
            ("The provided container name '{}' "
             "is not a valid host name.").format(container_name))
    if self._is_container_existing():
        logging.info(
            ("Container {0} is already existing. "
             "Destroy it to regenerate it or reconfigure it.").format(
                 self._result()))
        if not self._is_container_running():
            logging.info(("Starting existing container {0}.").format(
                self._result()))
            self._start_container()
            print_success("Started container {}.".format(self._result()))
    else:
        # Fresh launch: import the image and apply the lxc profiles first.
        image = Import().run(config_file)
        profiles = Profile().run(config_file)
        print("Going to launch container.")
        self._launch_container(image, profiles)
        print_success("Launched container {}.".format(self._result()))
    return self._result()
def main():
    """
    Entry point of the edi command line interface.

    Parses the command line, dispatches to the selected command and maps
    all expected failure modes to a user friendly error message plus a
    non-zero exit code.
    """
    try:
        cli_interface = _setup_command_line_interface()
        cli_args = cli_interface.parse_args(sys.argv[1:])
        _setup_logging(cli_args)
        if cli_args.command_name is None:
            raise FatalError("Missing command. Use 'edi --help' for help.")
        command_name = "{0}.{1}".format(EdiCommand._get_command_name(),
                                        cli_args.command_name)
        get_command(command_name)().run_cli(cli_args)
    except FatalError as fatal_error:
        print_error_and_exit(fatal_error.message)
    except KeyboardInterrupt:
        print_error_and_exit("Command interrupted by user.")
    except CalledProcessError as subprocess_error:
        print_error_and_exit(
            "{}\nFor more information increase the log level.".format(
                subprocess_error))
    # NOTE(review): SSLError must stay before ConnectionError - in requests
    # SSLError is a subclass of ConnectionError, so the order is significant.
    except requests.exceptions.SSLError as ssl_error:
        print_error_and_exit(
            "{}\nPlease verify your ssl/proxy setup.".format(ssl_error))
    except requests.exceptions.ConnectionError as connection_error:
        print_error_and_exit((
            "{}\nPlease verify your internet connectivity and the requested url."
        ).format(connection_error))
def launch_container(image, name, profiles):
    """
    Launch a new lxc container from a local image with the given profiles.

    :param image: alias of the local lxc image
    :param name: name of the container to create
    :param profiles: iterable of lxc profile names to apply (-p per profile)
    :raises FatalError: if 'lxc launch' fails; a missing lxdbr0 bridge gets
                        a dedicated hint message.
    """
    cmd = ["lxc", "launch", "local:{}".format(image), name]
    for profile in profiles:
        cmd.extend(["-p", profile])
    result = run(cmd, check=False, stderr=subprocess.PIPE,
                 log_threshold=logging.INFO)
    if result.returncode != 0:
        # Detect the common "network bridge not configured" failure mode.
        if 'Missing parent' in result.stderr and 'lxdbr0' in result.stderr:
            raise FatalError(('''Launching image '{}' failed with the following message:\n{}'''
                              'Please make sure that lxdbr0 is available. Use one of the following commands to '
                              'create lxdbr0:\n'
                              'lxd init\n'
                              'or (for lxd >= 2.3)\n'
                              'lxc network create lxdbr0').format(image, result.stderr))
        else:
            raise FatalError(('''Launching image '{}' failed with the following message:\n{}'''
                              ).format(image, result.stderr))
def get_bootstrap_architecture(self):
    """
    Return the architecture configured in the 'bootstrap' section.

    :return: the configured architecture string
    :raises FatalError: if the element is missing.
    """
    architecture = self._get_bootstrap_item("architecture", None)
    if architecture:
        return architecture
    raise FatalError(
        '''Missing mandatory element 'architecture' in section 'bootstrap'.'''
    )
def get_bootstrap_repository(self):
    """
    Return the repository configured in the 'bootstrap' section.

    :return: the configured repository string
    :raises FatalError: if the element is missing.
    """
    repository = self._get_bootstrap_item("repository", None)
    if repository:
        return repository
    raise FatalError(
        '''Missing mandatory element 'repository' in section 'bootstrap'.'''
    )
def _get_replacements(parameters): replacements = parameters.get('edi_doc_replacements', []) if not isinstance(replacements, list): raise FatalError( "'edi_doc_replacements' should contain a list of replacement instructions." ) return replacements
def _get_package_value(key, package_dict): value = package_dict.get(key) if not value: raise FatalError( "Missing '{}' key in dictionary of package ({}).".format( key, package_dict)) return value
def _post_process_artifacts(command_name, expected_artifacts): for _, artifact in expected_artifacts.items(): if not os.path.isfile(artifact) and not os.path.isdir(artifact): raise FatalError(('''The command '{}' did not generate ''' '''the specified output artifact '{}'.'''.format(command_name, artifact))) elif os.path.isfile(artifact): chown_to_user(artifact)
def _verify_signature(self, homedir, keyring, signed_file,
                      detached_signature=None):
    """
    Verify a (possibly detached) gpg signature of a repository release file.

    :param homedir: gpg home directory to use
    :param keyring: keyring file containing the trusted repository key
    :param signed_file: the signed file (InRelease or Release)
    :param detached_signature: optional detached signature file (Release.gpg)
    :raises FatalError: if the signature check fails.
    """
    cmd = ['gpg']
    cmd.extend(['--homedir', homedir])
    # Reject signatures based on weak digests.
    cmd.extend(['--weak-digest', 'SHA1'])
    cmd.extend(['--weak-digest', 'RIPEMD160'])
    cmd.extend(['--no-default-keyring', '--keyring', keyring])
    # Route machine readable status lines to stdout for parsing below.
    cmd.extend(['--status-fd', '1'])
    cmd.append('--verify')
    # For detached signatures gpg expects: --verify SIGNATURE SIGNED_FILE.
    if detached_signature:
        cmd.append(detached_signature)
    cmd.append(signed_file)
    with gpg_agent(str(homedir)):
        output = subprocess.run(cmd, input=None, timeout=None, check=False,
                                universal_newlines=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    logging.info(output.stdout)
    # Both GOODSIG and VALIDSIG status lines must be present.
    goodsig = re.search(r'^\[GNUPG:\] GOODSIG', output.stdout, re.MULTILINE)
    validsig = re.search(r'^\[GNUPG:\] VALIDSIG', output.stdout,
                         re.MULTILINE)
    if goodsig and validsig:
        logging.info('Signature check ok!')
    else:
        # Report the url of the file whose signature failed.
        if detached_signature:
            release_file_url = self._get_release_file_url('Release')
        else:
            release_file_url = self._get_release_file_url('InRelease')
        if output.stderr:
            raise FatalError(
                ("Signature check for '{}' failed with error message '{}'!"
                 ).format(release_file_url, output.stderr))
        else:
            raise FatalError("Signature check for '{}' failed!".format(
                release_file_url))
def download(self, package_name=None, dest='/tmp'):
    """
    Download a package from the configured repository.

    Prefers the inline signed InRelease file and falls back to
    Release + Release.gpg. When a repository key is configured the
    release file signature gets verified before any index is trusted.

    :param package_name: name of the package to download (mandatory)
    :param dest: destination directory for the downloaded package
    :return: path of the downloaded package
    :raises FatalError: if the package name is missing or the package
                        is not found in the repository.
    """
    if not package_name:
        raise FatalError('Missing argument package_name!')
    with tempfile.TemporaryDirectory() as tempdir:
        # Try the combined (inline signed) InRelease file first.
        inrelease_data = self._try_fetch_archive_element(
            self._get_release_file_url('InRelease'))
        release_file = os.path.join(tempdir, 'InRelease')
        signature_file = None
        if inrelease_data:
            with open(release_file, mode='wb') as f:
                f.write(inrelease_data)
        else:
            # Fall back to Release with a detached Release.gpg signature.
            release_file = os.path.join(tempdir, 'Release')
            signature_file = os.path.join(tempdir, 'Release.gpg')
            release_data = self._fetch_archive_element(
                self._get_release_file_url('Release'))
            with open(release_file, mode='wb') as f:
                f.write(release_data)
            # The detached signature is only needed when we can verify it.
            if self._repository_key:
                signature_data = self._fetch_archive_element(
                    self._get_release_file_url('Release.gpg'))
                with open(signature_file, mode='wb') as f:
                    f.write(signature_data)
        if self._repository_key:
            key_data = fetch_repository_key(self._repository_key)
            keyring = build_keyring(tempdir, 'trusted.gpg', key_data)
            self._verify_signature(tempdir, keyring, release_file,
                                   signature_file)
        else:
            # Deliberate best effort: no key configured means no verification.
            logging.warning(
                'Package {} will get downloaded without verification!'.
                format(package_name))
        package_files = self._parse_release_file(release_file)
        requested_package = self._find_package_in_package_files(
            package_name, package_files)
        if not requested_package:
            raise FatalError(
                ("Package '{}' not found in repository '{}'.").format(
                    package_name, self._source.uri))
        else:
            result = self._download_package(requested_package, dest)
            return result
def _resolve_path(self, path):
    """
    Resolve a (possibly relative) plugin path to an existing file.

    Absolute paths are validated as-is; relative paths are searched in the
    project plugin directory first and the edi plugin directory second.

    :param path: absolute or relative plugin path
    :return: absolute path of the existing file
    :raises FatalError: if no matching file exists.
    """
    if os.path.isabs(path):
        if os.path.isfile(path):
            return path
        raise FatalError(("'{}' does not exist."
                          ).format(path))
    locations = [self.get_project_plugin_directory(),
                 get_edi_plugin_directory()]
    for candidate in (os.path.join(location, path) for location in locations):
        if os.path.isfile(candidate):
            return candidate
    raise FatalError(("'{0}' not found in the "
                      "following locations:\n{1}"
                      ).format(path, "\n".join(locations)))
def check_for_absence_of_output_files(self):
    """
    Abort if any documentation step would overwrite an existing file in the
    rendered output directory.

    :raises FatalError: if an output file already exists.
    """
    for name, _, _, raw_node in self._get_documentation_steps():
        candidate = os.path.join(self.rendered_output,
                                 self._get_output_file(name, raw_node))
        if os.path.exists(candidate):
            raise FatalError(
                "Output file '{}' already exists.".format(candidate))
def __new__(cls, clsname, bases, attrs):
    """
    Create a command class and register it in the global command registry.

    The anchor class itself is not registered; a duplicate command name
    aborts with a FatalError.
    """
    new_class = super(CommandFactory, cls).__new__(cls, clsname, bases,
                                                   attrs)
    if clsname.lower() == _command_anchor:
        # The abstract anchor never appears in the registry.
        return new_class
    new_key = new_class._get_command_name()
    if _command_registry.get(new_key):
        raise FatalError(("A command named '{}' has "
                          "already been registered").format(new_key))
    _command_registry[new_key] = new_class
    return new_class
def _fetch_archive_element_impl(url, check=True):
    """
    Download an archive element.

    :param url: url of the element to fetch
    :param check: when True a failed download raises a FatalError,
                  when False it returns None instead
    :return: the raw content bytes or None
    :raises FatalError: if the download fails and check is True.
    """
    req = requests.get(url)
    if req.status_code == 200:
        return req.content
    if check:
        raise FatalError(
            ("Unable to fetch archive element '{0}'.").format(url))
    return None
def fetch_repository_key(key_url):
    """
    Download a repository signing key.

    :param key_url: url of the key (a falsy value skips the download)
    :return: the key as text, or None if no url was given
    :raises FatalError: if the download fails.
    """
    if not key_url:
        return None
    response = requests.get(key_url,
                            proxies=ProxySetup().get_requests_dict())
    if response.status_code != 200:
        raise FatalError(("Unable to fetch repository key '{0}'"
                          ).format(key_url))
    return response.text
def _render_chunk(template_path, context, outfile):
    """
    Render a single jinja2 template chunk into an open output file.

    :param template_path: path of the jinja2 template
    :param context: dictionary used as the template context
    :param outfile: open, writable file object receiving the rendered text
    :raises FatalError: if the template can not be loaded or rendered.
    """
    try:
        template_loader = jinja2.FileSystemLoader(
            searchpath=os.path.dirname(template_path))
        environment = jinja2.Environment(loader=template_loader)
        template_file = os.path.basename(template_path)
        template = environment.get_template(template_file)
    except jinja2.TemplateError as te:
        # Chain the original error so the full template traceback survives
        # (consistent with annotated_yaml_load).
        raise FatalError(
            "Encountered template error while processing '{}':\n{}".format(
                template_path, str(te))) from te
    try:
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            logging.debug("Rendering chunk with context:")
            logging.debug(yaml.dump(context, default_flow_style=False))
        outfile.write(template.render(context))
    except Exception as e:
        # The message already embeds the formatted traceback; chain anyway.
        raise FatalError("Failed to render '{}':\n{}".format(
            template_path, traceback.format_exc(limit=-1))) from e
def get_stripped_version(version):
    """
    Strips the suffixes from the version string.

    :param version: Version string that needs to be parsed
    :return: a stripped version string of the format MAJOR[.MINOR[.PATCH]]
    :raises FatalError: if the version string can not be parsed.
    """
    # Raw string: '\d' in a plain string literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6).
    result = re.match(r'\d+(\.\d+){0,2}', version)
    if result:
        return result.group(0)
    else:
        raise FatalError('''Unable to parse version '{}'.'''.format(version))
def check():
    """
    Verify (once per process) that the installed lxd version satisfies the
    minimal requirement.

    :raises FatalError: if the installed lxd version is too old.
    """
    if LxdVersion._check_done:
        return
    installed = Version(get_stripped_version(get_lxd_version()))
    if installed < Version(LxdVersion._required_minimal_version):
        raise FatalError((
            'The current lxd installation ({}) does not meet the minimal requirements (>={}).\n'
            'Please update your lxd installation using snaps or xenial-backports!'
        ).format(get_lxd_version(), LxdVersion._required_minimal_version))
    # Cache the successful check so subsequent calls are no-ops.
    LxdVersion._check_done = True
def annotated_yaml_load(stream, context_hint):
    """
    Load a yaml configuration and throw a FatalError containing a hint
    if the yaml stream can not be parsed.

    :param stream: A yaml formatted stream.
    :param context_hint: A hint for the user where the yaml configuration
                         comes from.
    :return: The content of the yaml as a Python object.
    :raises FatalError: if the stream is not valid yaml.
    """
    try:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; if any caller feeds untrusted input,
        # switch to yaml.safe_load - confirm against the call sites.
        return yaml.load(stream)
    except yaml.parser.ParserError as e:
        raise FatalError("Invalid yaml configuration '{}':\n{}".format(
            context_hint, str(e))) from e
def func_wrapper(*args, **kwargs):
    """
    Ensure the required executable is available (and, if configured,
    version checked) before delegating to the wrapped function.

    :raises FatalError: if the executable is missing.
    """
    if not Executables.has(executable):
        # Fall back to a generic hint when no install command is configured.
        installation_hint = installation_command or 'apt or snap'
        raise FatalError(("Missing executable '{0}'.\n"
                          "Use e.g. {1} to install it."
                          ).format(executable, installation_hint))
    if version_check:
        version_check()
    return func(*args, **kwargs)