def _start_container(self):
    """Start the lxc container named by self._result()."""
    # Build the command as a single literal instead of repeated append calls.
    cmd = ["lxc", "start", self._result()]
    run(cmd)
def _delete_image(self):
    """Delete the image identified by self._result() from the local lxc image store."""
    # Single list literal instead of an append chain.
    cmd = ["lxc", "image", "delete", "local:{}".format(self._result())]
    run(cmd)
def test_target_configure(config_files, monkeypatch, capsys):
    """Integration test: run 'edi target configure' against a remote target
    with the ansible-playbook invocation mocked out via mockablerun."""
    def fakerun(*popenargs, **kwargs):
        # Intercept only ansible-playbook; every other command runs for real.
        if get_command(popenargs) == "ansible-playbook":
            return subprocess.CompletedProcess("fakerun", 0, '')
        else:
            print('Passthrough: {}'.format(get_command(popenargs)))
            return subprocess.run(*popenargs, **kwargs)

    monkeypatch.setattr(mockablerun, 'run_mockable', fakerun)
    suppress_chown_during_debuild(monkeypatch)
    with workspace():
        edi_exec = os.path.join(get_project_root(), 'bin', 'edi')
        project_name = 'pytest-{}'.format(get_random_string(6))
        config_command = [
            edi_exec, 'config', 'init', project_name, 'debian-jessie-amd64'
        ]
        run(config_command)  # run as non root
        parser = edi._setup_command_line_interface()
        cli_args = parser.parse_args([
            'target', 'configure', 'remote-target', '{}-develop.yml'.format(project_name)
        ])
        Configure().run_cli(cli_args)
        out, err = capsys.readouterr()
        print(out)
        # A shallow-clone warning on stderr is tolerated; anything else fails.
        assert not err or 'is shallow and may cause errors' in err
def verify_container_mountpoints(self, container_name):
    """
    Verify that all mount points exist within the target system.
    If a target mount point is missing, raise a fatal error.
    Hint: It is assumed that the mount points within the target get
    created during the configuration phase.
    """
    if self._suppress_shared_folders():
        return

    # First make sure we can execute anything at all inside the container.
    test_cmd = ['lxc', 'exec', container_name, '--', 'true']
    result = run(test_cmd, check=False, stderr=subprocess.PIPE)
    if result.returncode != 0:
        # Fixed wording: original message read "The communicate with ...".
        raise FatalError((
            '''The communication with the container '{}' failed with the message '{}'.'''
        ).format(container_name, result.stderr))

    mountpoints = self.get_mountpoints()
    for mountpoint in mountpoints:
        cmd = [
            'lxc', 'exec', container_name, '--', 'test', '-d', mountpoint
        ]
        if run(cmd, check=False).returncode != 0:
            # Fixed wording: "is valid mount point" -> "is a valid mount point".
            raise FatalError((
                '''Please make sure that '{}' is a valid mount point in the container '{}'.\n'''
                '''Hint: Use an appropriate playbook that generates those mount points\n'''
                '''      by using the variable 'edi_shared_folder_mountpoints'.'''
            ).format(mountpoint, container_name))
def _launch_container(self, image, profiles):
    """Launch a container from a local image, applying the given lxc profiles."""
    # List literal instead of an append chain; profiles still appended pairwise.
    cmd = ["lxc", "launch", "local:{}".format(image), self._result()]
    for profile in profiles:
        cmd.extend(["-p", profile])
    run(cmd)
def _import_image(self, image):
    """Import an image archive into the local lxc image store under the result alias."""
    cmd = ["lxc", "image", "import", image, "local:", "--alias", self._result()]
    run(cmd)
def test_is_bridge_available():
    """Create a network bridge, verify detection, and ensure double creation fails."""
    name = "edibrtesttest42"
    assert not is_bridge_available(name)
    create_bridge(name)
    assert is_bridge_available(name)
    # Creating the very same bridge twice must fail.
    with pytest.raises(CalledProcessError):
        create_bridge(name)
    run([lxc_exec(), "network", "delete", name])
    assert not is_bridge_available(name)
def _unpack_image(self, image, tempdir, subfolder="rootfs"):
    """Unpack an image archive into <tempdir>/<subfolder> and return that folder.

    tar runs as root (sudo=True) with --numeric-owner so that the numeric
    ownership stored in the archive is preserved verbatim.
    """
    target_folder = os.path.join(tempdir, subfolder)
    os.makedirs(target_folder, exist_ok=True)
    # Single list literal instead of an append/extend chain.
    cmd = ["tar", "--numeric-owner", "-C", target_folder, "-axf", image]
    run(cmd, sudo=True, log_threshold=logging.INFO)
    return target_folder
def stop_container(name, timeout=120):
    """Stop the named container; force the shutdown if the timeout expires."""
    try:
        run(["lxc", "stop", name], log_threshold=logging.INFO, timeout=timeout)
    except subprocess.TimeoutExpired:
        logging.warning(
            ("Timeout ({} seconds) expired while stopping container {}.\n"
             "Forcing container shutdown!").format(timeout, name))
        # Retry with -f to force the shutdown.
        run(["lxc", "stop", "-f", name], log_threshold=logging.INFO)
def test_create_jessie_image(capsys):
    """Integration test: build a Debian jessie image with 'edi image create',
    verify the generated artifacts and lxc image store entries, then remove
    everything via --clean / --recursive-clean and verify it is gone."""
    print(os.getcwd())
    with workspace():
        edi_exec = os.path.join(get_project_root(), 'bin', 'edi')
        project_name = 'pytest-{}'.format(get_random_string(6))
        config_command = [edi_exec, 'config', 'init', project_name, 'debian-jessie-amd64']
        run(config_command)  # run as non root
        parser = edi._setup_command_line_interface()
        cli_args = parser.parse_args(['image', 'create', '{}-develop.yml'.format(project_name)])
        Create().run_cli(cli_args)
        out, err = capsys.readouterr()
        print(out)
        assert not err
        lxc_compression_algo = get_server_image_compression_algorithm()
        lxc_export_extension = get_file_extension_from_image_compression_algorithm(lxc_compression_algo)
        # All file artifacts that the image build is expected to leave behind.
        images = [
            os.path.join(get_artifact_dir(), '{}-develop_edicommand_image_bootstrap_di.tar.gz'.format(project_name)),
            os.path.join(get_artifact_dir(), '{}-develop_edicommand_image_lxc_di.tar.gz'.format(project_name)),
            os.path.join(get_artifact_dir(), '{}-develop_edicommand_lxc_export{}'.format(project_name, lxc_export_extension)),
            os.path.join(get_artifact_dir(), '{}-develop.result'.format(project_name)),
        ]
        for image in images:
            assert os.path.isfile(image)
        # Entries expected in the lxc image store after the build.
        image_store_items = [
            "{}-develop_edicommand_lxc_import_di".format(project_name),
            "{}-develop_edicommand_lxc_publish".format(project_name)
        ]
        lxc_image_list_cmd = ['lxc', 'image', 'list']
        result = run(lxc_image_list_cmd, stdout=subprocess.PIPE)
        for image_store_item in image_store_items:
            assert image_store_item in result.stdout
        # Clean up: first the direct artifacts, then recursively (depth 8).
        parser = edi._setup_command_line_interface()
        cli_args = parser.parse_args(['image', 'create', '--clean', '{}-develop.yml'.format(project_name)])
        Create().run_cli(cli_args)
        parser = edi._setup_command_line_interface()
        cli_args = parser.parse_args(['image', 'create', '--recursive-clean', '8', '{}-develop.yml'.format(project_name)])
        Create().run_cli(cli_args)
        for image in images:
            assert not os.path.isfile(image)
        result = run(lxc_image_list_cmd, stdout=subprocess.PIPE)
        for image_store_item in image_store_items:
            assert image_store_item not in result.stdout
def test_artifacts_folder_removal(monkeypatch):
    """safely_remove_artifacts_folder must delete a folder below the workspace."""
    suppress_chown_during_debuild(monkeypatch)
    with workspace() as workdir:
        create_artifact_dir()
        artifacts_dir = get_artifact_dir()
        # The artifact dir must live inside the temporary workspace.
        assert str(workdir) in str(artifacts_dir)
        victim = os.path.join(artifacts_dir, get_random_string(20))
        run(['mkdir', '-p', victim])
        assert os.path.isdir(victim)
        safely_remove_artifacts_folder(victim)
        assert not os.path.isdir(victim)
def _pack_image(self, tempdir, datadir, name="result"):
    """Pack the contents of datadir into a tar archive inside tempdir.

    Returns the path of the created archive. Advanced options such as
    --numeric-owner are not supported by the python tarfile library -
    therefore we use the tar command line tool (as root via sudo).
    """
    tempresult = "{0}.tar.{1}".format(name, self.config.get_compression())
    archive_path = os.path.join(tempdir, tempresult)
    # List literal instead of an append/extend chain.
    cmd = ["tar", "--numeric-owner", "-C", datadir, "-acf", archive_path]
    cmd.extend(os.listdir(datadir))
    run(cmd, sudo=True, log_threshold=logging.INFO)
    return archive_path
def verify_shared_folder(container_name):
    """Touch a file inside the container's shared workspace and verify that it
    appears on the host with the expected uid/gid, then remove it."""
    base_dict = get_base_dictionary()
    random_file = get_random_string(20)
    workspace_folder = 'edi-workspace'
    container_path = os.path.join(
        base_dict.get('edi_current_user_target_home_directory'),
        workspace_folder, random_file)
    # Create the file inside the container as the current (non root) user.
    run([lxc_exec(), 'exec', container_name, '--',
         'sudo', '-u', base_dict.get('edi_current_user_name'),
         'touch', container_path])
    shared_file = os.path.join(
        base_dict.get('edi_current_user_host_home_directory'),
        workspace_folder, random_file)
    stat = os.stat(shared_file)
    assert stat.st_gid == base_dict.get('edi_current_user_gid')
    assert stat.st_uid == base_dict.get('edi_current_user_uid')
    os.remove(shared_file)
def get_server_image_compression_algorithm():
    """Return the lxc server side image compression algorithm ('gzip' if unset)."""
    cmd = ['lxc', 'config', 'get', 'images.compression_algorithm']
    algorithm = run(cmd, stdout=subprocess.PIPE).stdout.strip('\n')
    # An empty result means the server falls back to its default: gzip.
    return algorithm if algorithm else 'gzip'
def get_user_ssh_pub_keys():
    """
    Search for all ssh public keys of the current user
    (not the root user when called with sudo).

    :return: A list of ssh public keys. The list will be empty if the
             tool ssh is not installed.
    """
    if not edi.lib.helpers.which('ssh'):
        return []
    # Query the effective client configuration for a random (nonexistent) host.
    random_host = ''.join(random.choice(string.ascii_uppercase + string.digits)
                          for _ in range(10))
    ssh_config = run(['ssh', '-G', random_host], stdout=subprocess.PIPE).stdout
    user_home = get_user_home_directory(edi.lib.helpers.get_user())
    identity_files = re.findall(r'^identityfile (.*)$', ssh_config,
                                flags=re.MULTILINE)
    ssh_pub_keys = []
    for identity_file in identity_files:
        private_key = re.sub(r'^~', user_home, identity_file)
        public_key = '{}.pub'.format(private_key)
        # Only report key pairs where both the private and the public part exist.
        if os.path.isfile(private_key) and os.path.isfile(public_key):
            ssh_pub_keys.append(public_key)
    return ssh_pub_keys
def create_host_folders(self):
    """
    Make sure that all configured shared folders exist on the host system.
    If a folder is missing, create it!
    """
    if self._suppress_shared_folders():
        return

    host_folders = self._get_folder_list(
        'edi_current_user_host_home_directory', 'folder')

    for folder in host_folders:
        if os.path.exists(folder):
            if not os.path.isdir(folder):
                # The path exists but is not a directory (file, symlink, ...),
                # so it cannot be shared into a container.
                raise FatalError(
                    '''The location '{}' does '''
                    '''exist on the host system but it is not a folder that '''
                    '''can be shared to a container.'''.format(folder))
            else:
                logging.debug((
                    '''The shared folder '{}' on the host system has already been created.'''
                ).format(folder))
        else:
            cmd = ['mkdir', '-p', folder]
            # Use the current user (not root) to create the folder!
            result = run(cmd, check=False, stderr=subprocess.PIPE)
            if result.returncode != 0:
                raise FatalError((
                    '''The creation of the folder '{}' failed with the message '{}'.'''
                ).format(folder, result.stderr))
            else:
                logging.debug((
                    '''Successfully created the shared folder '{}' on the host system.'''
                ).format(folder))
def _is_container_existing(self):
    """Return True if a container named self._result() is known to lxc."""
    cmd = ["lxc", "info", self._result()]
    # A non zero return code simply means "not found" - do not raise.
    result = run(cmd, check=False, stderr=subprocess.PIPE)
    return result.returncode == 0
def get_container_ip_addr(container_name, interface):
    """Return the first IPv4 address of the given interface within the container."""
    show_cmd = [lxc_exec(), 'exec', container_name, '--',
                'ip', '-4', 'addr', 'show', interface]
    output = run(show_cmd, stdout=subprocess.PIPE).stdout
    # Pick the address of the first 'inet a.b.c.d/prefix' line.
    return re.findall(r'^\s*inet\s([0-9\.]*)/.*', output, re.MULTILINE)[0]
def _is_profile_existing(self, name):
    """Return True if the lxc profile 'name' exists."""
    cmd = ["lxc", "profile", "show", name]
    # A non zero return code simply means "not found" - do not raise.
    result = run(cmd, check=False, stderr=subprocess.PIPE)
    return result.returncode == 0
def get_lxd_version():
    """Return the installed lxd version as a string."""
    result = run(['lxd', '--version'],
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # recent lxd versions print the version to stderr!
    output = result.stdout if result.stdout else result.stderr
    return output.strip('\n')
def _is_in_image_store(self):
    """Return True if the result image is present in the local lxc image store."""
    cmd = ["lxc", "image", "show", "local:{}".format(self._result())]
    # A non zero return code simply means "not found" - do not raise.
    result = run(cmd, check=False, stderr=subprocess.PIPE)
    return result.returncode == 0
def _run_playbook(self, playbook, inventory, extra_vars, ansible_user):
    """Run the given ansible playbook using the configured connection."""
    require_executable("ansible-playbook", "sudo apt install ansible")

    cmd = ["ansible-playbook",
           "--connection", self.connection,
           "--inventory", inventory,
           "--extra-vars", "@{}".format(extra_vars)]
    if self.connection == "ssh":
        cmd.extend(["--user", ansible_user])
    cmd.append(playbook)
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        cmd.append("-vvvv")

    # Keep ansible's temporary files per user to avoid permission clashes.
    ansible_env = os.environ.copy()
    ansible_env['ANSIBLE_REMOTE_TEMP'] = '/tmp/ansible-{}'.format(get_user())

    run(cmd, env=ansible_env, log_threshold=logging.INFO)
def write_lxc_profile(profile_text):
    """Create or update an lxc profile derived from profile_text.

    The profile name gets extended with a hash of the profile content so
    that distinct contents yield distinct profiles.

    :param profile_text: the profile as yaml text
    :return: tuple (extended profile name, True if the profile is new)
    """
    new_profile = False
    # safe_load avoids arbitrary object construction and keeps working with
    # PyYAML >= 5 where yaml.load without an explicit Loader is deprecated.
    profile_yaml = yaml.safe_load(profile_text)
    profile_hash = hashlib.sha256(profile_text.encode()).hexdigest()[:20]
    profile_name = profile_yaml.get("name", "anonymous")
    ext_profile_name = "{}_{}".format(profile_name, profile_hash)
    profile_yaml["name"] = ext_profile_name
    profile_content = yaml.dump(profile_yaml, default_flow_style=False)
    if not is_profile_existing(ext_profile_name):
        create_cmd = ["lxc", "profile", "create", ext_profile_name]
        run(create_cmd)
        new_profile = True
    edit_cmd = ["lxc", "profile", "edit", ext_profile_name]
    run(edit_cmd, input=profile_content)
    return ext_profile_name, new_profile
def test_build_stretch_container(capsys):
    """Integration test: configure a Debian stretch lxc container, verify the
    intermediate image artifacts, clean them, check the running container's
    OS release and finally stop and delete the container."""
    print(os.getcwd())
    with workspace():
        edi_exec = os.path.join(get_project_root(), 'bin', 'edi')
        project_name = 'pytest-{}'.format(get_random_string(6))
        config_command = [edi_exec, 'config', 'init', project_name, 'debian-stretch-amd64']
        run(config_command)  # run as non root
        container_name = 'pytest-{}'.format(get_random_string(6))
        parser = edi._setup_command_line_interface()
        cli_args = parser.parse_args(['-v', 'lxc', 'configure', container_name, '{}-develop.yml'.format(project_name)])
        Configure().run_cli(cli_args)
        out, err = capsys.readouterr()
        print(out)
        assert not err
        # Intermediate artifacts expected after the configure step.
        images = [
            '{}-develop_edicommand_image_bootstrap.tar.gz'.format(project_name),
            '{}-develop_edicommand_image_lxc.tar.gz'.format(project_name)
        ]
        for image in images:
            assert os.path.isfile(image)
        lxc_image_list_cmd = ['lxc', 'image', 'list']
        result = run(lxc_image_list_cmd, stdout=subprocess.PIPE)
        assert project_name in result.stdout
        # 'edi clean' must remove artifacts and image store entries.
        parser = edi._setup_command_line_interface()
        cli_args = parser.parse_args(['-v', 'clean', '{}-develop.yml'.format(project_name)])
        Clean().run_cli(cli_args)
        for image in images:
            assert not os.path.isfile(image)
        result = run(lxc_image_list_cmd, stdout=subprocess.PIPE)
        assert project_name not in result.stdout
        # The container itself must still be running a Debian 9 (stretch) system.
        verification_command = ['lxc', 'exec', container_name, '--', 'cat', '/etc/os-release']
        result = run(verification_command, stdout=subprocess.PIPE)
        assert '''VERSION_ID="9"''' in result.stdout
        assert 'ID=debian' in result.stdout
        stop_command = ['lxc', 'stop', container_name]
        run(stop_command)
        delete_command = ['lxc', 'delete', container_name]
        run(delete_command)
def get_gsettings_value(schema, key, default=None):
    """Return the gsettings value for schema/key, or 'default' on failure."""
    cmd = ['gsettings', 'get', schema, key]
    # On Ubuntu the following command will cause some output on stderr.
    # This could be avoided by running the command as root but with this attempt
    # we would fail to retrieve the correct gsettings values on Debian.
    result = run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
    if result.returncode != 0:
        logging.debug('''The command '{}' failed: {}'''.format(cmd, result.stderr))
        return default
    return result.stdout.strip('\n').strip("'")
def _write_lxc_profile(self, profile_text):
    """Create or update an lxc profile derived from profile_text and
    return its content hash extended name."""
    # safe_load avoids arbitrary object construction and keeps working with
    # PyYAML >= 5 where yaml.load without an explicit Loader is deprecated.
    profile_yaml = yaml.safe_load(profile_text)
    profile_hash = hashlib.sha256(profile_text.encode()).hexdigest()[:20]
    profile_name = profile_yaml.get("name", "anonymous")
    ext_profile_name = "{}_{}".format(profile_name, profile_hash)
    profile_yaml["name"] = ext_profile_name
    profile_content = yaml.dump(profile_yaml, default_flow_style=False)
    if not self._is_profile_existing(ext_profile_name):
        create_cmd = ["lxc", "profile", "create", ext_profile_name]
        run(create_cmd)
        print_success("Created lxc profile {}.".format(ext_profile_name))
    edit_cmd = ["lxc", "profile", "edit", ext_profile_name]
    run(edit_cmd, input=profile_content)
    return ext_profile_name
def get_lxd_version():
    """Return the installed lxd version, or '0.0.0' when lxd is not available."""
    if not Executables.has('lxd'):
        return '0.0.0'
    cmd = [Executables.get('lxd'), '--version']
    result = run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # recent lxd versions print the version to stderr!
    output = result.stdout if result.stdout else result.stderr
    return output.strip('\n')
def test_flake8():
    """Run flake8 over the whole project tree and fail on any finding."""
    project_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    result = run(['flake8', '--max-line-length=120', project_path],
                 check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        # Surface the linter output before failing the test.
        print(result.stdout)
        print(result.stderr)
        assert False, "flake8 reported errors!"
def _postprocess_rootfs(self, rootfs, key_data):
    """Finalize a freshly bootstrapped rootfs.

    Optionally adds an apt key, cleans the apt caches, removes the apt
    lists and rewrites /etc/apt/sources.list.
    """
    # List concatenation instead of repeated append calls.
    if key_data:
        key_cmd = get_chroot_cmd(rootfs) + ["apt-key", "add", "-"]
        run(key_cmd, input=key_data, sudo=True)

    clean_cmd = get_chroot_cmd(rootfs) + ["apt-get", "clean"]
    run(clean_cmd, sudo=True)

    apt_list_cmd = get_chroot_cmd(rootfs) + ["rm", "-rf", "/var/lib/apt/lists/"]
    run(apt_list_cmd, sudo=True)

    # after a cross debootstrap /etc/apt/sources.list points
    # to the wrong repository
    sources_list = os.path.join(rootfs, 'etc', 'apt', 'sources.list')
    with open(sources_list, mode='w', encoding='utf-8') as f:
        f.write(('# edi bootstrap repository\n{}\n').format(
            self.config.get_bootstrap_repository()))
def _run_playbook(self, playbook, inventory, extra_vars, ansible_user):
    """Run the given ansible playbook using the configured connection.

    Prepends /snap/bin to PATH when it is missing so that a snap
    installed ansible gets found (relevant on Debian systems).
    """
    snap_path = '/snap/bin'
    cmd = list()
    # on a Debian system the snap path might not be found in the PATH variable
    if snap_path not in os.environ['PATH']:
        cmd.extend(
            ["env", "PATH={}:{}".format(os.environ['PATH'], snap_path)])
    cmd.append("ansible-playbook")
    cmd.extend(["--connection", self.connection])
    cmd.extend(["--inventory", inventory])
    cmd.extend(["--extra-vars", "@{}".format(extra_vars)])
    if self.connection == "ssh":
        # Only ssh connections need an explicit remote user.
        cmd.extend(["--user", ansible_user])
    cmd.append(playbook)
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        cmd.append("-vvvv")
    ansible_env = os.environ.copy()
    # Keep ansible's temporary files per user to avoid permission clashes.
    ansible_env['ANSIBLE_REMOTE_TEMP'] = '/tmp/ansible-{}'.format(
        get_user())
    run(cmd, env=ansible_env, log_threshold=logging.INFO)