def _network(network_name):
    """Create a docker network for the duration of the context.

    The network is removed again on exit, even if the body raises.
    """
    shell(["docker", "network", "create", network_name])
    try:
        yield
    finally:
        shell(["docker", "network", "rm", network_name])
def test_secure_connection_should_succeed_with_root_ca(engine_fqdn, engine_ip, engine_webadmin_url, engine_cert):
    """An https connection to webadmin must succeed when curl trusts the
    engine root CA certificate (passed via --cacert)."""
    resolve_entry = "{}:443:{}".format(engine_fqdn, engine_ip)
    curl_cmd = [
        "curl",
        "-sS",
        "--resolve",
        resolve_entry,
        "--cacert",
        engine_cert,
        engine_webadmin_url,
    ]
    shell(curl_cmd)
def _log_issues(pod_name, hub_name, node_names):
    """Dump pod/hub/node diagnostics to the error log to help debug
    selenium-grid startup failures.

    :param pod_name: podman pod to inspect
    :param hub_name: selenium hub container whose logs are dumped
    :param node_names: iterable of node container names to dump logs for
    """
    # Pass lazy %-style args instead of pre-formatting with '%' so the
    # logging framework does the interpolation (standard logging idiom).
    LOGGER.error("Pod inspection: \n%s", shell(["podman", "pod", "inspect", pod_name]))
    LOGGER.error("Hub logs: \n%s", shell(["podman", "logs", hub_name]))
    for name in node_names:
        LOGGER.error("Node %s logs: \n%s", name, shell(["podman", "logs", name]))
def engine_cert(engine_ip):
    """Fixture: download the engine CA certificate and yield the PEM path.

    The original used delete=False, which left the downloaded certificate
    behind in the temp dir forever; letting NamedTemporaryFile delete the
    file on teardown matches the sibling engine_cert fixture's behavior.
    """
    with tempfile.NamedTemporaryFile(prefix="engine-cert", suffix=".pem") as cert_file:
        url = ("https://%s/ovirt-engine/services/pki-resource"
               "?resource=ca-certificate&format=X509-PEM-CA" % engine_ip)
        # --insecure: we cannot verify the very CA cert we are fetching here.
        shell(["curl", "--insecure", "--output", cert_file.name, url])
        yield cert_file.name
def grid_health_check(hub_url, expected_node_count=None):
    """Poll the selenium grid until it reports ready and, optionally, until
    the expected number of node slots is registered.

    :param hub_url: hub endpoint, ending in /wd/hub
    :param expected_node_count: if not None, also wait for this slot total
    :raises SeleniumGridError: when the grid or nodes fail to come up in time
    """
    status_url = hub_url + "/status"
    for _ in range(GRID_STARTUP_WAIT_RETRIES):
        try:
            out = shell(["curl", "-sSL", status_url])
            # truthiness check instead of the non-idiomatic '== True'
            if json.loads(out)["value"]["ready"]:
                break
        except ShellError:
            pass
        time.sleep(0.1)
    else:
        raise SeleniumGridError("Selenium grid didn't start up properly")
    if expected_node_count is not None:
        # derive ".../grid/api/hub" by stripping the trailing "/wd/hub"
        api_url = "/".join(hub_url.split("/")[:-2] + ["grid/api/hub"])
        for _ in range(GRID_STARTUP_WAIT_RETRIES):
            try:
                out = shell(["curl", "-sSL", api_url])
                node_count = json.loads(out)["slotCounts"]["total"]
                if node_count == expected_node_count:
                    break
            except ShellError:
                pass
            time.sleep(0.1)
        else:
            raise SeleniumGridError("Not enough nodes in selenium grid")
def _get_vms(self, deployment_path):
    """Build a name -> VMInfo mapping for all libvirt domains that belong
    to the OST deployment at *deployment_path* (matched via the
    ost-working-dir metadata comment in the domain XML)."""
    ost_domains = [
        domain
        for domain in shell("virsh list --name".split()).splitlines()
        if domain[8:13] == "-ost-"
    ]
    result = {}
    for libvirt_name in ost_domains:
        dom_xml = ET.fromstring(
            shell(f"virsh dumpxml {libvirt_name}".split()).strip()
        )
        try:
            working_dir = dom_xml.find(
                "./metadata/{OST:metadata}ost/ost-working-dir[@comment]"
            ).get("comment")
        except AttributeError:
            # find() returned None - domain carries no OST metadata
            continue
        if working_dir != deployment_path:
            continue
        # strip the deployment prefix to get the short VM name
        short_name = libvirt_name[9:]
        scripts = [
            script_node.get("name")
            for script_node in dom_xml.findall(
                "./metadata/{OST:metadata}ost/ost-deploy-scripts/"
                "script[@name]")
        ]
        result[short_name] = VMInfo(
            short_name, libvirt_name, VMNics(dom_xml, self._networks), scripts
        )
    return result
def rsa_pair(engine_admin_service):
    """Generate a throwaway RSA keypair, register the public key with the
    engine admin user, and yield (public_key_content, private_key_path)."""
    with tempfile.TemporaryDirectory(prefix='/tmp/') as tmpdir:
        key_path = f'{tmpdir}/id_rsa'
        # -N '' means an empty passphrase
        shell(['ssh-keygen', '-t', 'rsa', '-f', f'{key_path}', '-N', ''])
        with open(f'{key_path}.pub') as pub_file:
            pub_key = pub_file.read()
        keys_service = engine_admin_service.ssh_public_keys_service()
        keys_service.add(key=sdk4.types.SshPublicKey(content=pub_key))
        yield pub_key, key_path
def dump_dhcp_leases(artifacts_dir, backend, management_network_name):
    """Dump the libvirt DHCP leases of the management network into the
    artifacts directory.

    NOTE(review): the command runs when the value being yielded is
    evaluated, i.e. at generator setup, not at teardown.
    """
    net_name = backend.libvirt_net_name(management_network_name)
    dump_cmd = [
        'bash',
        '-c',
        f'virsh net-dhcp-leases {net_name} > {artifacts_dir}/libvirt-leases',
    ]
    yield shell.shell(dump_cmd)
def _hub(image, hub_port, pod_name):
    """Start a selenium hub container inside *pod_name*, yield its id, and
    force-remove it on exit."""
    run_cmd = [
        "podman", "run", "-d",
        "-e", "SE_OPTS=-port {}".format(hub_port),
        "-v", "/dev/shm:/dev/shm",
        "--pod", pod_name,
        image,
    ]
    container_id = shell(run_cmd).strip()
    try:
        yield container_id
    finally:
        shell(["podman", "rm", "-f", container_id])
def _hub(image, pod_name):
    """Start the selenium hub container in *pod_name*, yield its id, and
    force-remove it afterwards."""
    container_id = shell([
        "podman", "run", "-d",
        "-v", "/dev/shm:/dev/shm",
        "--pod", pod_name,
        image,
    ]).strip()
    try:
        yield container_id
    finally:
        shell(["podman", "rm", "-f", container_id])
def _hub(image, port, network_name):
    """Run the hub container on the given docker network, publishing *port*.

    Yields (container_id, container_ip); removes the container on exit.
    """
    container_id = shell([
        "docker", "run", "-d",
        "-p", "{0}:{0}".format(port),
        "--net", network_name,
        image,
    ]).strip()
    try:
        yield container_id, _get_ip(container_id)
    finally:
        shell(["docker", "rm", "-f", container_id])
def _pod(hub_port, podman_cmd):
    """Create a pod publishing *hub_port*, yield its name, and force-remove
    it on exit.

    When running as root (uid 0) an explicit slirp4netns network backend
    is requested.
    """
    extra_net_args = ["--network=slirp4netns"] if os.getuid() == 0 else []
    pod_name = shell([
        podman_cmd, "pod", "create",
        *extra_net_args,
        "-p", f"{hub_port}:{hub_port}",
    ]).strip()
    try:
        yield pod_name
    finally:
        shell([podman_cmd, "pod", "rm", "-f", pod_name])
def test_secure_connection_should_fail_without_root_ca(engine_fqdn, engine_ip, engine_webadmin_url):
    """Without the engine root CA in curl's trust store, the https
    connection to webadmin must be rejected."""
    with pytest.raises(ShellError) as excinfo:
        shell([
            "curl", "-sS",
            "--resolve", "{}:443:{}".format(engine_fqdn, engine_ip),
            engine_webadmin_url,
        ])
    # message is different in el7 and el8 curl versions
    err = excinfo.value.err
    assert ("self signed certificate in certificate chain" in err
            or "not trusted by the user" in err)
def engine_cert(engine_fqdn, engine_ip):
    """Fixture: fetch the engine CA certificate over plain http (with the
    http proxy disabled) and yield the path of the temporary PEM file."""
    cert_url = (
        "http://{}/ovirt-engine/services/pki-resource"
        "?resource=ca-certificate&format=X509-PEM-CA"
    ).format(engine_fqdn)
    with http_proxy_disabled():
        with tempfile.NamedTemporaryFile(prefix="engine-cert", suffix=".pem") as cert_file:
            shell([
                "curl", "-fsS",
                "-m", "10",
                "--resolve", "{}:80:{}".format(engine_fqdn, engine_ip),
                "-o", cert_file.name,
                cert_url,
            ])
            yield cert_file.name
def collect(hostname, artifacts_list, target_dir):
    """Archive the given artifact paths on *hostname*, fetch the tarball,
    and unpack it under target_dir/test_logs/<hostname>."""
    module_mapper = mm.module_mapper_for(hostname)
    archive_name = "artifacts.tar.gz"
    local_archive_dir = os.path.join(target_dir, "test_logs", hostname)
    local_archive_path = os.path.join(local_archive_dir, archive_name)
    remote_archive_path = os.path.join("/tmp", archive_name)
    os.makedirs(local_archive_dir, exist_ok=True)
    # pack everything remotely, then pull the single tarball over
    module_mapper.archive(
        path=','.join(artifacts_list), dest=remote_archive_path
    )
    module_mapper.fetch(
        src=remote_archive_path, dest=local_archive_path, flat='yes'
    )
    shell.shell(["tar", "-xf", local_archive_path, "-C", local_archive_dir])
    shell.shell(["rm", local_archive_path])
def ansible_inventory_str(self):
    """Lazily read and cache the raw bytes of the 'hosts' inventory file
    located in the deployment directory."""
    if self._ansible_inventory_str is None:
        self._ansible_inventory_str = shell(
            ["cat", "hosts"], bytes_output=True, cwd=self._deployment_path
        )
    return self._ansible_inventory_str
def _nodes(images, hub_ip, hub_port, network_name, engine_dns_entry):
    """Start one selenium node container per image, wired to the hub via
    HUB_HOST/HUB_PORT. Yields the container ids; removes them all on exit."""
    container_ids = []
    for image in images:
        container_ids.append(shell([
            "docker", "run", "-d",
            "--add-host={}".format(engine_dns_entry),
            "--net", network_name,
            "-e", "HUB_HOST={}".format(hub_ip),
            "-e", "HUB_PORT={}".format(hub_port),
            "-v", "/dev/shm:/dev/shm",
            image,
        ]).strip())
    try:
        yield container_ids
    finally:
        for container_id in container_ids:
            shell(["docker", "rm", "-f", container_id])
def _nodes(images, hub_port, pod_name, engine_dns_entry):
    """Start one selenium node container per image inside *pod_name*, each
    with a unique node port and X display. Yields the container ids and
    force-removes them on exit."""
    container_ids = []
    for image in images:
        container_ids.append(shell([
            "podman", "run", "-d",
            "-v", "/dev/shm:/dev/shm",
            "--add-host={}".format(engine_dns_entry),
            "-e", "HUB_HOST={}".format(HUB_IP),
            "-e", "HUB_PORT={}".format(hub_port),
            # generators hand out a distinct port/display per node
            "-e", "SE_OPTS=-port {}".format(next(NODE_PORT_GEN)),
            "-e", "DISPLAY=:{}".format(next(NODE_DISPLAY_ADDR_GEN)),
            "--pod", pod_name,
            image,
        ]).strip())
    try:
        yield container_ids
    finally:
        for container_id in container_ids:
            shell(["podman", "rm", "-f", container_id])
def _get_ip(name):
    """Return the IP address of a docker container.

    :param name: container name or id
    :raises RuntimeError: if docker reports an empty address (usually a
        symptom of an earlier container/network failure)
    """
    ip = shell([
        "docker", "inspect", "-f",
        "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}",
        name
    ]).strip()
    # idiomatic emptiness test instead of comparing against ""
    if not ip:
        raise RuntimeError(("Could not get ip address of container. "
                            "See previous messages for probable docker failure"))
    return ip
def run_net_update_add(*extra_args):
    """Run 'virsh net-update ... add' with the given extra args, logging and
    ignoring failures (best-effort, see TODO below)."""
    cmd = (
        'virsh',
        '-c',
        'qemu:///system',
        'net-update',
        libvirt_net_name,
        'add',
    ) + extra_args
    try:
        shell.shell(cmd)
    except shell.ShellError:
        # TODO: Optionally/Conditionally fail.
        # Why not to always fail? Because it's comfortable to be able to
        # retry stuff with lagofy.sh/run_tc.
        # So need to do one or more of:
        # 1. Check if exists, do not try to add
        # 2. Remove and then add?
        # 3. raise an exception and let caller catch/handle
        # 4. Let caller decide whether we fail
        # Logger.warn is a deprecated alias; warning() is the supported API.
        LOGGER.warning(f"Failed '{cmd}', ignoring", exc_info=True)
def ansible_inventory(self):
    """Return the path of a temporary file holding the lago ansible inventory.

    The inventory is generated once (under self._lock) and cached. The
    NamedTemporaryFile object itself is stored on self so the underlying
    file is not deleted while this object is alive.
    """
    with self._lock:
        if self._ansible_inventory is None:
            contents = shell.shell(["lago", "ansible_hosts"],
                                   bytes_output=True,
                                   cwd=self._prefix_path)
            inventory = tempfile.NamedTemporaryFile()
            inventory.write(contents)
            inventory.flush()
            # make sure the bytes hit the disk before anyone reads the path
            os.fsync(inventory.fileno())
            self._ansible_inventory = inventory
        return self._ansible_inventory.name
def _grid_health_check(hub_url, browser_name):
    """Wait until the grid reports ready and a node for *browser_name* is
    registered.

    :returns: True when the grid is healthy
    :raises SeleniumGridError: if it never becomes healthy within the retries
    """
    status_url = hub_url + "/status"
    for _attempt in range(GRID_STARTUP_WAIT_RETRIES):
        try:
            status = json.loads(shell(["curl", "-sSL", status_url]))
            if status["value"]["ready"] and _node_ready(status, browser_name):
                return True
        except ShellError:
            # curl failed - grid likely not listening yet; retry
            pass
        time.sleep(0.1)
    raise SeleniumGridError("Selenium grid didn't start up properly")
def download(url, path=None, timeout=10):
    """Download *url* with curl, resolving the engine FQDN to the fixture's
    ip (port 443 with the engine CA cert for https, port 80 otherwise).

    :param path: when given, write the response there via -o
    :returns: the shell output as bytes
    """
    curl_args = ["curl", "-fsS", "-m", str(timeout)]
    if url.startswith("https"):
        curl_args += [
            "--resolve",
            "{}:443:{}".format(engine_fqdn, engine_ip),
            "--cacert",
            request.getfixturevalue("engine_cert"),
        ]
    else:
        curl_args += [
            "--resolve",
            "{}:80:{}".format(engine_fqdn, engine_ip),
        ]
    if path is not None:
        curl_args += ["-o", path]
    return shell(curl_args + [url], bytes_output=True)
def _get_libvirt_names_for_ost_nets_on_machine(self):
    """Return the names of all libvirt networks on this host whose name
    starts with 'ost'."""
    all_nets = shell("virsh net-list --name".split()).splitlines()
    return [net for net in all_nets if net.startswith("ost")]
def _pod(hub_port):
    """Create a podman pod publishing *hub_port*, yield its name, and
    remove it when the context exits."""
    pod_name = shell(["podman", "pod", "create", "-p", str(hub_port)]).strip()
    try:
        yield pod_name
    finally:
        shell(["podman", "pod", "rm", pod_name])
def _log_issues(hub_name, node_names):
    """Dump the hub and node container logs at error level to help debug
    grid failures.

    :param hub_name: selenium hub container name
    :param node_names: iterable of node container names
    """
    # Lazy %-style args defer string interpolation to the logging framework.
    LOGGER.error("Hub logs: \n%s", shell(["podman", "logs", hub_name]))
    for name in node_names:
        LOGGER.error("Node %s logs: \n%s", name, shell(["podman", "logs", name]))
def collect_artifacts(root_dir):
    """On teardown, invoke ost.sh fetch-artifacts to collect the run's
    artifacts."""
    yield
    ost_script = os.path.join(root_dir, "ost.sh")
    # shell=True: the command is passed as a single string to a shell
    shell.shell(f"{ost_script} fetch-artifacts", shell=True)
def _status(self):
    """Return the parsed JSON output of 'lago status' for this prefix."""
    raw = shell.shell(
        ["lago", "--out-format", "json", "status"], cwd=self._prefix_path
    )
    return json.loads(raw)
def test_he_deploy(suite_dir):
    """Run the suite's hosted-engine deployment script."""
    deploy_script = suite_dir + '/he_deploy.sh'
    shell.shell([deploy_script])
if up_status_seen: break up_status_seen = True else: up_status_seen = False return all_hosts @order_by(_TEST_LIST) def test_he_deploy(suite_dir): shell.shell([suite_dir + '/he_deploy.sh']) @pytest.mark.parametrize("key_format, verification_fn", [ pytest.param('X509-PEM-CA', lambda path: shell.shell( ["openssl", "x509", "-in", path, "-text", "-noout"]), id="CA certificate"), pytest.param('OPENSSH-PUBKEY', lambda path: shell.shell(["ssh-keygen", "-l", "-f", path]), id="ssh pubkey"), ]) @order_by(_TEST_LIST) def test_verify_engine_certs(key_format, verification_fn, engine_fqdn, engine_download): #engine_fqdn = "ost-hc-basic-suite-master-engine" url = 'http://{}/ovirt-engine/services/pki-resource?resource=ca-certificate&format={}' with tempfile.NamedTemporaryFile() as tmp: engine_download(url.format(engine_fqdn, key_format), tmp.name) try: verification_fn(tmp.name)