def smoke_test(release, openrc, image_path, **kwargs):
    """Run an initial test setup on a new cloud.

    Validates the network options before delegating to openstack.smoke_test.
    """
    if kwargs["net_type"] not in ("VLAN", "FLAT"):
        # exit=True so an invalid net type never reaches openstack.smoke_test
        # (previously this error did not exit and execution fell through)
        error("--net-type should be VLAN or FLAT", exit=True)
    if kwargs["net_type"] == "VLAN" and kwargs["vlan_id"] == "":
        error("--vlan-id is required with --net-type VLAN", exit=True)
    openstack.smoke_test(release, openrc, image_path, **kwargs)
def connect(self):
    """Connect to the configured VMWare service & set self.conn.

    Connection strategy, in order:
      1. SmartConnect with regular SSL verification
      2. On SSL verification failure: SmartConnect with TLSv1 and
         certificate verification disabled (older vCenter/ESXi hosts)
      3. On TLSv1 failure: SmartConnectNoSSL
    Exits with an error message on invalid credentials.
    """
    try:
        SSLVerificationError = _get_ssl_error()
        try:
            debug("Connecting with SmartConnect - regular SSL")
            self.conn = connect.SmartConnect(
                host=self.ip_addr, user=self.username, pwd=self.password)
        except SSLVerificationError:
            try:
                # NOTE(review): TLSv1 is deprecated but appears required for
                # older VMware endpoints - confirm before modernizing
                ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                ctx.verify_mode = ssl.CERT_NONE
                # Fixed typo in debug message: "SmartConnec" -> "SmartConnect"
                debug("Connecting with SmartConnect - TLSv1 and verify off")
                self.conn = connect.SmartConnect(
                    host=self.ip_addr, user=self.username, pwd=self.password,
                    sslContext=ctx)
            except (ssl.SSLEOFError, OSError):
                debug("Connecting with SmartConnectNoSSL")
                self.conn = connect.SmartConnectNoSSL(
                    host=self.ip_addr, user=self.username, pwd=self.password)
    except vim.fault.InvalidLogin:
        error(f"ERROR: Invalid login for VMware server {self.ip_addr}", exit=True)
    debug("Connection successful")
def sync_local_registry(release, keep, registry, image):
    """Pull OpenStack images and push them to a local registry."""
    # Docker registry addresses must be bare host:port, no URL scheme
    has_scheme = registry.startswith("http")
    if has_scheme:
        error("Registry must not start with http/https.", exit=False)
        error("If push failed, ensure /etc/docker/daemon.json is correct", exit=True)
    openstack.sync_local_registry(release, keep, registry, image=image)
def sync_image_to_registry_offline(name, tag, path, keep, local_registry):
    """Sync a single offline image file into the local registry."""
    # Docker registry addresses must be bare host:port, no URL scheme
    has_scheme = local_registry.startswith("http")
    if has_scheme:
        error("Registry must not start with http/https.", exit=False)
        error("If push failed, ensure /etc/docker/daemon.json is correct", exit=True)
    registry.sync_offline_single_image(name, tag, path, keep, local_registry)
def pull_image_from_offline_registry(name, tag, local_registry):
    """Pull a single image from the offline registry."""
    # Docker registry addresses must be bare host:port, no URL scheme
    has_scheme = local_registry.startswith("http")
    if has_scheme:
        error("Registry must not start with http/https.", exit=False)
        error("If push failed, ensure /etc/docker/daemon.json is correct", exit=True)
    registry.pull_image_from_registry(name, tag, local_registry)
def _container_exists(container_name):
    """Exit with an error unless a running container has the given name."""
    running = docker.from_env().containers.list()
    names = (container.name for container in running)
    if container_name not in names:
        error("ERROR: Container: {} not found".format(container_name), exit=True)
def save(image_name_tag, images_dir_path, force):
    """Export a docker image to a tar file under the offline images path."""
    docker_client = docker.from_env()
    try:
        img = docker_client.images.get(image_name_tag)
    except docker.errors.ImageNotFound:
        return
    dest = get_image_filename_path(image_name_tag, images_dir_path)
    if not force and os.path.exists(dest):
        error(
            f"Warning: {image_name_tag} already exists: use --force to overwrite.",
            exit=False,
        )
        return
    echo("Saving: {}\n".format(dest))
    try:
        with open(dest, "wb") as out_file:
            for blob in img.save(named=image_name_tag):
                out_file.write(blob)
    except ReadTimeout:
        # Docker sometimes times out while exporting large images
        err = "Docker timeout trying to export file. Check CPU usage?\n"
        sys.stderr.write(f"ERROR: {err}\n")
    if not os.path.exists(dest):
        sys.stderr.write(f"ERROR: Failed to create {dest}\n")
    elif os.path.getsize(dest) == 0:
        # A ReadTimeout can leave a zero-byte file behind - remove it
        sys.stderr.write("WARN: Removing empty file {}\n".format(dest))
        os.remove(dest)
    else:
        os.chmod(dest, 0o755)
def start(
    release,
    api_ip,
    openstack_ip,
    glance_https,
    arcus_https,
    cert_path,
    cert_key,
    http_port,
    https_port,
):
    """Launch the arcus-client service.

    When arcus_https is set, both cert_path and cert_key are required.
    """
    click.echo("starting arcus client")
    if arcus_https and (cert_path is None or cert_key is None):
        # Fixed typo in user-facing message: "HTPPS" -> "HTTPS"
        error("ERROR: Invalid HTTPS configuration for Arcus Client", exit=False)
        error(
            " Expected --cert-path and --cert-key when using --arcus-https",
            exit=True)
    arcus_client.start(
        release,
        api_ip,
        openstack_ip,
        glance_https,
        arcus_https=arcus_https,
        cert_path=cert_path,
        cert_key_path=cert_key,
        http_port=http_port,
        https_port=https_port,
    )
def integrations_delete(api_addr, username, password, intg_id):
    """Delete an integration by its ID."""
    _validate_addr(api_addr)
    deleted = intgs.delete_integration(api_addr, username, password, intg_id)
    if not deleted:
        error("Failed to delete Integration")
        return
    click.echo("Successfully deleted Integration")
def _validate_type(api_addr, type_name):
    """Exit with an error unless the given integration type name exists."""
    known_types = intgs.list_types(api_addr)
    if not any(entry["type"] == type_name for entry in known_types):
        error(
            f"ERROR: The given type '{type_name}' is not valid. Type is case sensitive.",
            exit=True)
def start(
    release,
    fqdn,
    rabbit_pass,
    rabbit_ips_list,
    sql_ip,
    sql_password,
    https,
    port,
    secret,
):
    """Start the arcus-api docker container.

    Builds the environment variables and docker run command, removes any
    stale "arcus_api" container, then launches the service. When DEV_MODE
    is enabled the container runs interactively from a local checkout.
    """
    image = f"breqwatr/arcus-api:{release}"
    rabbit_ips_csv = ",".join(rabbit_ips_list)
    # NOTE(review): the "******" username values look like redacted
    # placeholders - confirm the intended RabbitMQ/SQL usernames
    env_vars = {
        "OPENSTACK_VIP": fqdn,
        "PUBLIC_ENDPOINT": "true",
        "HTTPS_OPENSTACK_APIS": str(https).lower(),
        "RABBITMQ_USERNAME": "******",
        "RABBITMQ_PASSWORD": rabbit_pass,
        "RABBIT_IPS_CSV": rabbit_ips_csv,
        "SQL_USERNAME": "******",
        "SQL_PASSWORD": sql_password,
        "SQL_IP": sql_ip,
        "ARCUS_INTEGRATION_SECRET": secret,
    }
    env_str = env_string(env_vars)
    # Defaults for production mode: detached, host networking, log mounts
    daemon = "-d --restart=always"
    run = ""
    dev_mount = ""
    ceph_mount = ""
    network = "--network host"
    log_mount = "-v /var/log/arcus-api:/var/log/arcusweb"
    hosts_mount = "-v /etc/hosts:/etc/hosts"
    if DEV_MODE:
        # Dev mode: interactive container, code mounted from $ARCUS_API_DIR,
        # published port instead of host networking, gunicorn with --reload
        log_mount = ""
        hosts_mount = ""
        if "ARCUS_API_DIR" not in os.environ:
            error("ERROR: must set $ARCUS_API_DIR when $VOITHOS_DEV==true", exit=True)
        api_dir = os.environ["ARCUS_API_DIR"]
        assert_path_exists(api_dir)
        daemon = "-it --rm"
        dev_mount = volume_opt(api_dir, "/app")
        network = f"-p 0.0.0.0:{port}:{port}"
        run = ('bash -c "'
               "/env_config.py && "
               "pip install -e . && "
               "gunicorn --workers 4 --error-logfile=- --access-logfile '-' "
               "--reload "
               f"--bind 0.0.0.0:{port}"
               ' arcusapi.wsgi:app" ')
    name = "arcus_api"
    # Remove any stale container with the same name before starting
    shell(f"docker rm -f {name} 2>/dev/null || true")
    cmd = (f"docker run --name {name} {daemon} {network} "
           f"{hosts_mount} {log_mount} "
           f"{env_str} {ceph_mount} {dev_mount} {image} {run}")
    shell(cmd)
def _find_integration(api_addr, username, password, intg_id, exit=False):
    """Return the integration dict matching intg_id.

    When not found: prints an error, and either exits (exit=True) or
    returns False (exit=False).
    """
    match = None
    for intg in list_integrations(api_addr, username, password):
        if intg["id"] == intg_id:
            match = intg
            break
    if match is None:
        error(f"ERROR: Failed to find an integration with ID = {intg_id}", exit=exit)
        if not exit:
            return False
    return match
def get_aws_iam():
    """Extract AWS IAM credentials from the license key."""
    config.require_license()
    # Renamed local (was "license") to avoid shadowing the builtin
    license_key = config.get_license()
    # Expected format: 20-char IAM id, "-", 40-char IAM secret (61 chars)
    is_valid = len(license_key) == 61 and license_key[20] == "-"
    if not is_valid:
        error(f"ERROR: The license {license_key} is invalid", exit=True)
    return {"id": license_key[0:20], "secret": license_key[21:61]}
def sync_local_registry_offline(kolla_tag, bw_tag, ceph_release, path, keep, local_registry):
    """Sync the local registry from offline image files."""
    # Docker registry addresses must be bare host:port, no URL scheme
    has_scheme = local_registry.startswith("http")
    if has_scheme:
        error("Registry must not start with http/https.", exit=False)
        error("If push failed, ensure /etc/docker/daemon.json is correct", exit=True)
    registry.sync_offline_images(kolla_tag, bw_tag, ceph_release, path, keep, local_registry)
def get_package_dependencies_list(package, apt_packages_dir):
    """Return the recursive dependency list of an apt package.

    Runs apt-rdepends once via subprocess.run, checking both streams for the
    "Unable to locate package" failure (apt-rdepends reports it on stderr,
    which is why the original used getoutput) while taking the dependency
    names from stdout only (matching the original check_output call).
    Removes apt_packages_dir and exits on failure.
    """
    proc = subprocess.run(
        f'apt-rdepends {package}|grep -v "^ "',
        shell=True,
        capture_output=True,
        text=True,
    )
    combined = proc.stdout + proc.stderr
    if "Unable to locate package" in combined:
        # Clean up the partially-created packages dir before bailing out
        shell(f"rm -r {apt_packages_dir}")
        error(f"ERROR: Unable to locate package: {package}", exit=True)
    # split() handles newlines and trailing whitespace in one pass
    return proc.stdout.split()
def fstab(self):
    """Return the parsed content of the root volume's /etc/fstab file.

    Parses UUIDs into device paths, quits with an error if that fails.
    Return value is a list of dicts with the following keys:
    - path
    - mountpoint
    - fstype
    - options
    """
    # Return the cached parse if one exists
    if self._fstab:
        return self._fstab
    self.debug_action(action="PARSE FSTAB")
    _fstab = []
    try:
        # Mount the root volume if it isn't already mounted
        if not self.was_root_mounted:
            self.mount_root()
        fstab_lines = get_file_contents(
            f"{self.ROOT_MOUNT}/etc/fstab").replace("\t", "")
        debug("/etc/fstab contents:")
        debug(fstab_lines)
        for line in fstab_lines.split("\n"):
            # Skip comments, swap tabs with spaces
            line = line.strip().replace("\t", "")
            if line.startswith("#"):
                continue
            # Collapse repeated whitespace into a clean field list
            split = [word for word in line.split(" ") if word]
            if len(split) < 3:
                continue
            path = split[0]
            if path.startswith("UUID="):
                uuid = path.split("=")[1]
                debug(f"fstab line has UUID: {uuid}")
                debug(line)
                # Resolve the UUID to a device path via the blkid data
                path = next((path for path in self.blkid
                             if self.blkid[path]["UUID"] == uuid), None)
                if path is None:
                    error(
                        f"ERROR: Failed to find path to fstab UUID in {line}",
                        exit=True)
                debug(f"Mapped UUID {uuid} to device path: {path}")
            elif not path.startswith("/dev"):
                # Non-device entries (e.g. tmpfs, swapfiles) are skipped
                debug(
                    f"Skipping /etc/fstab path: {path} - does not start with /dev"
                )
                continue
            _fstab.append({
                "path": path,
                "mountpoint": split[1],
                "fstype": split[2],
                "options": split[3] if len(split) > 3 else "",
            })
    finally:
        # Restore the original mount state even if parsing failed
        if not self.was_root_mounted:
            self.unmount_root()
        self.debug_action(end=True)
    self._fstab = _fstab
    return _fstab
def get_http_auth_headers(username, password, api_url):
    """Return the HTTP headers required for admin-scoped API requests."""
    token = _get_token(api_url, username, password)
    # Locate the "admin" project among the user's projects
    admin_project = None
    for proj in _get_projects(api_url, token):
        if proj["name"] == "admin":
            admin_project = proj
            break
    if admin_project is None:
        error("ERROR: No 'admin' project found - check roles?", exit=True)
    return {"X-Auth-Token": token, "X-Project-ID": admin_project["id"]}
def integrations_show_type(api_addr, type_name):
    """Show the properties of an integration-type."""
    _validate_addr(api_addr)
    intg_type = intgs.show_type(api_addr, type_name)
    if intg_type is None:
        error(f"ERROR: type {type_name} is not valid")
        return
    click.echo(intg_type["type"])
    for field_name, field_value in intg_type["fields"].items():
        click.echo(f" {field_name}: {field_value}")
def offline_start(ip_address, port, path):
    """Load the registry image from an offline file and start the registry."""
    if not os.path.exists(path):
        error(f"ERROR: Registry image not found at {path}", exit=True)
    else:
        shell(f"docker load --input {path}")
    # The final path segment is the image file's name
    filename = path.rsplit("/", 1)[1]
    image_name_tag = filename_to_image_name_tag(filename)
    shell(f"docker run -d --name registry -p {ip_address}:{port}:5000 {image_name_tag}")
def convert(input_format, output_format, input_path, output_path):
    """Run: qemu-img -f <input-format> -O <output-format> <input-path> <output-path>

    Validates both formats against FORMATS and that the input file exists
    before delegating to qemu_img.convert.
    """
    print(
        f"qemu-img -f {input_format} -O {output_format} {input_path} {output_path}"
    )
    if input_format not in FORMATS or output_format not in FORMATS:
        # Bug fix: message was missing its f-prefix, printing "{FORMATS}" literally
        error(f"ERROR - Invalid format provided. Valid formats: {FORMATS}", exit=True)
    if not Path(input_path).is_file():
        error(f"ERROR - File not found: {input_path}", exit=True)
    qemu_img.convert(input_format, output_format, input_path, output_path)
def integrations_create(api_addr, username, password, intg_type, fields):
    """Create a new integration after validating address, type, and fields."""
    _validate_addr(api_addr)
    _validate_type(api_addr, intg_type)
    _validate_fields(api_addr, intg_type, fields)
    created = intgs.create_integration(api_addr, username, password, intg_type, fields)
    if not created:
        error("Failed to create Integration")
        return
    click.echo("Successfully created Integration")
def pull_and_save_kolla_tag_images(kolla_tag, path, force):
    """Pull and save kolla and service images with the given kolla tag.

    Bug fix: copy the repo list before extending it - the original aliased
    the list inside KOLLA_IMAGE_REPOS and mutated that module-level mapping
    on every call.
    """
    if kolla_tag not in KOLLA_IMAGE_REPOS:
        error(
            f"ERROR: kolla tag {kolla_tag} is not supported",
            exit=True
        )
    all_images = list(KOLLA_IMAGE_REPOS[kolla_tag])
    kolla_tag_service_images = ["pip", "apt", "openstack-client", "kolla-ansible"]
    all_images.extend(kolla_tag_service_images)
    image_dir_path = "{}/images/".format(path)
    echo("Pulling dockerhub images with tag: {}\n".format(kolla_tag))
    _pull_and_save_all(all_images, kolla_tag, image_dir_path, force)
def zap_disk(disk, force):
    """Erase the filesystem from the given disk."""
    if not force:
        # Interactive confirmation: the user must retype the device name
        click.echo("")
        click.echo(f"WARNING: This will destroy any filesystem on the drive: {disk}")
        click.echo("Type the drive name again to continue:")
        confirmation = input()
        if confirmation != disk:
            system.error(f"ERROR: Confirm does not match {disk}", exit=True)
    system.assert_path_exists(disk)
    ceph.zap_disk(disk)
def _environ(name, value=None): """Safely return the value of an environment variable, else throw nice error If value!=None then it is used instead of checking the env var """ ENV_VARS = ["VMWARE_USERNAME", "VMWARE_PASSWORD", "VMWARE_IP_ADDR"] if name not in ENV_VARS: raise (f"unsupported _environ name {name}") if value is not None: return value if name not in os.environ: error(f"Env var {name} is required,missing. REQUIRED={ENV_VARS}", exit=True) return os.environ[name]
def download_vm(vm_uuid, dest_dir, username, password, ip_addr, interval):
    """Download the VM with the given UUID to a local directory."""
    mgr = VMWareMgr(username=username, password=password, ip_addr=ip_addr)
    target_vm = mgr.find_vm_by_uuid(vm_uuid)
    if target_vm is None:
        error(f"ERROR: Failed to find VM with UUID: {vm_uuid}", exit=True)
    try:
        exporter = VMWareExporter(
            mgr, target_vm, base_dir=dest_dir, interval=int(interval))
    except VMWareOnlineVMCantMigrate:
        # Only powered-off VMs can be exported
        error("ERROR: This VM is not offline", exit=True)
    exporter.download()
def _get_token(api_url, username, password):
    """Get an openstack token, exiting with an error when auth fails."""
    credentials = {
        'username': username,
        'password': password,
        'domain_name': "default"
    }
    resp = requests.post(f'{api_url}/auth/token', json=credentials, verify=False)
    # A successful auth returns the token in this response header
    if "X-Subject-Token" not in resp.headers:
        error("ERROR: Failed to get token - Authentication failed.", exit=True)
    return resp.headers["X-Subject-Token"]
def image_exists(image_name, image_tag):
    """Check that image_name:image_tag exists locally, else exit with an error."""
    target = image_name + ":" + image_tag
    docker_client = docker.from_env()
    found = any(target in img.tags for img in docker_client.images.list())
    if found:
        click.echo("Image {} exists.".format(target))
        return
    error(
        "ERROR: Image: {} not found. Please pull {} before running update command.".format(
            target, target
        ),
        exit=True,
    )
def _get_token(api_url, username, password):
    """Get an openstack token, exiting with the API's error message on failure.

    Robustness fix: if the response carries no "error" body but the token
    header is also absent, the original raised KeyError on the final line;
    now it exits with a clear message instead.
    """
    token_url = f'{api_url}/auth/token'
    req_data = {
        'username': username,
        'password': password,
        'domain_name': "default"
    }
    token_response = requests.post(token_url, json=req_data, verify=False)
    response_headers = token_response.headers
    token_header_name = "X-Subject-Token"
    if "error" in token_response.json():
        message = token_response.json()["error"]["message"]
        error(f"ERROR: Failed to get token: {message}", exit=True)
    if token_header_name not in response_headers:
        # No error body, but no token header either - fail clearly
        error("ERROR: Failed to get token - Authentication failed.", exit=True)
    return response_headers[token_header_name]
def integrations_update(api_addr, username, password, intg_id, fields, links_csv):
    """Update an integration's properties."""
    _validate_addr(api_addr)
    # links_csv is an optional comma-separated list of linked integration IDs
    links = links_csv.split(",") if links_csv is not None else None
    updated = intgs.update_integration(
        api_addr, username, password, intg_id, fields, links=links)
    if not updated:
        error("Failed to update Integration")
        return
    click.echo("Successfully updated Integration")
def boot_volume(self):
    """Return the path of the boot volume.

    Caches the result in self._boot_volume. Exits with an error when /boot
    lives on the root partition, or (bug fix) when fstab has no /boot entry -
    previously next() without a default raised an unhandled StopIteration.
    """
    if self._boot_volume:
        return self._boot_volume
    if self.boot_partition_is_on_root_volume:
        error(
            "ERROR: /boot is on the root partition, there is no boot volume",
            exit=True)
    self.debug_action(action="LOCATE BOOT VOLUME")
    boot_entry = next(
        (entry for entry in self.fstab if entry["mountpoint"] == "/boot"), None)
    if boot_entry is None:
        error("ERROR: Failed to find a /boot entry in fstab", exit=True)
    self._boot_volume = boot_entry["path"]
    self.debug_action(end=True)
    return boot_entry["path"]