def create_infra_env(
    self, name: str, ssh_public_key: Optional[str] = None, **infra_env_params
) -> models.infra_env.InfraEnv:
    """Register a new infra-env with the assisted service.

    :param name: infra-env name.
    :param ssh_public_key: SSH key authorized on hosts booted from this infra-env.
    :param infra_env_params: extra fields forwarded to InfraEnvCreateParams.
    :return: the registered InfraEnv object.
    """
    create_params = models.InfraEnvCreateParams(
        name=name, ssh_authorized_key=ssh_public_key, **infra_env_params
    )
    log.info("Creating infra-env with params %s", create_params.__dict__)
    return self.client.register_infra_env(infraenv_create_params=create_params)
def create_cluster(
    self, name: str, ssh_public_key: Optional[str] = None, **cluster_params
) -> models.cluster.Cluster:
    """Register a new cluster with the assisted service.

    :param name: cluster name.
    :param ssh_public_key: SSH key installed on cluster nodes.
    :param cluster_params: extra fields forwarded to ClusterCreateParams.
    :return: the registered Cluster object.
    """
    create_params = models.ClusterCreateParams(
        name=name, ssh_public_key=ssh_public_key, **cluster_params
    )
    log.info("Creating cluster with params %s", create_params.__dict__)
    return self.client.v2_register_cluster(new_cluster_params=create_params)
def download_kubeconfig(self, cluster_id: str, kubeconfig_path: str) -> None:
    """Fetch the cluster's kubeconfig credentials file and write it to disk.

    :param cluster_id: id of the cluster.
    :param kubeconfig_path: local path the kubeconfig is written to.
    """
    log.info("Downloading kubeconfig to %s", kubeconfig_path)
    credentials = self.client.v2_download_cluster_credentials(
        cluster_id=cluster_id, file_name="kubeconfig", _preload_content=False
    )
    with open(kubeconfig_path, "wb") as dest:
        dest.write(credentials.data)
def download_and_save_file(self, cluster_id: str, file_name: str, file_path: str) -> None:
    """Download a named cluster file and save it at file_path.

    :param cluster_id: id of the cluster.
    :param file_name: server-side name of the file to fetch.
    :param file_path: local destination path.
    """
    log.info("Downloading %s to %s", file_name, file_path)
    download = self.client.v2_download_cluster_files(
        cluster_id=cluster_id, file_name=file_name, _preload_content=False
    )
    with open(file_path, "wb") as dest:
        dest.write(download.data)
def refresh_api_key(config: Configuration) -> None:
    """Refresh the SSO access token stored in ``config.api_key`` when stale.

    If an "Authorization" JWT is present, its payload is decoded to read the
    "exp" claim; a token that never expires (exp == 0) or still has more than
    10 minutes of validity is left untouched.  Otherwise (expired, nearly
    expired, or no token at all) a new access token is fetched from the SSO
    endpoint ($SSO_URL) using the module-level ``offline_token`` refresh token.

    :param config: client Configuration whose "Authorization" api_key is
        inspected and, when stale, replaced in place.
    :raises requests.HTTPError: if the SSO refresh request fails.
    """
    auth = config.api_key.get("Authorization", None)
    if auth is not None:
        # The JWT payload is the second dot-separated segment.  It is
        # base64url-encoded and may lack padding: the missing padding is
        # (-len) % 4, NOT len % 4 as previously computed (a segment with
        # len % 4 == 3 would have had three '=' appended instead of one).
        # urlsafe_b64decode is used because JWT segments use the URL-safe
        # alphabet ('-' and '_'), which plain b64decode mishandles.
        segment = auth.split(".")[1]
        segment = segment + "=" * (-len(segment) % 4)
        expires_on = json.loads(base64.urlsafe_b64decode(segment))["exp"]

        # If this key doesn't expire, or has more than 10 minutes left,
        # don't refresh.
        remaining = expires_on - time.time()
        if expires_on == 0 or remaining > 600:
            return

    # Fetch a new key: the current one is expired, nearly expired or unset.
    params = {
        "client_id": "cloud-services",
        "grant_type": "refresh_token",
        "refresh_token": offline_token,
    }
    log.info("Refreshing API key")
    response = requests.post(os.environ.get("SSO_URL"), data=params)
    response.raise_for_status()
    config.api_key["Authorization"] = response.json()["access_token"]
def _set_x_secret_key(cls, c: Configuration, pull_secret: str) -> None:
    """Install the agent's X-Secret-Key auth header derived from the pull secret.

    No-op (logged) when no pull secret is supplied.
    """
    if not pull_secret:
        log.info("pull secret not set, skipping agent authentication headers")
        return
    log.info("Setting X-Secret-Key")
    secret = json.loads(pull_secret)["auths"]["cloud.openshift.com"]["auth"]
    c.api_key["X-Secret-Key"] = secret
def host_get_next_step(self, infra_env_id: str, host_id: str) -> models.Steps:
    """Return the next installation steps the service wants the host to run.

    :param infra_env_id: id of the infra-env the host was discovered in.
    :param host_id: id of the host.
    :return: a Steps model from v2_get_next_steps.
    """
    # Message fixed: the id is an infra-env id, not a cluster id; also use
    # lazy %-style logging like the rest of this file.
    log.info("Getting next step for host: %s in infra-env: %s", host_id, infra_env_id)
    return self.client.v2_get_next_steps(infra_env_id=infra_env_id, host_id=host_id)
def download_and_save_infra_env_file(self, infra_env_id: str, file_name: str, file_path: str) -> None:
    """Download a named infra-env file and save it at file_path.

    :param infra_env_id: id of the infra-env.
    :param file_name: server-side name of the file to fetch.
    :param file_path: local destination path.
    """
    log.info(f"Downloading {file_name} to {file_path}")
    download = self.client.v2_download_infra_env_files(
        infra_env_id=infra_env_id, file_name=file_name, _preload_content=False
    )
    with open(file_path, "wb") as dest:
        dest.write(download.data)
def download_host_logs(self, cluster_id: str, host_id: str, output_file) -> None:
    """Download the logs of a single cluster host into output_file."""
    log.info("Downloading host logs to %s", output_file)
    logs = self.client.v2_download_cluster_logs(
        cluster_id=cluster_id, host_id=host_id, _preload_content=False
    )
    with open(output_file, "wb") as dest:
        dest.write(logs.data)
def create_kube_api_client(kubeconfig_path: Optional[str] = None) -> ApiClient:
    """Build a Kubernetes ApiClient configured from a kubeconfig file.

    :param kubeconfig_path: path to the kubeconfig; None lets the kubernetes
        library fall back to its default location.
    :return: a ready-to-use KubeApiClient.
    """
    log.info("creating kube client with config file: %s", kubeconfig_path)
    configuration = KubeConfiguration()
    load_kube_config(config_file=kubeconfig_path, client_configuration=configuration)
    return KubeApiClient(configuration=configuration)
def download_host_ignition(self, infra_env_id: str, host_id: str, destination: str) -> None:
    """Save a host's ignition config as host_<host_id>.ign under destination.

    :param infra_env_id: id of the infra-env the host belongs to.
    :param host_id: id of the host.
    :param destination: existing directory the ignition file is written into.
    """
    log.info("Downloading host %s infra_env %s ignition files to %s", host_id, infra_env_id, destination)
    ignition = self.client.v2_download_host_ignition(
        infra_env_id=infra_env_id, host_id=host_id, _preload_content=False
    )
    target_path = os.path.join(destination, f"host_{host_id}.ign")
    with open(target_path, "wb") as dest:
        dest.write(ignition.data)
def get_host_by_name(self, cluster_id: str, host_name: str) -> Optional[Dict[str, Any]]:
    """Look up a cluster host by its requested hostname.

    :param cluster_id: id of the cluster whose hosts are searched.
    :param host_name: the requested_hostname to match.
    :return: the matching host dict, or None when no host matches.
        (Annotation fixed: the original claimed Dict[str, Any] but fell
        through and returned None when nothing matched.)
    """
    for host in self.get_cluster_hosts(cluster_id):
        if host.get("requested_hostname") == host_name:
            log.info(f"Requested host by name: {host_name}, host details: {host}")
            return host
    return None
def get_hosts_by_role(self, cluster_id: str, role, hosts: Optional[List[dict]] = None) -> List[dict]:
    """Return all hosts of the cluster that carry the given role.

    :param cluster_id: id of the cluster (consulted only when hosts is not given).
    :param role: role value to filter on (e.g. master/worker).
    :param hosts: optional pre-fetched host list; when falsy the hosts are
        fetched via get_cluster_hosts.
    :return: list of host dicts whose "role" equals role.
    """
    hosts = hosts or self.get_cluster_hosts(cluster_id)
    # Idiom: comprehension instead of the previous manual append loop.
    nodes_by_role = [host for host in hosts if host["role"] == role]
    log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
    return nodes_by_role
def download_infraenv_events(
    self, infra_env_id: str, output_file: str, categories: Optional[List[str]] = None
) -> None:
    """Download infra-env events as pretty-printed JSON into output_file.

    :param infra_env_id: id of the infra-env whose events are fetched.
    :param output_file: local path the JSON is written to.
    :param categories: event categories to fetch; defaults to ["user"].
        (Annotation fixed: the default is a list of category names, not str.)
    """
    if categories is None:
        categories = ["user"]
    log.info("Downloading infraenv events to %s", output_file)
    # Fetch events before opening the file so a fetch failure does not
    # leave behind a truncated/empty output_file.
    events = self.get_events(infra_env_id=infra_env_id, categories=categories)
    with open(output_file, "wb") as dest:
        dest.write(json.dumps(events, indent=4).encode())
def download_manifests(self, cluster_id: str, dir_path: str) -> None:
    """Download every custom manifest of the cluster into dir_path.

    Lists the cluster manifests, then fetches each one and writes it to
    dir_path under its original file name.
    """
    log.info(f"Downloading manifests for cluster {cluster_id} into {dir_path}")
    listing = self.manifest.v2_list_cluster_manifests(cluster_id=cluster_id, _preload_content=False)
    for record in json.loads(listing.data):
        manifest = self.manifest.v2_download_cluster_manifest(
            cluster_id=cluster_id,
            file_name=record["file_name"],
            folder=record["folder"],
            _preload_content=False,
        )
        with open(os.path.join(dir_path, record["file_name"]), "wb") as dest:
            dest.write(manifest.data)
def create_day2_cluster(self, name: str, cluster_uuid: str, **cluster_params) -> models.cluster.Cluster:
    """Register a day-2 (add-hosts) cluster against an existing cluster id.

    :param name: cluster name.
    :param cluster_uuid: id of the existing cluster to add hosts to.
    :param cluster_params: extra fields forwarded to AddHostsClusterCreateParams.
    :return: the registered day-2 Cluster object.
    """
    day2_params = models.AddHostsClusterCreateParams(name=name, id=cluster_uuid, **cluster_params)
    log.info("Creating day 2 cluster with params %s", day2_params.__dict__)
    return self.client.register_add_hosts_cluster(new_add_hosts_cluster_params=day2_params)
def bind_host(self, infra_env_id: str, host_id: str, cluster_id: str) -> None:
    """Bind a discovered host from an infra-env to a cluster.

    :param infra_env_id: id of the infra-env the host was discovered in.
    :param host_id: id of the host to bind.
    :param cluster_id: id of the cluster the host is bound to.
    """
    # Message fixed: this call binds the host (it does not "enable" it);
    # also use lazy %-style logging like the rest of this file.
    log.info("Binding host: %s, from infra_env %s, in cluster id: %s", host_id, infra_env_id, cluster_id)
    bind_host_params = models.BindHostParams(cluster_id=cluster_id)
    self.client.bind_host(infra_env_id=infra_env_id, host_id=host_id, bind_host_params=bind_host_params)
def download_cluster_events(self, cluster_id: str, output_file: str, categories=None) -> None:
    """Dump cluster events to output_file as JSON and export them as JUnit.

    :param cluster_id: id of the cluster whose events are fetched.
    :param output_file: local path the JSON is written to.
    :param categories: event categories to fetch; defaults to ["user"].
    """
    if categories is None:
        categories = ["user"]
    log.info("Downloading cluster events to %s", output_file)
    with open(output_file, "wb") as dest:
        events = self.get_events(cluster_id, categories=categories)
        dest.write(json.dumps(events, indent=4).encode())
    self._events_junit_exporter.collect(events, suite_name="cluster_events", xml_suffix=cluster_id)
def create_day2_cluster(self, name: str, cluster_uuid: str, **cluster_params) -> models.cluster.Cluster:
    """Import an existing OpenShift cluster for day-2 host additions (v2 API).

    :param name: cluster name.
    :param cluster_uuid: OpenShift cluster id of the existing cluster.
    :param cluster_params: extra fields forwarded to ImportClusterParams.
    :return: the imported Cluster object.
    """
    import_params = models.ImportClusterParams(name=name, openshift_cluster_id=cluster_uuid, **cluster_params)
    log.info("Creating day 2 cluster with params %s", import_params.__dict__)
    return self.client.v2_import_cluster(new_import_cluster_params=import_params)
def wait_for_api_readiness(self, timeout: int) -> None:
    """Block until the inventory API answers a clusters listing, or time out.

    :param timeout: maximum number of seconds to wait.
    :raises waiting.TimeoutExpired: if the API is not ready within timeout.
    """
    log.info("Waiting for inventory api to be ready")
    waiting.wait(
        lambda: self.clusters_list() is not None,
        timeout_seconds=timeout,
        sleep_seconds=5,
        waiting_for="Wait till inventory is ready",
        # Any exception (connection refused, 5xx, ...) counts as "not ready yet".
        expected_exceptions=Exception,
    )
def patch_cluster_discovery_ignition(self, cluster_id: str, ignition_info):
    """Deprecated: patch the cluster's discovery ignition config.

    Use patch_discovery_ignition instead.

    :param cluster_id: id of the cluster to patch.
    :param ignition_info: ignition content; it is JSON-serialized here, so it
        should be a JSON-serializable object (the old ``str`` annotation would
        have caused double-encoding — TODO confirm against callers).
    :return: the client response from update_discovery_ignition.
        (The old ``-> None`` annotation contradicted the ``return`` statement.)
    """
    warnings.warn(
        "patch_cluster_discovery_ignition is deprecated. Use patch_discovery_ignition instead.",
        DeprecationWarning,
    )
    log.info("Patching cluster %s discovery ignition", cluster_id)
    return self.client.update_discovery_ignition(
        cluster_id=cluster_id,
        discovery_ignition_params=models.DiscoveryIgnitionParams(config=json.dumps(ignition_info)),
    )
def download_metrics(self, dest: str) -> None:
    """Fetch the service's /metrics endpoint and save the text body to dest.

    :param dest: local path the metrics text is written to.
    :raises requests.HTTPError: if the metrics endpoint returns an error.
    """
    log.info("Downloading metrics to %s", dest)
    url = self.inventory_url
    # Default to plain http when the configured URL carries no scheme.
    if not url.startswith(("http://", "https://")):
        url = f"http://{url}"
    response = requests.get(f"{url}/metrics")
    response.raise_for_status()
    with open(dest, "w") as dest_file:
        dest_file.write(response.text)
def create_client(
    url: str,
    offline_token: str,
    pull_secret: Optional[str] = "",
    wait_for_api: Optional[bool] = True,
    timeout: Optional[int] = consts.WAIT_FOR_BM_API,
) -> InventoryClient:
    """Build an InventoryClient for url, optionally blocking until the API is up.

    :param url: assisted-service base URL.
    :param offline_token: SSO offline token used for authentication.
    :param pull_secret: optional pull secret for agent authentication.
    :param wait_for_api: when True, wait until the inventory API is ready.
    :param timeout: readiness-wait timeout in seconds.
    :return: a ready InventoryClient.
    """
    log.info("Creating assisted-service client for url: %s", url)
    client = InventoryClient(url, offline_token, pull_secret)
    if wait_for_api:
        client.wait_for_api_readiness(timeout)
    return client
def set_cluster_proxy(
    self, cluster_id: str, http_proxy: str, https_proxy: Optional[str] = "", no_proxy: Optional[str] = ""
) -> models.cluster.Cluster:
    """Apply HTTP/HTTPS/no-proxy settings to the cluster.

    :param cluster_id: id of the cluster to update.
    :param http_proxy: HTTP proxy URL.
    :param https_proxy: HTTPS proxy URL (optional).
    :param no_proxy: comma-separated no-proxy list (optional).
    :return: the updated Cluster object.
    """
    log.info("Setting proxy for cluster %s", cluster_id)
    proxy_params = models.ClusterUpdateParams(
        http_proxy=http_proxy, https_proxy=https_proxy, no_proxy=no_proxy
    )
    return self.update_cluster(cluster_id=cluster_id, update_params=proxy_params)
def get_api_vip(self, cluster_info: dict, cluster_id: str = None):
    # Resolve the API VIP for a cluster.  Prefers the caller-supplied
    # cluster_info; falls back to fetching the cluster by cluster_id.
    cluster = cluster_info or self.cluster_get(cluster_id)
    api_vip = cluster.get("api_vip")
    user_managed_networking = cluster.get("user_managed_networking")
    if not api_vip and user_managed_networking:
        # With user-managed networking the service does not manage a VIP;
        # probe the master hosts for a reachable API address instead.
        log.info("API VIP is not set, searching for api ip on masters")
        # NOTE(review): `cluster` appears to be either a plain dict or a
        # client model here — the to_dict() fallback implies both shapes
        # reach this code.  Confirm which callers pass which.
        hosts = cluster.get("hosts") or cluster.to_dict()["hosts"]
        masters = self.get_hosts_by_role(cluster["id"], consts.NodeRoles.MASTER, hosts=hosts)
        api_vip = self._wait_for_api_vip(masters)
    log.info("api vip is %s", api_vip)
    return api_vip
def select_installation_disk(self, infra_env_id: str, host_id: str, disk_paths: List[dict]) -> None:
    """Mark the chosen installation disk(s) for a host.

    :param infra_env_id: id of the infra-env the host belongs to.
    :param host_id: id of the host to update.
    :param disk_paths: dicts each holding a "role" plus either a "disk_id"
        (preferred) or a "path" identifying the disk.
    :return: the client response from v2_update_host.
    """
    log.info("Setting installation disk for host %s in infra_env %s", host_id, infra_env_id)
    selections = [
        models.DiskConfigParams(
            id=disk["disk_id"] if "disk_id" in disk else disk["path"],
            role=disk["role"],
        )
        for disk in disk_paths
    ]
    update_params = models.HostUpdateParams(disks_selected_config=selections)
    return self.client.v2_update_host(
        infra_env_id=infra_env_id, host_id=host_id, host_update_params=update_params
    )
def update_hosts(
    self, cluster_id: str, hosts_with_roles, hosts_names: Optional[models.ClusterupdateparamsHostsNames] = None
) -> models.cluster.Cluster:
    """Deprecated: set roles (and optionally names) for several hosts at once.

    Use update_host instead.

    :param cluster_id: id of the cluster to update.
    :param hosts_with_roles: host-id/role pairs to apply.
    :param hosts_names: optional host-id/name pairs to apply.
    :return: the updated Cluster object.
    """
    warnings.warn("update_hosts is deprecated. Use update_host instead.", DeprecationWarning)
    log.info("Setting roles for hosts %s in cluster %s", hosts_with_roles, cluster_id)
    update_params = models.ClusterUpdateParams(hosts_roles=hosts_with_roles, hosts_names=hosts_names)
    return self.update_cluster(cluster_id=cluster_id, update_params=update_params)
def wait_for_networking(
    self,
    timeout=3 * consts.MINUTE,
    interval=consts.DEFAULT_CHECK_STATUSES_INTERVAL,
):
    """Best-effort wait until every node reports both a MAC and an IP address.

    :param timeout: overall wait budget in seconds.
    :param interval: seconds between readiness checks.
    """
    log.info("Wait till %s nodes have MAC and IP address", len(self.nodes))
    # Timeouts are swallowed (and logged) — this is deliberately best effort.
    with SuppressAndLog(waiting.TimeoutExpired):
        waiting.wait(
            self._are_nodes_network_prepared,
            timeout_seconds=timeout,
            sleep_seconds=interval,
            waiting_for="nodes to have IP and MAC addresses",
        )
def download_image(self, cluster_id: str, image_path: str) -> None:
    """Deprecated: download the cluster discovery ISO to image_path.

    Use infra_env.download_image or cluster.download_image instead.
    """
    warnings.warn(
        "download_image is deprecated and soon will be deleted. "
        "Use infra_env.download_image or cluster.download_image instead.",
        DeprecationWarning,
    )
    log.info("Downloading image for cluster %s to %s", cluster_id, image_path)
    http_info = self.client.download_cluster_iso_with_http_info(
        cluster_id=cluster_id, _preload_content=False
    )
    # Only the first element (the response body object) of the returned
    # with_http_info tuple is needed here.
    self._download(response=http_info[0], file_path=image_path, verify_file_size=True)
def generate_image(
    self,
    cluster_id: str,
    ssh_key: str,
    image_type: str = consts.ImageType.FULL_ISO,
    static_network_config: Optional[list] = None,
) -> models.cluster.Cluster:
    """Request generation of a discovery image for the cluster.

    :param cluster_id: id of the cluster the image belongs to.
    :param ssh_key: SSH public key baked into the discovery image.
    :param image_type: discovery image flavor (defaults to full ISO).
    :param static_network_config: optional static network configuration list.
    :return: the client response from generate_cluster_iso.
    """
    # Fetch the cluster first so an unknown cluster_id fails fast.
    self.cluster_get(cluster_id=cluster_id)
    log.info("Generating image for cluster %s", cluster_id)
    image_create_params = models.ImageCreateParams(
        ssh_public_key=ssh_key,
        static_network_config=static_network_config,
        image_type=image_type,
    )
    log.info("Generating image with params %s", image_create_params.__dict__)
    return self.client.generate_cluster_iso(
        cluster_id=cluster_id, image_create_params=image_create_params
    )