def main():
    """Download installation logs for a single cluster or for all eligible clusters.

    Optionally gathers sosreport data first, then either fetches logs for the
    cluster given by ``--cluster-id`` or iterates every cluster returned by the
    service and downloads logs for those that qualify.
    """
    args = handle_arguments()

    if args.sosreport:
        gather_sosreport_data(output_dir=args.dest)

    client = ClientFactory.create_client(
        url=args.inventory_url,
        timeout=CONNECTION_TIMEOUT,
        offline_token=get_env("OFFLINE_TOKEN"),
    )

    if args.cluster_id:
        cluster = client.cluster_get(args.cluster_id)
        # Round-trip through JSON (default=str) so non-serializable values such
        # as datetimes become plain strings before the dict is consumed.
        serializable_cluster = json.loads(
            json.dumps(cluster.to_dict(), sort_keys=True, default=str)
        )
        download_cluster_logs(
            client,
            serializable_cluster,
            args.dest,
            args.must_gather,
            args.update_by_events,
            pull_secret=args.pull_secret,
        )
    else:
        clusters = get_clusters(client, args.download_all)
        if not clusters:
            log.info("No clusters were found")
            return

        for cluster in clusters:
            if args.download_all or should_download_logs(cluster):
                download_cluster_logs(
                    client,
                    cluster,
                    args.dest,
                    args.must_gather,
                    args.update_by_events,
                    pull_secret=args.pull_secret,
                )

        log.info(
            "Cluster installation statuses: %s",
            dict(Counter(cluster["status"] for cluster in clusters).items()),
        )
def get_api_client(self, offline_token=None, **kwargs) -> InventoryClient:
    """Create an InventoryClient for the assisted service.

    Falls back to the instance's stored offline token when none is passed, and
    resolves a local in-cluster service URL when no remote URL is configured.
    """
    token = offline_token or self.offline_token
    service_url = self.remote_service_url
    if not service_url:
        # No remote endpoint configured — look up the locally deployed service.
        service_url = utils.get_local_assisted_service_url(
            self.namespace, "assisted-service", self.deploy_target
        )
    return ClientFactory.create_client(service_url, token, **kwargs)
def try_get_cluster():
    """Best-effort fetch of (client, cluster) for the module-level ``args.cluster_id``.

    Returns ``(None, None)`` when no cluster id was supplied or the API call fails.
    """
    if args.cluster_id:
        try:
            client = ClientFactory.create_client(
                url=utils.get_assisted_service_url_by_args(args=args),
                offline_token=utils.get_env("OFFLINE_TOKEN"),
            )
            return client, client.cluster_get(cluster_id=args.cluster_id)
        except ApiException as e:
            # Treat a lookup failure as "no existing cluster" and let callers proceed.
            log.warning(f"Can't retrieve cluster_id={args.cluster_id}, {e}")
    return None, None
def get_ocp_cluster(args):
    """Fetch the cluster object for ``args.cluster_id``.

    When no cluster id was supplied, derive it from the terraform vars of the
    latest run for the namespaced cluster name.
    """
    if not args.cluster_id:
        name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        folder = utils.get_tf_folder(name, args.namespace)
        # Recover the inventory id recorded by the previous terraform run.
        args.cluster_id = utils.get_tfvars(folder).get('cluster_inventory_id')
    client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args),
        offline_token=utils.get_env("OFFLINE_TOKEN"),
    )
    return client.cluster_get(cluster_id=args.cluster_id)
def kube_api_logs(args):
    """Download logs through the kube-api path for every named ClusterDeployment."""
    client = ClientFactory.create_kube_api_client(args.kubeconfig_path)
    deployments = ClusterDeployment.list_all_namespaces(CustomObjectsApi(client))
    for item in deployments.get("items", []):
        # Skip deployments without an assigned cluster name.
        if not item["spec"]["clusterName"]:
            continue
        download_logs_kube_api(
            client,
            item["spec"]["clusterName"],
            item["metadata"]["namespace"],
            args.dest,
            args.must_gather,
            args.kubeconfig_path,
        )
def execute_day1_flow():
    """Run the day-1 installation flow: resolve/create the cluster, build the
    discovery ISO (unless an image was supplied), and optionally boot the nodes.

    Returns the cluster id, or ``None`` when no cluster object exists.
    """
    client, cluster = try_get_cluster()
    cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'

    if cluster:
        # Reuse the existing cluster's identity.
        args.base_dns_domain = cluster.base_dns_domain
        cluster_name = cluster.name
    elif args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    log.info('Cluster name: %s', cluster_name)

    machine_net = MachineNetwork(
        args.ipv4, args.ipv6, args.vm_network_cidr, args.vm_network_cidr6, args.ns_index
    )
    image_path = args.image or os.path.join(
        consts.IMAGE_FOLDER, f'{args.namespace}-installer-image.iso'
    )
    set_tf_config(cluster_name)

    if not args.image:
        # No pre-built ISO supplied — create/fetch the cluster and generate one.
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        if not client:
            client = ClientFactory.create_client(
                url=utils.get_assisted_service_url_by_args(args=args),
                offline_token=utils.get_env("OFFLINE_TOKEN"),
            )
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(
                cluster_name, ssh_public_key=args.ssh_key, **_cluster_create_params(client)
            )
        static_network_config = apply_static_network_config(
            cluster_name=cluster_name,
            kube_client=None,
        )
        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            image_type=args.iso_image_type,
            ssh_key=args.ssh_key,
            static_network_config=static_network_config,
        )

    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        run_nodes_flow(client, cluster_name, cluster, machine_net, image_path)

    return cluster.id if cluster else None
def main():
    """Run the installation flow for an existing cluster.

    When no cluster id is supplied, recover it (and a terraform handle) from
    the latest terraform run for the namespaced cluster name.
    """
    _verify_kube_download_folder(args.kubeconfig_path)
    log.info("Creating assisted service client")

    # If no cluster id was given, read it from the latest terraform run.
    tf = None
    if not args.cluster_id:
        cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
        args.cluster_id = utils.get_tfvars(tf_folder).get('cluster_inventory_id')
        tf = terraform_utils.TerraformUtils(working_dir=tf_folder)

    client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False),
        offline_token=utils.get_env("OFFLINE_TOKEN"),
    )
    run_install_flow(
        client=client,
        cluster_id=args.cluster_id,
        kubeconfig_path=args.kubeconfig_path,
        pull_secret=args.pull_secret,
        tf=tf,
    )
def __init__(self, inventory_url: str, type: str, offline_token: str):
    """Apply the manage operation named by ``type`` to all sufficiently old clusters.

    Loads the operation's config (age threshold, measured field, client method)
    from ``manageable_options.yaml``, collects matching cluster ids, asks for
    confirmation, then invokes the method per cluster, logging API failures.

    Raises:
        ValueError: if ``type`` is not a key in the options file.
    """
    self.client = ClientFactory.create_client(url=inventory_url, offline_token=offline_token)

    with open("src/manage/manageable_options.yaml", "r") as f:
        options = yaml.load(f, Loader=yaml.FullLoader)

    manage_config = options.get(type, None)
    if not manage_config:
        raise ValueError(
            f"{type} is not a valid manageable_options option")

    days_back = manage_config["days_back"]
    measure_field = manage_config["measure_field"]

    # Select only clusters whose measured timestamp is older than the threshold.
    clusters_to_process = [
        cluster["id"]
        for cluster in self.get_clusters()
        if is_older_then(cluster[measure_field], days_back)
    ]

    log.info(f"Running {type} of {len(clusters_to_process)} clusters")
    if not query_yes_no():
        # Operator declined — do nothing.
        return

    method = getattr(self.client, manage_config["method"])
    for cluster_id in clusters_to_process:
        try:
            method(cluster_id=cluster_id)
        except ApiException as e:
            # Best-effort: keep processing the remaining clusters.
            log.warning(f"Can't process cluster_id={cluster_id}, {e}")
def kube_api_client(self):
    """Fixture-style generator that yields a default kube API client."""
    client = ClientFactory.create_kube_api_client()
    yield client
def create_kube_api_client(kubeconfig_path: Optional[str] = None) -> ApiClient:
    """Deprecated shim kept for backward compatibility.

    Emits a DeprecationWarning and delegates to
    ``ClientFactory.create_kube_api_client``.
    """
    warnings.warn(
        "create_kube_api_client is deprecated. Use ClientFactory.create_kube_api_client instead.",
        DeprecationWarning,
    )
    return ClientFactory.create_kube_api_client(kubeconfig_path)