def main():
    """Entry point: optionally create a cluster and download its discovery
    image, then (unless --iso-only) spawn the VMs via nodes_flow."""
    client = None
    cluster = {}
    suffix = str(uuid.uuid4())[:8]

    if args.managed_dns_domains:
        # Use the first entry of the colon-separated list as the base domain.
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]

    # A random suffix keeps cluster names unique across runs.
    if args.cluster_name:
        cluster_name = f"{args.cluster_name}-{suffix}"
    else:
        cluster_name = consts.CLUSTER_PREFIX + suffix

    # When an image is supplied there is no need to create a cluster or
    # download an image — only the VMs are spawned from the given ISO.
    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER)
        client = bm_inventory_api.create_client(args.inventory_url)
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(
                cluster_name,
                ssh_public_key=args.ssh_key,
                **_cluster_create_params()
            )
        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=consts.IMAGE_PATH,
            ssh_key=args.ssh_key,
            proxy_url=args.proxy_url,
        )

    # ISO only: cluster is up and ISO downloaded, but no VM is created.
    if not args.iso_only:
        nodes_flow(client, cluster_name, cluster)
def execute_day2_flow(cluster_id, args, day2_type_flag):
    """Run the day-2 flow against an existing cluster.

    For day2_type_flag == "ocp" the existing cluster is reused directly and
    the terraform dir prefix is fixed; otherwise a fresh day-2 cluster is
    registered against the day-1 API VIP first.
    """
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))

    # Collect the details of the already-installed (day-1) cluster.
    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    api_vip_dnsname = f"api.{cluster_name}.{cluster.base_dns_domain}"
    api_vip_ip = cluster.api_vip

    if day2_type_flag == "ocp":
        terraform_cluster_dir_prefix = "test-infra-cluster-assisted-installer"
    else:
        terraform_cluster_dir_prefix = cluster_name
        # Register a fresh day-2 cluster pointing at the day-1 API VIP.
        cluster_id = str(uuid.uuid4())
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        config_etc_hosts(api_vip_ip, api_vip_dnsname)

    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
    )

    day2_nodes_flow(client, terraform_cluster_dir_prefix, cluster, image_path,
                    args.number_of_day2_workers, api_vip_ip, api_vip_dnsname,
                    args.namespace, args.install_cluster, day2_type_flag)
def execute_day2_flow(ocp_cluster_id, args):
    """Register a day-2 cluster attached to an existing OCP cluster, download
    its discovery image and run the day-2 nodes flow."""
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))

    # Details of the already-installed (day-1) OCP cluster.
    ocp_cluster = client.cluster_get(cluster_id=ocp_cluster_id)
    ocp_cluster_name = ocp_cluster.name
    ocp_openshift_version = ocp_cluster.openshift_version
    ocp_api_vip_dnsname = f"api.{ocp_cluster_name}.{ocp_cluster.base_dns_domain}"
    ocp_api_vip_ip = ocp_cluster.api_vip

    # Register a fresh day-2 cluster pointing at the day-1 API VIP.
    cluster_id = str(uuid.uuid4())
    cluster = client.create_day2_cluster(
        ocp_cluster_name + "-day2", cluster_id,
        **_day2_cluster_create_params(ocp_openshift_version,
                                      ocp_api_vip_dnsname))
    set_cluster_pull_secret(client, cluster_id, args.pull_secret)

    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
    )

    day2_nodes_flow(client, ocp_cluster_name, cluster, image_path,
                    args.number_of_day2_workers, ocp_api_vip_ip,
                    ocp_api_vip_dnsname, args.namespace, args.install_cluster)
def main():
    """Entry point: create (or fetch) a cluster in a per-namespace folder,
    download its discovery ISO, run nodes_flow and clean up the ISO.

    BUGFIX: the previous version used a bare ``return`` inside the ``finally``
    clause, which silently discarded any exception raised by nodes_flow
    (a ``return`` in ``finally`` swallows the in-flight exception). The
    cleanup is now guarded without returning, so failures propagate.
    """
    client = None
    cluster = {}
    internal_cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
    log.info('Cluster name: %s', internal_cluster_name)
    image_folder = os.path.join(consts.BASE_IMAGE_FOLDER, internal_cluster_name)
    log.info('Image folder: %s', image_folder)

    if args.managed_dns_domains:
        # Only the first domain of the colon-separated list is used as base.
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]
    if not args.vm_network_cidr:
        # Derive a per-namespace /24 so parallel namespaces do not collide.
        net_cidr = IPNetwork('192.168.126.0/24')
        net_cidr += args.ns_index
        args.vm_network_cidr = str(net_cidr)
    if not args.network_bridge:
        args.network_bridge = f'tt{args.ns_index}'

    image_path = None
    # If image is passed, there is no need to create cluster and download
    # image, need only to spawn vms with this image
    if not args.image:
        utils.recreate_folder(image_folder)
        client = assisted_service_api.create_client(
            url=utils.get_assisted_service_url_by_args(args=args))
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            random_postfix = str(uuid.uuid4())[:8]
            ui_cluster_name = internal_cluster_name + f'-{random_postfix}'
            log.info('Cluster name on UI: %s', ui_cluster_name)
            cluster = client.create_cluster(ui_cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params())
        image_path = os.path.join(image_folder, consts.IMAGE_NAME)
        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            ssh_key=args.ssh_key,
        )

    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        try:
            nodes_flow(client, internal_cluster_name, cluster,
                       args.image or image_path)
        finally:
            # Delete the generated ISO unless the user asked to keep it or
            # none was generated. No `return` here — see docstring.
            if image_path and not args.keep_iso:
                log.info('deleting iso: %s', image_path)
                os.unlink(image_path)
def download_logs_from_all_hosts(client, cluster_id, output_folder=None):
    """Download the installation log tarball of every host in the cluster.

    :param client: inventory client exposing get_cluster_hosts / download_host_logs
    :param cluster_id: id of the cluster whose host logs are collected
    :param output_folder: destination folder; defaults to ``build/<cluster_id>``
        for backward compatibility. Added so callers (e.g. wait_till_installed)
        can pass an explicit folder instead of relying on the hard-coded path.

    Each download is retried for up to 240s (20s between attempts); any
    exception from download_host_logs is treated as "not ready yet".
    """
    if output_folder is None:
        output_folder = f'build/{cluster_id}'
    utils.recreate_folder(output_folder)
    hosts = client.get_cluster_hosts(cluster_id=cluster_id)
    for host in hosts:
        output_file = os.path.join(output_folder, f'{host["id"]}_logs.tar.gz')
        waiting.wait(
            # Bind host/output_file as defaults so the closure does not
            # depend on the loop variables (defensive against late binding).
            lambda host=host, output_file=output_file: client.download_host_logs(
                cluster_id=cluster_id,
                host_id=host["id"],
                output_file=output_file) is None,
            timeout_seconds=240,
            sleep_seconds=20,
            expected_exceptions=Exception,
            waiting_for="Logs",
        )
def execute_day1_flow(cluster_name):
    """Day-1 flow: create (or fetch) a cluster, generate its discovery ISO
    and spawn the nodes; the ISO is deleted afterwards unless --keep-iso.

    BUGFIX: the previous version used a bare ``return`` inside the
    ``finally`` clause, which silently discarded any exception raised by
    nodes_flow (a ``return`` in ``finally`` swallows the in-flight
    exception). The cleanup is now guarded without returning.
    """
    client = None
    cluster = {}
    if args.managed_dns_domains:
        # Only the first domain of the colon-separated list is used as base.
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]
    if not args.vm_network_cidr:
        # Derive a per-namespace /24 so parallel namespaces do not collide.
        net_cidr = IPNetwork('192.168.126.0/24')
        net_cidr += args.ns_index
        args.vm_network_cidr = str(net_cidr)
    if not args.network_bridge:
        args.network_bridge = f'tt{args.ns_index}'

    image_path = None
    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        client = assisted_service_api.create_client(
            url=utils.get_assisted_service_url_by_args(args=args)
        )
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(
                cluster_name,
                ssh_public_key=args.ssh_key,
                **_cluster_create_params()
            )
        image_path = os.path.join(
            consts.IMAGE_FOLDER,
            f'{args.namespace}-installer-image.iso'
        )
        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            ssh_key=args.ssh_key,
        )

    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        try:
            nodes_flow(client, cluster_name, cluster,
                       args.image or image_path)
        finally:
            # Delete the generated ISO unless the user asked to keep it or
            # none was generated. No `return` here — see docstring.
            if image_path and not args.keep_iso:
                log.info('deleting iso: %s', image_path)
                os.unlink(image_path)
def execute_day2_flow(cluster_name, args):
    """Register a day-2 cluster, download its discovery image and run the
    day-2 nodes flow."""
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))

    # Register the day-2 cluster under a freshly generated id.
    day2_cluster_id = str(uuid.uuid4())
    cluster = client.create_day2_cluster(cluster_name, day2_cluster_id,
                                         **_day2_cluster_create_params(args))
    set_cluster_pull_secret(client, day2_cluster_id, args.pull_secret)

    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
    )

    day2_nodes_flow(client, cluster_name, cluster, image_path,
                    args.number_of_workers, args.api_vip_ip,
                    args.api_vip_dnsname, args.namespace,
                    args.install_cluster)
def wait_till_installed(client, cluster, timeout=60 * 60 * 2):
    """Block until all hosts and then the cluster report INSTALLED.

    Host logs are always collected into build/<cluster_id>, whether the
    wait succeeded or raised (timeout).
    """
    log.info("Waiting %s till cluster finished installation", timeout)
    # TODO: Change host validation for only previous known hosts
    cluster_id = cluster.id
    try:
        utils.wait_till_all_hosts_are_in_status(
            client=client,
            cluster_id=cluster_id,
            nodes_count=len(cluster.hosts),
            statuses=[consts.NodesStatus.INSTALLED],
            timeout=timeout,
            interval=60,
        )
        utils.wait_till_cluster_is_in_status(
            client=client,
            cluster_id=cluster_id,
            statuses=[consts.ClusterStatus.INSTALLED],
            timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
        )
    finally:
        # Collect logs even on failure so timed-out installs can be debugged.
        output_folder = f'build/{cluster_id}'
        utils.recreate_folder(output_folder)
        download_logs_from_all_hosts(client=client,
                                     cluster_id=cluster_id,
                                     output_folder=output_folder)
def download_logs(client: InventoryClient, cluster: dict, dest: str):
    """Download cluster events and logs into the per-cluster output folder.

    Skips the download entirely when the target folder already exists.
    ApiException from the individual downloads is deliberately suppressed so
    a missing artifact does not abort collection of the remaining ones.

    BUGFIX: the skip log message read "logs direct"; corrected to
    "logs directory" (matching the newer variant of this function).
    """
    output_folder = get_logs_output_folder(dest, cluster)
    if os.path.isdir(output_folder):
        log.info(f"Skipping. The logs directory {output_folder} already exists.")
        return

    recreate_folder(output_folder)
    # NOTE(review): 'metdata.json' looks like a typo of 'metadata.json', but
    # the name is kept as-is since downstream tooling may depend on it.
    write_metadata_file(client, cluster,
                        os.path.join(output_folder, 'metdata.json'))

    with suppress(assisted_service_client.rest.ApiException):
        client.download_cluster_events(
            cluster['id'],
            os.path.join(output_folder, f"cluster_{cluster['id']}_events.json"))

    with suppress(assisted_service_client.rest.ApiException):
        client.download_cluster_logs(
            cluster['id'],
            os.path.join(output_folder, f"cluster_{cluster['id']}_logs.tar"))

    # Make the collected artifacts readable by everyone.
    run_command("chmod -R ugo+rx '%s'" % output_folder)
def download_logs(client: InventoryClient, cluster: dict, dest: str, must_gather: bool):
    """Collect cluster artifacts (ignition files, events, logs, kubeconfig,
    optional must-gather) into the per-cluster output folder.

    Skips collection entirely when the folder already exists. ApiException
    from individual downloads is suppressed so one missing artifact does not
    abort the rest.
    """
    output_folder = get_logs_output_folder(dest, cluster)
    if os.path.isdir(output_folder):
        log.info(
            f"Skipping. The logs directory {output_folder} already exists.")
        return

    cluster_id = cluster['id']
    files_folder = os.path.join(output_folder, "cluster_files")
    recreate_folder(output_folder)
    recreate_folder(files_folder)
    write_metadata_file(client, cluster,
                        os.path.join(output_folder, 'metdata.json'))

    with suppress(assisted_service_client.rest.ApiException):
        client.download_ignition_files(cluster_id, files_folder)

    with suppress(assisted_service_client.rest.ApiException):
        client.download_cluster_events(
            cluster_id,
            os.path.join(output_folder, f"cluster_{cluster_id}_events.json"))

    with suppress(assisted_service_client.rest.ApiException):
        client.download_cluster_logs(
            cluster_id,
            os.path.join(output_folder, f"cluster_{cluster_id}_logs.tar"))

    kubeconfig_path = os.path.join(output_folder, "kubeconfig-noingress")
    with suppress(assisted_service_client.rest.ApiException):
        client.download_kubeconfig_no_ingress(cluster_id, kubeconfig_path)

    if must_gather:
        must_gather_dir = os.path.join(output_folder, "must-gather")
        recreate_folder(must_gather_dir)
        # must-gather needs the no-ingress kubeconfig downloaded above.
        download_must_gather(kubeconfig_path, must_gather_dir)

    run_command("chmod -R ugo+rx '%s'" % output_folder)
def nodes_flow(client, cluster_name, cluster, image_path):
    """Create the libvirt nodes with terraform and drive them through the
    install flow.

    :param client: inventory client, or None when running image-only (no
        cluster was created); all API interaction is skipped in that case
    :param cluster_name: name used for node details and the terraform folder
    :param cluster: cluster object (may be an empty dict when no cluster)
    :param image_path: discovery ISO the VMs boot from

    Reads module-level ``args`` (namespace, storage_path, master_count,
    vip_dhcp_allocation, vm_network_cidr, install_cluster, pull_secret,
    wait_for_cvo).
    """
    nodes_details = _create_node_details(cluster_name)
    if cluster:
        nodes_details["cluster_inventory_id"] = cluster.id

    # Fresh terraform working dir populated from the template.
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    utils.recreate_folder(tf_folder)
    copy_tree(consts.TF_TEMPLATE, tf_folder)
    tf = terraform_utils.TerraformUtils(working_dir=tf_folder)

    create_nodes_and_wait_till_registered(cluster_name=cluster_name,
                                          inventory_client=client,
                                          cluster=cluster,
                                          image_path=image_path,
                                          storage_path=args.storage_path,
                                          master_count=args.master_count,
                                          nodes_details=nodes_details,
                                          tf=tf)

    if client:
        cluster_info = client.cluster_get(cluster.id)
        macs = utils.get_libvirt_nodes_macs(
            nodes_details["libvirt_network_name"])

        # VIPs are only set when not already present on the cluster.
        if not (cluster_info.api_vip and cluster_info.ingress_vip):
            utils.wait_till_hosts_with_macs_are_in_status(
                client=client,
                cluster_id=cluster.id,
                macs=macs,
                statuses=[
                    consts.NodesStatus.INSUFFICIENT,
                    consts.NodesStatus.PENDING_FOR_INPUT,
                ],
            )
            # Either let the service allocate VIPs via DHCP (needs the
            # machine CIDR) or set them explicitly.
            if args.vip_dhcp_allocation:
                set_cluster_machine_cidr(client, cluster.id,
                                         args.vm_network_cidr)
            else:
                set_cluster_vips(client, cluster.id)
        else:
            log.info("VIPs already configured")

        set_hosts_roles(client, cluster.id,
                        nodes_details["libvirt_network_name"])
        utils.wait_till_hosts_with_macs_are_in_status(
            client=client,
            cluster_id=cluster.id,
            macs=macs,
            statuses=[consts.NodesStatus.KNOWN],
        )

        if args.install_cluster:
            # NOTE(review): brief settle delay before kicking off install —
            # presumably to let host statuses stabilize; confirm if needed.
            time.sleep(10)
            install_cluster.run_install_flow(
                client=client,
                cluster_id=cluster.id,
                kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH,
                pull_secret=args.pull_secret,
                tf=tf)
            # Validate DNS domains resolvability
            validate_dns(client, cluster.id)
            if args.wait_for_cvo:
                cluster_info = client.cluster_get(cluster.id)
                log.info("Start waiting till CVO status is available")
                utils.config_etc_hosts(cluster_info)
                utils.wait_for_cvo_available()