def _create_tf_folder(cls, cluster_name: str, platform: str):
    """Recreate the terraform working folder for *cluster_name* and seed it from the template tree.

    Returns the path of the freshly created folder.
    """
    folder = utils.get_tf_folder(cluster_name)
    logging.info("Creating %s as terraform folder", folder)
    utils.recreate_folder(folder)
    # The "none" platform gets a different template layout.
    running_none_platform = platform == consts.Platforms.NONE
    utils.copy_template_tree(folder, none_platform_mode=running_none_platform)
    return folder
def _create_tf_folder(self, name: str, platform: str):
    """Recreate the terraform folder for *name* and copy in the matching templates.

    Template selection depends on whether the platform is "none" and whether
    this entity is an infra-env (as opposed to a cluster).
    Returns the path of the created folder.
    """
    folder = utils.get_tf_folder(name)
    logging.info("Creating %s as terraform folder", folder)
    utils.recreate_folder(folder)
    none_mode = platform == consts.Platforms.NONE
    infra_env_mode = isinstance(self._entity_config, BaseInfraEnvConfig)
    utils.copy_template_tree(folder,
                             none_platform_mode=none_mode,
                             is_infra_env=infra_env_mode)
    return folder
def apply_static_network_config(cluster_name, kube_client):
    """Generate static-network config from terraform data and, in kube-api mode,
    apply it as an NMStateConfig resource.

    Returns the generated static network config list, or None when static
    networking is disabled via CLI args.
    """
    # Feature is opt-in; nothing to do otherwise.
    if not args.with_static_network_config:
        return None
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    static_network_config = static_network.generate_static_network_data_from_tf(
        tf_folder)
    if args.kube_api:
        # Only index [0] of the config is consumed below, hence the SNO restriction.
        if args.master_count != 1:
            raise NotImplementedError(
                "At the moment, KubeAPI workflow supports only single-node clusters"
            )
        mac_to_interface = static_network_config[0]["mac_interface_map"]
        # Translate the tf mac/interface map into the NMStateConfig schema.
        interfaces = [{
            "name": item["logical_nic_name"],
            "macAddress": item["mac_address"]
        } for item in mac_to_interface]
        nmstate_config = NMStateConfig(
            kube_api_client=kube_client,
            name=f"{cluster_name}-nmstate-config",
            namespace=args.namespace,
        )
        nmstate_config.apply(
            config=yaml.safe_load(static_network_config[0]["network_yaml"]),
            interfaces=interfaces,
            # Label ties the NMStateConfig to this cluster's InfraEnv selector
            # (presumably — confirm against the InfraEnv definition).
            label=cluster_name,
        )
    return static_network_config
def execute_day2_flow(cluster_id, args, day2_type_flag, has_ipv6):
    """Run the day-2 (add workers) flow against an existing cluster.

    For the "ocp" flavor the existing terraform folder prefix is reused;
    otherwise a dedicated day-2 cluster is registered with the service first.
    Ends by generating/downloading the discovery ISO and delegating node
    bring-up to day2_nodes_flow().
    """
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = ClientFactory.create_client(
        url=utils.get_assisted_service_url_by_args(args=args),
        offline_token=utils.get_env("OFFLINE_TOKEN"))
    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    api_vip_dnsname = "api." + cluster_name + "." + cluster.base_dns_domain
    api_vip_ip = cluster.api_vip
    terraform_cluster_dir_prefix = cluster_name
    if day2_type_flag == "ocp":
        terraform_cluster_dir_prefix = f"{consts.CLUSTER_PREFIX}-{consts.DEFAULT_NAMESPACE}"
    else:
        # Non-OCP flavor: register a brand-new day-2 cluster under a fresh id,
        # rebinding both `cluster_id` and `cluster` to the day-2 entity.
        cluster_id = str(uuid.uuid4())
        copy_proxy_from_cluster = cluster
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        # Proxy settings are copied over from the day-1 cluster.
        set_cluster_proxy(client, cluster_id, copy_proxy_from_cluster, args)
    config_etc_hosts(api_vip_ip, api_vip_dnsname)
    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    # Day-2 terraform lives under the bare-metal platform subfolder.
    tf_folder = os.path.join(
        utils.get_tf_folder(terraform_cluster_dir_prefix, args.namespace),
        consts.Platforms.BARE_METAL)
    set_day2_tf_configuration(tf_folder, args.number_of_day2_workers,
                              api_vip_ip, api_vip_dnsname)
    static_network_config = None
    if args.with_static_network_config:
        static_network_config = static_network.generate_day2_static_network_data_from_tf(
            tf_folder, args.number_of_day2_workers)
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
        static_network_config=static_network_config)
    day2_nodes_flow(
        client,
        terraform_cluster_dir_prefix,
        tf_folder,
        cluster,
        has_ipv6,
        args.number_of_day2_workers,
        api_vip_ip,
        api_vip_dnsname,
        args.install_cluster,
        day2_type_flag,
        args.with_static_network_config,
        cluster_name,
    )
def get_ocp_cluster(args):
    """Fetch the cluster object, resolving the id from the latest run's tfvars when absent."""
    if not args.cluster_id:
        # Fall back to the terraform state of the most recent run.
        derived_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        folder = utils.get_tf_folder(derived_name, args.namespace)
        args.cluster_id = utils.get_tfvars(folder).get('cluster_inventory_id')
    service_url = utils.get_assisted_service_url_by_args(args=args)
    client = assisted_service_api.create_client(url=service_url)
    return client.cluster_get(cluster_id=args.cluster_id)
def delete_cluster(cluster_name, namespace):
    """Delete the cluster from the service (unless --only-nodes) and tear down its nodes."""
    log.info('Deleting cluster: %s in namespace: %s', cluster_name, namespace)
    folder = utils.get_tf_folder(cluster_name, namespace)
    # A missing folder just means no terraform vars to pass along.
    tfvars = utils.get_tfvars(folder) if os.path.exists(folder) else {}
    if not args.only_nodes:
        try_to_delete_cluster(namespace, tfvars)
    delete_nodes(cluster_name, namespace, folder, tfvars)
def get_nodes_details(cluster_name, namespace, tf):
    """Return libvirt node details resolved from terraform state for both cluster networks."""
    folder = utils.get_tf_folder(cluster_name, namespace)
    variables = utils.get_tfvars(folder)
    primary = variables['libvirt_network_name']
    secondary = variables['libvirt_secondary_network_name']
    return utils.get_libvirt_nodes_from_tf_state(
        network_names=(primary, secondary),
        tf_state=tf.get_state(),
    )
def get_nodes_details(cluster_name, namespace, tf):
    """Return libvirt node details from terraform state, reading tfvars from the bare-metal subfolder."""
    folder = utils.get_tf_folder(cluster_name, namespace)
    # tfvars live under the bare-metal platform subdirectory here.
    baremetal_template = os.path.join(folder, consts.Platforms.BARE_METAL)
    variables = utils.get_tfvars(baremetal_template)
    primary = variables["libvirt_network_name"]
    secondary = variables["libvirt_secondary_network_name"]
    return utils.get_libvirt_nodes_from_tf_state(
        network_names=(primary, secondary),
        tf_state=tf.get_state(),
    )
def nodes_flow_kube_api(cluster_name, machine_net, cluster_deployment,
                        agent_cluster_install):
    """Create VMs for the kube-api flow, register them, set hostnames/IPs,
    and optionally kick off installation.

    No inventory-service client is used here — registration is tracked via
    the ClusterDeployment CR instead.
    """
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    # Terraform for this flow lives under the bare-metal platform subfolder.
    baremetal_template = os.path.join(tf_folder, consts.Platforms.BARE_METAL)
    nodes_details = utils.get_tfvars(baremetal_template)
    tf = terraform_utils.TerraformUtils(working_dir=baremetal_template)
    # IPv4 is assumed unless the network is IPv6-only.
    is_ipv4 = machine_net.has_ip_v4 or not machine_net.has_ip_v6
    nodes_number = args.master_count + args.number_of_workers
    create_nodes_and_wait_till_registered(
        inventory_client=None,
        cluster=None,
        nodes_details=nodes_details,
        tf=tf,
        is_ipv4=is_ipv4,
        nodes_number=nodes_number,
        cluster_deployment=cluster_deployment,
    )
    if args.master_count == 1:
        # Single-node: pin the node IP in terraform.
        set_single_node_ip(
            client=None,
            cluster_id=None,
            main_cidr=args.vm_network_cidr if is_ipv4 else args.vm_network_cidr6,
            is_ipv4=is_ipv4,
            cluster_deployment=cluster_deployment,
            tf=tf,
        )
    else:
        log.info("VIPs already configured")
    kubeapi_utils.set_agents_hostnames(
        cluster_deployment=cluster_deployment,
        is_ipv4=is_ipv4,
        static_network_mode=args.with_static_network_config,
        tf=tf,
        nodes_number=nodes_number,
    )
    if args.install_cluster:
        install_cluster.run_installation_flow_kube_api(
            cluster_deployment=cluster_deployment,
            agent_cluster_install=agent_cluster_install,
            nodes_number=nodes_number,
            kubeconfig_path=utils.get_kubeconfig_path(cluster_name))
def set_tf_config(cluster_name):
    """Recreate the terraform folder from templates and fill in its tfvars for this run."""
    nodes_details = _create_node_details(cluster_name)
    folder = utils.get_tf_folder(cluster_name, args.namespace)
    utils.recreate_folder(folder)
    utils.copy_template_tree(folder, is_none_platform_mode())
    net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                         args.vm_network_cidr6, args.ns_index)
    # Used only when the caller did not supply --image.
    fallback_image = os.path.join(consts.IMAGE_FOLDER,
                                  f'{args.namespace}-installer-image.iso')
    fill_tfvars(image_path=args.image or fallback_image,
                storage_path=args.storage_path,
                master_count=args.master_count,
                nodes_details=nodes_details,
                tf_folder=folder,
                machine_net=net)
def nodes_flow_kube_api(cluster_name, machine_net, cluster_deployment):
    """Create and register nodes for the kube-api flow, then set IPs/hostnames
    and optionally install.

    Works without an inventory-service client; progress is tracked through
    the ClusterDeployment CR when one is supplied.
    """
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    nodes_details = utils.get_tfvars(tf_folder)
    tf = terraform_utils.TerraformUtils(working_dir=tf_folder)
    # IPv4 is assumed unless the network is IPv6-only.
    is_ipv4 = machine_net.has_ip_v4 or not machine_net.has_ip_v6
    nodes_number = args.master_count + args.number_of_workers
    create_nodes_and_wait_till_registered(
        inventory_client=None,
        cluster=None,
        nodes_details=nodes_details,
        tf=tf,
        is_ipv4=is_ipv4,
        nodes_number=nodes_number,
        cluster_deployment=cluster_deployment,
    )
    if cluster_deployment:
        if args.master_count == 1:
            # Single-node: pin the node IP in terraform.
            set_single_node_ip(
                client=None,
                cluster_id=None,
                main_cidr=args.vm_network_cidr if is_ipv4 else args.vm_network_cidr6,
                is_ipv4=is_ipv4,
                cluster_deployment=cluster_deployment,
                tf=tf,
            )
        else:
            log.info("VIPs already configured")
        kubeapi_utils.set_agents_hostnames(
            cluster_deployment=cluster_deployment,
            is_ipv4=is_ipv4,
            static_network_mode=args.with_static_network_config,
            tf=tf,
            nodes_number=nodes_number,
        )
    if args.install_cluster:
        install_cluster.run_installation_flow_kube_api(
            cluster_deployment=cluster_deployment,
            nodes_number=nodes_number,
            kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH,
        )
def execute_day2_flow(cluster_id, args, day2_type_flag, has_ipv4):
    """Run the day-2 (add workers) flow against an existing cluster.

    For the "ocp" flavor the fixed terraform prefix is reused; otherwise a
    dedicated day-2 cluster is registered with the service first. Ends by
    generating/downloading the discovery ISO and delegating to day2_nodes_flow().
    """
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args))
    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    api_vip_dnsname = "api." + cluster_name + "." + cluster.base_dns_domain
    api_vip_ip = cluster.api_vip
    terraform_cluster_dir_prefix = cluster_name
    if day2_type_flag == "ocp":
        terraform_cluster_dir_prefix = "test-infra-cluster-assisted-installer"
    else:
        # Non-OCP flavor: register a brand-new day-2 cluster under a fresh id,
        # rebinding both `cluster_id` and `cluster` to the day-2 entity.
        cluster_id = str(uuid.uuid4())
        copy_proxy_from_cluster = cluster
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        # Proxy settings are copied over from the day-1 cluster.
        set_cluster_proxy(client, cluster_id, copy_proxy_from_cluster, args)
    config_etc_hosts(api_vip_ip, api_vip_dnsname)
    image_path = os.path.join(consts.IMAGE_FOLDER,
                              f'{args.namespace}-installer-image.iso')
    tf_folder = utils.get_tf_folder(terraform_cluster_dir_prefix,
                                    args.namespace)
    set_day2_tf_configuration(tf_folder, args.number_of_day2_workers,
                              api_vip_ip, api_vip_dnsname)
    static_network_config = None
    if args.with_static_network_config:
        static_network_config = static_network.generate_day2_static_network_data_from_tf(
            tf_folder, args.number_of_day2_workers)
    client.generate_and_download_image(
        cluster_id=cluster.id,
        image_path=image_path,
        ssh_key=args.ssh_key,
        static_network_config=static_network_config)
    day2_nodes_flow(client, terraform_cluster_dir_prefix, tf_folder, cluster,
                    has_ipv4, args.number_of_day2_workers, api_vip_ip,
                    api_vip_dnsname, args.install_cluster, day2_type_flag,
                    args.with_static_network_config)
def main():
    """Entry point: resolve the target cluster and run the install flow."""
    _verify_kube_download_folder(args.kubeconfig_path)
    log.info("Creating assisted service client")
    tf = None
    # If no cluster id is given, read it from the latest run's tfvars.
    if not args.cluster_id:
        cluster_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
        args.cluster_id = utils.get_tfvars(tf_folder).get(
            'cluster_inventory_id')
        # Terraform handle is only available when we located the tf folder.
        tf = terraform_utils.TerraformUtils(working_dir=tf_folder)
    client = assisted_service_api.create_client(
        url=utils.get_assisted_service_url_by_args(args=args, wait=False))
    run_install_flow(client=client,
                     cluster_id=args.cluster_id,
                     kubeconfig_path=args.kubeconfig_path,
                     pull_secret=args.pull_secret,
                     tf=tf)
def day2_nodes_flow(client, terraform_cluster_dir_prefix, cluster, has_ipv_4,
                    num_worker_nodes, api_vip_ip, api_vip_dnsname, namespace,
                    install_cluster_flag, day2_type_flag):
    """Bring up day-2 worker VMs via terraform, wait for them to register,
    and optionally install them into the existing OCP cluster.

    For the "ocp" flavor all nodes are expected to reach INSTALLED; otherwise
    only the new workers are expected to reach DAY2_INSTALLED.
    """
    tf_network_name, total_num_nodes = apply_day2_tf_configuration(
        terraform_cluster_dir_prefix, num_worker_nodes, api_vip_ip,
        api_vip_dnsname, namespace)
    with utils.file_lock_context():
        utils.run_command(
            f'make _apply_terraform CLUSTER_NAME={terraform_cluster_dir_prefix}'
        )
    time.sleep(5)
    if day2_type_flag == "ocp":
        num_nodes_to_wait = total_num_nodes
        installed_status = consts.NodesStatus.INSTALLED
    else:
        num_nodes_to_wait = num_worker_nodes
        installed_status = consts.NodesStatus.DAY2_INSTALLED
    utils.wait_till_nodes_are_ready(nodes_count=num_nodes_to_wait,
                                    network_name=tf_network_name)
    waiting.wait(
        lambda: utils.are_libvirt_nodes_in_cluster_hosts(
            client, cluster.id, num_nodes_to_wait),
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=10,
        waiting_for="Nodes to be registered in inventory service",
    )
    if not has_ipv_4:
        # Libvirt does not report hostnames for IPv6-only hosts, so they are
        # set explicitly from terraform state.
        log.info(
            "Set hostnames of day2 cluster %s to work around libvirt for Terraform not setting"
            " hostnames of IPv6-only hosts", cluster.id)
        tf_folder = utils.get_tf_folder(terraform_cluster_dir_prefix,
                                        namespace)
        set_hostnames_from_tf(client=client,
                              cluster_id=cluster.id,
                              tf_folder=tf_folder,
                              network_name=tf_network_name)
    utils.wait_till_all_hosts_are_in_status(
        client=client,
        cluster_id=cluster.id,
        nodes_count=num_worker_nodes,
        statuses=[consts.NodesStatus.KNOWN],
        interval=30,
    )
    if install_cluster_flag:
        log.info("Start installing all known nodes in the cluster %s",
                 cluster.id)
        ocp_orig_ready_nodes = get_ocp_cluster_ready_nodes_num()
        hosts = client.get_cluster_hosts(cluster.id)
        # Plain loop instead of a side-effect list comprehension.
        for host in hosts:
            if host["status"] == 'known':
                client.install_day2_host(cluster.id, host['id'])
        log.info(
            "Start waiting until all nodes of cluster %s have been installed (reached added-to-existing-cluster state)",
            cluster.id)
        utils.wait_till_all_hosts_are_in_status(
            client=client,
            cluster_id=cluster.id,
            nodes_count=num_nodes_to_wait,
            statuses=[installed_status],
            interval=30,
        )
        log.info(
            "Start waiting until installed nodes has actually been added to the OCP cluster"
        )
        waiting.wait(
            lambda: wait_nodes_join_ocp_cluster(
                ocp_orig_ready_nodes, num_worker_nodes, day2_type_flag),
            timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
            sleep_seconds=30,
            waiting_for="Day2 nodes to be added to OCP cluster",
        )
        log.info("%d worker nodes were successfully added to OCP cluster",
                 num_worker_nodes)
def nodes_flow(client, cluster_name, cluster, machine_net, kube_client=None,
               cluster_deployment=None):
    """Create the cluster VMs, wire up networking/VIPs, assign roles or agent
    hostnames, and optionally run installation.

    Supports both REST (`client`) and kube-api (`cluster_deployment`) flows;
    the kube-api branches skip the REST-only waits and cidr/vip calls.
    """
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    nodes_details = utils.get_tfvars(tf_folder)
    if cluster:
        # Persist the service-side cluster id back into tfvars for later runs.
        nodes_details["cluster_inventory_id"] = cluster.id
        utils.set_tfvars(tf_folder, nodes_details)
    tf = terraform_utils.TerraformUtils(working_dir=tf_folder)
    # IPv4 is assumed unless the network is IPv6-only.
    is_ipv4 = machine_net.has_ip_v4 or not machine_net.has_ip_v6
    nodes_number = args.master_count + args.number_of_workers
    create_nodes_and_wait_till_registered(
        inventory_client=client,
        cluster=cluster,
        nodes_details=nodes_details,
        tf=tf,
        is_ipv4=is_ipv4,
        nodes_number=nodes_number,
        cluster_deployment=cluster_deployment,
    )
    main_cidr = args.vm_network_cidr if is_ipv4 else args.vm_network_cidr6
    secondary_cidr = machine_net.provisioning_cidr_v4 if is_ipv4 else machine_net.provisioning_cidr_v6
    if client:
        cluster_info = client.cluster_get(cluster.id)
        macs = utils.get_libvirt_nodes_macs(
            nodes_details["libvirt_network_name"])
        if is_none_platform_mode():
            # None-platform uses the secondary network too.
            macs += utils.get_libvirt_nodes_macs(
                nodes_details["libvirt_secondary_network_name"])
        if not (cluster_info.api_vip and cluster_info.ingress_vip):
            if not args.kube_api:
                utils.wait_till_hosts_with_macs_are_in_status(
                    client=client,
                    cluster_id=cluster.id,
                    macs=macs,
                    statuses=[
                        consts.NodesStatus.INSUFFICIENT,
                        consts.NodesStatus.PENDING_FOR_INPUT,
                        consts.NodesStatus.KNOWN
                    ],
                )
            if args.master_count == 1:
                # Single-node: pin the node IP instead of using VIPs.
                set_single_node_ip(
                    client=client,
                    cluster_id=cluster.id,
                    main_cidr=main_cidr,
                    is_ipv4=is_ipv4,
                    cluster_deployment=cluster_deployment,
                    tf=tf,
                )
                if not args.kube_api:
                    set_cluster_machine_cidr(
                        client=client,
                        cluster_id=cluster.id,
                        machine_net=machine_net,
                        set_vip_dhcp_allocation=False,
                    )
            elif is_none_platform_mode():
                set_cluster_vips(client, cluster.id, machine_net)
            elif args.vip_dhcp_allocation and not args.kube_api:
                set_cluster_machine_cidr(client, cluster.id, machine_net)
            else:
                set_cluster_vips(client, cluster.id, machine_net)
        else:
            log.info("VIPs already configured")
        if args.kube_api:
            kubeapi_utils.set_agents_hostnames(
                cluster_deployment=cluster_deployment,
                is_ipv4=is_ipv4,
                static_network_mode=args.with_static_network_config,
                tf=tf,
                nodes_number=nodes_number,
            )
        else:
            set_hosts_roles(
                client=client,
                cluster=cluster,
                nodes_details=nodes_details,
                machine_net=machine_net,
                tf=tf,
                master_count=args.master_count,
                static_network_mode=args.with_static_network_config,
            )
        if is_none_platform_mode() and args.master_count > 1:
            # None-platform multi-master: stand up an external load balancer
            # fronting masters (and workers, falling back to masters when none).
            master_ips = helper_cluster.Cluster.get_master_ips(
                client, cluster.id,
                main_cidr) + helper_cluster.Cluster.get_master_ips(
                    client, cluster.id, secondary_cidr)
            worker_ips = helper_cluster.Cluster.get_worker_ips(
                client, cluster.id,
                main_cidr) + helper_cluster.Cluster.get_worker_ips(
                    client, cluster.id, secondary_cidr)
            if not worker_ips:
                worker_ips = master_ips
            load_balancer_ip = _get_host_ip_from_cidr(
                machine_net.cidr_v6 if machine_net.has_ip_v6
                and not machine_net.has_ip_v4 else machine_net.cidr_v4)
            lb_controller = LoadBalancerController(tf)
            lb_controller.set_load_balancing_config(load_balancer_ip,
                                                    master_ips, worker_ips)
        if not args.kube_api:
            utils.wait_till_hosts_with_macs_are_in_status(
                client=client,
                cluster_id=cluster.id,
                macs=macs,
                statuses=[consts.NodesStatus.KNOWN],
            )
        if args.install_cluster:
            install_cluster.run_install_flow(
                client=client,
                cluster_id=cluster.id,
                kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH,
                pull_secret=args.pull_secret,
                tf=tf,
                cluster_deployment=cluster_deployment,
                nodes_number=nodes_number,
            )
            # Validate DNS domains resolvability
            validate_dns(client, cluster.id)
def nodes_flow(client, cluster_name, cluster, image_path):
    """Create cluster VMs from templates, register them, configure VIPs and
    hostnames, and optionally run installation and wait for CVO.
    """
    nodes_details = _create_node_details(cluster_name)
    if cluster:
        nodes_details["cluster_inventory_id"] = cluster.id
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    utils.recreate_folder(tf_folder)
    copy_tree(consts.TF_TEMPLATE, tf_folder)
    tf = terraform_utils.TerraformUtils(working_dir=tf_folder)
    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                                 args.vm_network_cidr6, args.ns_index)
    create_nodes_and_wait_till_registered(cluster_name=cluster_name,
                                          inventory_client=client,
                                          cluster=cluster,
                                          image_path=image_path,
                                          storage_path=args.storage_path,
                                          master_count=args.master_count,
                                          nodes_details=nodes_details,
                                          tf=tf,
                                          machine_net=machine_net)
    if client:
        cluster_info = client.cluster_get(cluster.id)
        macs = utils.get_libvirt_nodes_macs(
            nodes_details["libvirt_network_name"])
        if not (cluster_info.api_vip and cluster_info.ingress_vip):
            utils.wait_till_hosts_with_macs_are_in_status(
                client=client,
                cluster_id=cluster.id,
                macs=macs,
                statuses=[
                    consts.NodesStatus.INSUFFICIENT,
                    consts.NodesStatus.PENDING_FOR_INPUT,
                ],
            )
            if args.vip_dhcp_allocation:
                set_cluster_machine_cidr(client, cluster.id, machine_net)
            else:
                set_cluster_vips(client, cluster.id, machine_net)
        else:
            log.info("VIPs already configured")
        network_name = nodes_details["libvirt_network_name"]
        if machine_net.has_ip_v4:
            libvirt_nodes = utils.get_libvirt_nodes_mac_role_ip_and_name(
                network_name)
            update_hostnames = False
        else:
            # IPv6-only: libvirt does not report hostnames, so read them from
            # terraform state and push them explicitly.
            log.warning(
                "Work around libvirt for Terrafrom not setting hostnames of IPv6-only hosts"
            )
            libvirt_nodes = _get_libvirt_nodes_from_tf_state(
                network_name, tf.get_state())
            update_hostnames = True
        update_hosts(client, cluster.id, libvirt_nodes, update_hostnames)
        utils.wait_till_hosts_with_macs_are_in_status(
            client=client,
            cluster_id=cluster.id,
            macs=macs,
            statuses=[consts.NodesStatus.KNOWN],
        )
        if args.install_cluster:
            time.sleep(10)
            install_cluster.run_install_flow(
                client=client,
                cluster_id=cluster.id,
                kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH,
                pull_secret=args.pull_secret,
                tf=tf)
            # Validate DNS domains resolvability
            validate_dns(client, cluster.id)
            if args.wait_for_cvo:
                cluster_info = client.cluster_get(cluster.id)
                log.info("Start waiting till CVO status is available")
                config_etc_hosts(cluster_info.name,
                                 cluster_info.base_dns_domain,
                                 cluster_info.api_vip)
                utils.wait_for_cvo_available()
def _create_tf_folder(self):
    """Recreate this cluster's terraform folder from the template tree and return its path."""
    folder = utils.get_tf_folder(self.cluster_name)
    logging.info("Creating %s as terraform folder", folder)
    utils.recreate_folder(folder)
    utils.copy_template_tree(folder)
    return folder
def _create_tf_folder(self):
    """Recreate this cluster's terraform folder by copying the terraform template and return its path."""
    folder = utils.get_tf_folder(self.cluster_name)
    logging.info("Creating %s as terraform folder", folder)
    utils.recreate_folder(folder)
    copy_tree(consts.TF_TEMPLATE, folder)
    return folder
def apply_day2_tf_configuration(cluster_name, num_worker_nodes, api_vip_ip,
                                api_vip_dnsname, namespace):
    """Write day-2 terraform variables, then return the network/node info read back from terraform."""
    folder = utils.get_tf_folder(cluster_name, namespace)
    configure_terraform(folder, num_worker_nodes, api_vip_ip, api_vip_dnsname)
    return get_network_nodes_from_terraform(folder)
def execute_day1_flow(cluster_name):
    """Run the day-1 flow: derive network defaults, write terraform config,
    create/fetch the cluster, download its ISO, and launch nodes_flow().

    Returns the cluster id (None when the finally-block early-return fires).
    """
    client = None
    cluster = {}
    if args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]
    if not args.vm_network_cidr:
        # Offset the default CIDR by the namespace index to avoid collisions.
        net_cidr = IPNetwork('192.168.126.0/24')
        net_cidr += args.ns_index
        args.vm_network_cidr = str(net_cidr)
    if not args.vm_network_cidr6:
        net_cidr = IPNetwork('1001:db8::/120')
        net_cidr += args.ns_index
        args.vm_network_cidr6 = str(net_cidr)
    if not args.network_bridge:
        args.network_bridge = f'tt{args.ns_index}'
    set_tf_config(cluster_name)
    image_path = None
    image_type = args.iso_image_type
    if not args.image:
        # No pre-built image supplied: talk to the service and download one.
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        client = assisted_service_api.create_client(
            url=utils.get_assisted_service_url_by_args(args=args))
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        else:
            cluster = client.create_cluster(cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params())
        image_path = os.path.join(consts.IMAGE_FOLDER,
                                  f'{args.namespace}-installer-image.iso')
        if args.with_static_ips:
            tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
            static_ips_config = static_ips.generate_static_ips_data_from_tf(
                tf_folder)
        else:
            static_ips_config = None
        client.generate_and_download_image(
            cluster_id=cluster.id,
            image_path=image_path,
            image_type=image_type,
            ssh_key=args.ssh_key,
            static_ips=static_ips_config,
        )
    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        try:
            nodes_flow(client, cluster_name, cluster)
        finally:
            # NOTE(review): `return` inside `finally` suppresses any exception
            # raised by nodes_flow and returns None — confirm this is intended.
            if not image_path or args.keep_iso:
                return
            log.info('deleting iso: %s', image_path)
            os.unlink(image_path)
    return cluster.id
def nodes_flow(client, cluster_name, cluster):
    """Create the cluster VMs from existing tfvars, register them, configure
    VIPs/roles, and optionally run installation and wait for CVO.
    """
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    nodes_details = utils.get_tfvars(tf_folder)
    if cluster:
        # Persist the service-side cluster id back into tfvars for later runs.
        nodes_details["cluster_inventory_id"] = cluster.id
        utils.set_tfvars(tf_folder, nodes_details)
    tf = terraform_utils.TerraformUtils(working_dir=tf_folder)
    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                                 args.vm_network_cidr6, args.ns_index)
    create_nodes_and_wait_till_registered(inventory_client=client,
                                          cluster=cluster,
                                          nodes_details=nodes_details,
                                          tf=tf)
    if client:
        cluster_info = client.cluster_get(cluster.id)
        macs = utils.get_libvirt_nodes_macs(
            nodes_details["libvirt_network_name"])
        if not (cluster_info.api_vip and cluster_info.ingress_vip):
            utils.wait_till_hosts_with_macs_are_in_status(
                client=client,
                cluster_id=cluster.id,
                macs=macs,
                statuses=[
                    consts.NodesStatus.INSUFFICIENT,
                    consts.NodesStatus.PENDING_FOR_INPUT,
                    consts.NodesStatus.KNOWN
                ],
            )
            if args.master_count == 1:
                # Single-node: pin the node IP in terraform instead of VIPs.
                is_ip4 = machine_net.has_ip_v4 or not machine_net.has_ip_v6
                cidr = args.vm_network_cidr if is_ip4 else args.vm_network_cidr6
                tf.change_variables({
                    "single_node_ip":
                    helper_cluster.Cluster.get_ip_for_single_node(
                        client, cluster.id, cidr, ipv4_first=is_ip4)
                })
            elif args.vip_dhcp_allocation:
                set_cluster_machine_cidr(client, cluster.id, machine_net)
            else:
                set_cluster_vips(client, cluster.id, machine_net)
        else:
            log.info("VIPs already configured")
        set_hosts_roles(client, cluster, nodes_details, machine_net, tf,
                        args.master_count, args.with_static_ips)
        utils.wait_till_hosts_with_macs_are_in_status(
            client=client,
            cluster_id=cluster.id,
            macs=macs,
            statuses=[consts.NodesStatus.KNOWN],
        )
        if args.install_cluster:
            time.sleep(10)
            install_cluster.run_install_flow(
                client=client,
                cluster_id=cluster.id,
                kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH,
                pull_secret=args.pull_secret,
                tf=tf)
            # Validate DNS domains resolvability
            validate_dns(client, cluster.id)
            if args.wait_for_cvo:
                cluster_info = client.cluster_get(cluster.id)
                log.info("Start waiting till CVO status is available")
                api_vip = helper_cluster.get_api_vip_from_cluster(
                    client, cluster_info)
                config_etc_hosts(cluster_info.name,
                                 cluster_info.base_dns_domain, api_vip)
                utils.wait_for_cvo_available()
def execute_day1_flow(cluster_name):
    """Run the day-1 flow with optional kube-api support: derive network
    defaults, write terraform config, create/fetch the cluster (via REST or
    via ClusterDeployment/InstallEnv CRs), obtain the discovery ISO, and
    launch nodes_flow().

    Returns the cluster id (None when the finally-block early-return fires).
    """
    client = None
    cluster = {}
    if args.managed_dns_domains:
        args.base_dns_domain = args.managed_dns_domains.split(":")[0]
    if not args.vm_network_cidr:
        # Offset the default CIDR by the namespace index to avoid collisions.
        net_cidr = IPNetwork('192.168.126.0/24')
        net_cidr += args.ns_index
        args.vm_network_cidr = str(net_cidr)
    if not args.vm_network_cidr6:
        net_cidr = IPNetwork('1001:db8::/120')
        net_cidr += args.ns_index
        args.vm_network_cidr6 = str(net_cidr)
    if not args.network_bridge:
        args.network_bridge = f'tt{args.ns_index}'
    set_tf_config(cluster_name)
    image_path = None
    image_url = None
    image_type = args.iso_image_type
    kube_client = None
    cluster_deployment = None
    machine_net = MachineNetwork(args.ipv4, args.ipv6, args.vm_network_cidr,
                                 args.vm_network_cidr6, args.ns_index)
    if not args.image:
        utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
        client = assisted_service_api.create_client(
            url=utils.get_assisted_service_url_by_args(args=args))
        if args.cluster_id:
            cluster = client.cluster_get(cluster_id=args.cluster_id)
        elif args.kube_api:
            # Kube-api flow: create the CRs (Secret, ClusterDeployment,
            # InstallEnv) and obtain the ISO URL from the InstallEnv.
            kube_client = create_kube_api_client(
                str(pathlib.Path("~/.kube/config").expanduser()))
            cluster_deployment = ClusterDeployment(kube_api_client=kube_client,
                                                   name=cluster_name,
                                                   namespace=args.namespace)
            secret = Secret(
                kube_api_client=kube_client,
                name=cluster_name,
                namespace=args.namespace,
            )
            # Delete-then-create so reruns start from a clean resource.
            with contextlib.suppress(ApiException):
                secret.delete()
            secret.create(pull_secret=args.pull_secret)
            ipv4 = args.ipv4 and args.ipv4.lower() in MachineNetwork.YES_VALUES
            ipv6 = args.ipv6 and args.ipv6.lower() in MachineNetwork.YES_VALUES
            api_vip, ingress_vip = "", ""
            with contextlib.suppress(ApiException):
                cluster_deployment.delete()
            cluster_deployment.create(
                platform=Platform(
                    api_vip=api_vip,
                    ingress_vip=ingress_vip,
                ),
                install_strategy=InstallStrategy(
                    host_prefix=args.host_prefix
                    if ipv4 else args.host_prefix6,
                    machine_cidr=machine_net.machine_cidr_addresses[0],
                    cluster_cidr=args.cluster_network
                    if ipv4 else args.cluster_network6,
                    service_cidr=args.service_network
                    if ipv4 else args.service_network6,
                    ssh_public_key=args.ssh_key,
                    control_plane_agents=args.master_count,
                    worker_agents=args.number_of_workers,
                ),
                secret=secret,
                base_domain=args.base_dns_domain,
            )
            cluster_deployment.wait_for_state("insufficient")
            http_proxy, https_proxy, no_proxy = _get_http_proxy_params(
                ipv4=ipv4, ipv6=ipv6)
            install_env = InstallEnv(kube_api_client=kube_client,
                                     name=f"{cluster_name}-install-env",
                                     namespace=args.namespace)
            with contextlib.suppress(ApiException):
                install_env.delete()
            install_env.create(cluster_deployment=cluster_deployment,
                               secret=secret,
                               proxy=Proxy(http_proxy=http_proxy,
                                           https_proxy=https_proxy,
                                           no_proxy=no_proxy))
            install_env.status()
            image_url = install_env.get_iso_download_url()
            cluster = client.cluster_get(
                cluster_id=install_env.get_cluster_id())
        else:
            cluster = client.create_cluster(cluster_name,
                                            ssh_public_key=args.ssh_key,
                                            **_cluster_create_params())
        image_path = os.path.join(consts.IMAGE_FOLDER,
                                  f'{args.namespace}-installer-image.iso')
        if args.with_static_network_config:
            tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
            static_network_config = static_network.generate_static_network_data_from_tf(
                tf_folder)
        else:
            static_network_config = None
        if image_url is not None:
            # Kube-api flow exposed a URL; plain flow downloads via the client.
            utils.download_iso(image_url, image_path)
        else:
            client.generate_and_download_image(
                cluster_id=cluster.id,
                image_path=image_path,
                image_type=image_type,
                ssh_key=args.ssh_key,
                static_network_config=static_network_config,
            )
    # Iso only, cluster will be up and iso downloaded but vm will not be created
    if not args.iso_only:
        try:
            nodes_flow(client, cluster_name, cluster, machine_net, kube_client,
                       cluster_deployment)
        finally:
            # NOTE(review): `return` inside `finally` suppresses any exception
            # raised by nodes_flow and returns None — confirm this is intended.
            if not image_path or args.keep_iso:
                return
            log.info('deleting iso: %s', image_path)
            os.unlink(image_path)
    return cluster.id