def apply_static_network_config(cluster_name, kube_client):
    """Build static-network data from the cluster's terraform folder.

    Returns None when static networking is disabled; otherwise returns the
    generated config list. In kube-api mode also applies the first host's
    network data as an NMStateConfig resource (single-node only).
    """
    if not args.with_static_network_config:
        return None

    bare_metal_tf_dir = os.path.join(
        utils.get_tf_folder(cluster_name, args.namespace),
        consts.Platforms.BARE_METAL,
    )
    static_network_config = static_network.generate_static_network_data_from_tf(bare_metal_tf_dir)

    # Non kube-api flows only need the generated data; nothing to apply here.
    if not args.kube_api:
        return static_network_config

    if args.master_count != 1:
        raise NotImplementedError("At the moment, KubeAPI workflow supports only single-node clusters")

    # Translate the terraform mac/interface map into the NMStateConfig schema.
    first_host = static_network_config[0]
    nic_entries = []
    for nic in first_host["mac_interface_map"]:
        nic_entries.append({
            "name": nic["logical_nic_name"],
            "macAddress": nic["mac_address"],
        })

    nmstate = NMStateConfig(
        kube_api_client=kube_client,
        name=f"{cluster_name}-nmstate-config",
        namespace=args.namespace,
    )
    nmstate.apply(
        config=yaml.safe_load(first_host["network_yaml"]),
        interfaces=nic_entries,
        label=cluster_name,
    )
    return static_network_config
def get_ocp_cluster(args):
    """Fetch the cluster object from the assisted service.

    When no cluster id was supplied, resolve it from the tfvars of the
    latest terraform run for the derived cluster name.
    """
    if not args.cluster_id:
        derived_name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        folder = utils.get_tf_folder(derived_name, args.namespace)
        tfvars = utils.get_tfvars(folder)
        args.cluster_id = tfvars.get('cluster_inventory_id')

    service_url = utils.get_assisted_service_url_by_args(args=args)
    client = ClientFactory.create_client(
        url=service_url,
        offline_token=utils.get_env("OFFLINE_TOKEN"),
    )
    return client.cluster_get(cluster_id=args.cluster_id)
def execute_day2_flow(cluster_id, args, day2_type_flag, has_ipv6):
    """Run the day-2 (add workers) flow for an existing cluster.

    Depending on day2_type_flag, either reuses the default OCP terraform
    prefix ("ocp") or registers a brand-new day-2 cluster with the assisted
    service, then generates/downloads the discovery image and hands off to
    day2_nodes_flow to create and install the worker nodes.
    """
    utils.recreate_folder(consts.IMAGE_FOLDER, force_recreate=False)
    client = args.api_client
    cluster = client.cluster_get(cluster_id=cluster_id)
    cluster_name = cluster.name
    openshift_version = cluster.openshift_version
    # The day-2 workers reach the installed cluster through its API VIP DNS name.
    api_vip_dnsname = "api." + cluster_name + "." + cluster.base_dns_domain
    api_vip_ip = cluster.api_vip

    terraform_cluster_dir_prefix = cluster_name
    if day2_type_flag == "ocp":
        # Reuse the default namespace terraform folder for the OCP flow.
        terraform_cluster_dir_prefix = f"{consts.CLUSTER_PREFIX}-{consts.DEFAULT_NAMESPACE}"
    else:
        # Register a fresh day-2 cluster (new id) against the assisted service;
        # cluster/cluster_id are rebound to the day-2 entity from here on.
        cluster_id = str(uuid.uuid4())
        cluster = client.create_day2_cluster(
            cluster_name + "-day2", cluster_id,
            **_day2_cluster_create_params(openshift_version, api_vip_dnsname))
        set_cluster_pull_secret(client, cluster_id, args.pull_secret)
        set_cluster_proxy(client, cluster_id, args)

    # Make the API VIP hostname resolvable locally for the install tooling.
    config_etc_hosts(api_vip_ip, api_vip_dnsname)

    image_path = os.path.join(consts.IMAGE_FOLDER, f'{cluster_name}-installer-image.iso')
    tf_folder = os.path.join(utils.get_tf_folder(terraform_cluster_dir_prefix), consts.Platforms.BARE_METAL)
    set_day2_tf_configuration(tf_folder, args.num_day2_workers, api_vip_ip, api_vip_dnsname)

    static_network_config = None
    if args.with_static_network_config:
        static_network_config = static_network.generate_day2_static_network_data_from_tf(
            tf_folder, args.num_day2_workers)

    # Discovery ISO for the new workers (with optional static networking baked in).
    client.generate_and_download_image(
        cluster_id=cluster.id, image_path=image_path, ssh_key=args.ssh_key,
        static_network_config=static_network_config)

    day2_nodes_flow(
        client,
        terraform_cluster_dir_prefix,
        tf_folder,
        cluster,
        has_ipv6,
        args.num_day2_workers,
        args.install_cluster,
        day2_type_flag,
        args.with_static_network_config,
        cluster_name,
    )
def nodes_flow_kube_api(cluster_name, machine_net, cluster_deployment, agent_cluster_install):
    """Create cluster nodes and drive installation via the kube-api workflow.

    Boots the terraform-defined VMs, waits for their agents to register
    against the given ClusterDeployment, configures the single-node IP when
    applicable, fixes agent hostnames, and optionally runs installation.
    """
    base_tf_dir = utils.get_tf_folder(cluster_name, args.namespace)
    bm_tf_dir = os.path.join(base_tf_dir, consts.Platforms.BARE_METAL)
    tfvars = utils.get_tfvars(bm_tf_dir)
    tf_utils = terraform_utils.TerraformUtils(working_dir=bm_tf_dir)

    # Primary stack is IPv4 unless the network is IPv6-only.
    ipv4_primary = machine_net.has_ip_v4 or not machine_net.has_ip_v6
    total_nodes = args.master_count + args.number_of_workers

    create_nodes_and_wait_till_registered(
        inventory_client=None,
        cluster=None,
        nodes_details=tfvars,
        tf=tf_utils,
        is_ipv4=ipv4_primary,
        nodes_number=total_nodes,
        cluster_deployment=cluster_deployment,
    )

    if args.master_count == 1:
        primary_cidr = args.vm_network_cidr if ipv4_primary else args.vm_network_cidr6
        set_single_node_ip(
            client=None,
            cluster_id=None,
            main_cidr=primary_cidr,
            is_ipv4=ipv4_primary,
            cluster_deployment=cluster_deployment,
            tf=tf_utils,
        )
    else:
        log.info("VIPs already configured")

    kubeapi_utils.set_agents_hostnames(
        cluster_deployment=cluster_deployment,
        is_ipv4=ipv4_primary,
        static_network_mode=args.with_static_network_config,
        tf=tf_utils,
        nodes_number=total_nodes,
    )

    if args.install_cluster:
        install_cluster.run_installation_flow_kube_api(
            cluster_deployment=cluster_deployment,
            agent_cluster_install=agent_cluster_install,
            nodes_number=total_nodes,
            kubeconfig_path=utils.get_kubeconfig_path(cluster_name),
        )
def set_tf_config(cluster_name):
    """(Re)create the terraform folder for the cluster and fill its tfvars."""
    node_details = _create_node_details(cluster_name)

    # Start from a clean copy of the terraform templates.
    tf_dir = utils.get_tf_folder(cluster_name, args.namespace)
    utils.recreate_folder(tf_dir)
    utils.copy_template_tree(tf_dir)
    bm_template_dir = os.path.join(tf_dir, consts.Platforms.BARE_METAL)

    net = MachineNetwork(
        args.ipv4,
        args.ipv6,
        args.vm_network_cidr,
        args.vm_network_cidr6,
        args.ns_index,
    )
    fallback_image = os.path.join(consts.IMAGE_FOLDER, f'{args.namespace}-installer-image.iso')

    fill_tfvars(
        image_path=args.image or fallback_image,
        storage_path=args.storage_path,
        master_count=args.master_count,
        nodes_details=node_details,
        tf_folder=bm_template_dir,
        machine_net=net,
    )
def main():
    """Entry point: resolve the target cluster and run the install flow."""
    _verify_kube_download_folder(args.kubeconfig_path)
    log.info("Creating assisted service client")

    tf = None
    if not args.cluster_id:
        # No cluster id supplied — recover it from the latest terraform run.
        name = f'{args.cluster_name or consts.CLUSTER_PREFIX}-{args.namespace}'
        folder = utils.get_tf_folder(name, args.namespace)
        args.cluster_id = utils.get_tfvars(folder).get('cluster_inventory_id')
        tf = terraform_utils.TerraformUtils(working_dir=folder)

    service_url = utils.get_assisted_service_url_by_args(args=args, wait=False)
    client = ClientFactory.create_client(
        url=service_url,
        offline_token=utils.get_env("OFFLINE_TOKEN"),
    )

    run_install_flow(
        client=client,
        cluster_id=args.cluster_id,
        kubeconfig_path=args.kubeconfig_path,
        pull_secret=args.pull_secret,
        tf=tf,
    )
def nodes_flow(
        client,
        cluster_name,
        cluster,
        machine_net,
        cluster_deployment=None,
        agent_cluster_install=None,
):
    """Create the cluster's VMs and drive them through discovery/install.

    Boots the terraform nodes, waits for host registration, then (when a
    REST client is available) configures networking — single-node IP,
    machine CIDR, VIPs or DHCP allocation — assigns host roles/hostnames,
    sets up the none-platform load balancer when needed, and optionally
    runs installation and DNS validation.
    """
    tf_folder = utils.get_tf_folder(cluster_name, args.namespace)
    nodes_details = utils.get_tfvars(tf_folder)
    if cluster:
        # Persist the service-side cluster id into the tfvars for later runs.
        nodes_details["cluster_inventory_id"] = cluster.id
        utils.set_tfvars(tf_folder, nodes_details)

    tf = terraform_utils.TerraformUtils(working_dir=tf_folder)
    # Primary stack is IPv4 unless the machine network is IPv6-only.
    is_ipv4 = machine_net.has_ip_v4 or not machine_net.has_ip_v6
    nodes_number = args.master_count + args.number_of_workers

    create_nodes_and_wait_till_registered(
        inventory_client=client,
        cluster=cluster,
        nodes_details=nodes_details,
        tf=tf,
        is_ipv4=is_ipv4,
        nodes_number=nodes_number,
        cluster_deployment=cluster_deployment,
    )

    main_cidr = args.vm_network_cidr if is_ipv4 else args.vm_network_cidr6
    secondary_cidr = machine_net.provisioning_cidr_v4 if is_ipv4 else machine_net.provisioning_cidr_v6

    if client:
        cluster_info = client.cluster_get(cluster.id)
        macs = get_libvirt_nodes_macs(nodes_details["libvirt_network_name"])

        if is_none_platform_mode():
            # none-platform nodes may also sit on the secondary network.
            macs += get_libvirt_nodes_macs(nodes_details["libvirt_secondary_network_name"])

        if not (cluster_info.api_vip and cluster_info.ingress_vip):
            if not args.kube_api:
                # Hosts must at least be discovered before network config is applied.
                wait_till_hosts_with_macs_are_in_status(
                    client=client,
                    cluster_id=cluster.id,
                    macs=macs,
                    statuses=[
                        consts.NodesStatus.INSUFFICIENT,
                        consts.NodesStatus.PENDING_FOR_INPUT,
                        consts.NodesStatus.KNOWN
                    ],
                )

            if args.master_count == 1:
                # Single-node: pin the node's IP, then set the machine CIDR
                # explicitly (no VIPs / DHCP allocation in SNO).
                set_single_node_ip(
                    client=client,
                    cluster_id=cluster.id,
                    main_cidr=main_cidr,
                    is_ipv4=is_ipv4,
                    cluster_deployment=cluster_deployment,
                    tf=tf,
                )
                if not args.kube_api:
                    set_cluster_machine_cidr(
                        client=client,
                        cluster_id=cluster.id,
                        machine_net=machine_net,
                        set_vip_dhcp_allocation=False,
                    )
            elif is_none_platform_mode():
                # none-platform: VIPs are handled by the external load balancer below.
                pass
            elif args.vip_dhcp_allocation and not args.kube_api:
                # Let the service allocate VIPs via DHCP from the machine CIDR.
                set_cluster_machine_cidr(client, cluster.id, machine_net)
            else:
                set_cluster_vips(client, cluster.id, machine_net)
        else:
            log.info("VIPs already configured")

        if args.kube_api:
            kubeapi_utils.set_agents_hostnames(
                cluster_deployment=cluster_deployment,
                is_ipv4=is_ipv4,
                static_network_mode=args.with_static_network_config,
                tf=tf,
                nodes_number=nodes_number,
            )
        else:
            set_hosts_roles(
                client=client,
                cluster=cluster,
                nodes_details=nodes_details,
                machine_net=machine_net,
                tf=tf,
                master_count=args.master_count,
                static_network_mode=args.with_static_network_config,
            )

        if is_none_platform_mode() and args.master_count > 1:
            # Multi-node none-platform: front the masters/workers with a
            # load balancer spanning both the main and secondary CIDRs.
            master_ips = helper_cluster.Cluster.get_master_ips(client, cluster.id, main_cidr) + helper_cluster.Cluster.get_master_ips(
                client, cluster.id, secondary_cidr)
            worker_ips = helper_cluster.Cluster.get_worker_ips(client, cluster.id, main_cidr) + helper_cluster.Cluster.get_worker_ips(
                client, cluster.id, secondary_cidr)
            if not worker_ips:
                # No dedicated workers — balance ingress across the masters.
                worker_ips = master_ips
            load_balancer_ip = _get_host_ip_from_cidr(
                machine_net.cidr_v6 if machine_net.has_ip_v6 and not machine_net.has_ip_v4 else machine_net.cidr_v4)
            lb_controller = LoadBalancerController(tf)
            lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)

        if not args.kube_api:
            # All hosts must reach "known" before installation can start.
            wait_till_hosts_with_macs_are_in_status(
                client=client,
                cluster_id=cluster.id,
                macs=macs,
                statuses=[consts.NodesStatus.KNOWN],
            )

        if args.vip_dhcp_allocation:
            # Reflect the DHCP-allocated VIPs back into terraform state.
            vips_info = helper_cluster.Cluster.get_vips_from_cluster(client, cluster.id)
            tf.set_new_vips(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])

        if args.install_cluster:
            install_cluster.run_install_flow(
                client=client,
                cluster_id=cluster.id,
                kubeconfig_path=utils.get_kubeconfig_path(cluster_name),
                pull_secret=args.pull_secret,
                tf=tf,
                cluster_deployment=cluster_deployment,
                agent_cluster_install=agent_cluster_install,
                nodes_number=nodes_number,
            )
            # Validate DNS domains resolvability
            validate_dns(client, cluster.id)