def create_nodes_and_wait_till_registered(cluster_name, inventory_client,
                                          cluster, image_path, storage_path,
                                          master_count, nodes_details, tf,
                                          machine_net):
    """Create the cluster nodes and block until all of them are registered.

    When an inventory (assisted-service) client is available, registration of
    the hosts in the cluster is awaited; otherwise only the libvirt DHCP
    leases of the created nodes are awaited.
    """
    total_nodes = master_count + nodes_details["worker_count"]
    create_nodes(
        cluster_name=cluster_name,
        image_path=image_path,
        storage_path=storage_path,
        master_count=master_count,
        nodes_details=nodes_details,
        tf=tf,
        machine_net=machine_net,
    )
    # TODO: Check for only new nodes
    if inventory_client:
        log.info("Wait till nodes will be registered")
        # Registration in the assisted service is checked instead of relying
        # on libvirt leases: this works around a libvirt bug where not all
        # DHCP leases are reported.
        utils.wait_till_all_hosts_are_in_status(
            client=inventory_client,
            cluster_id=cluster.id,
            nodes_count=total_nodes,
            statuses=[
                consts.NodesStatus.INSUFFICIENT,
                consts.NodesStatus.PENDING_FOR_INPUT,
            ])
    else:
        # No assisted-service connection: the only available signal is the
        # nodes' DHCP leases on the libvirt network.
        utils.wait_till_nodes_are_ready(
            nodes_count=total_nodes,
            network_name=nodes_details["libvirt_network_name"])
        log.info("No inventory url, will not wait till nodes registration")
def create_nodes_and_wait_till_registered(cluster_name, inventory_client,
                                          cluster, image_path, storage_path,
                                          master_count, nodes_details, tf,
                                          machine_net):
    """Create the cluster nodes, wait for their DHCP leases, and — when an
    inventory client is given — wait until they register with the service.
    """
    total_nodes = master_count + nodes_details["worker_count"]
    create_nodes(
        cluster_name=cluster_name,
        image_path=image_path,
        storage_path=storage_path,
        master_count=master_count,
        nodes_details=nodes_details,
        tf=tf,
        machine_net=machine_net,
    )
    # TODO: Check for only new nodes
    utils.wait_till_nodes_are_ready(
        nodes_count=total_nodes,
        network_name=nodes_details["libvirt_network_name"])
    if not inventory_client:
        log.info("No inventory url, will not wait till nodes registration")
        return
    log.info("Wait till nodes will be registered")

    def _all_nodes_registered():
        # True once every libvirt node appears among the cluster's hosts.
        return utils.are_all_libvirt_nodes_in_cluster_hosts(
            inventory_client, cluster.id,
            nodes_details["libvirt_network_name"])

    waiting.wait(
        _all_nodes_registered,
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=10,
        waiting_for="Nodes to be registered in inventory service",
    )
def _create_nodes(self, running=True):
    """Generate tfvars and apply terraform to create the node VMs.

    Args:
        running: when True (default), block until the created nodes report
            ready (have a DHCP lease) on the libvirt network.
    """
    logging.info("Creating tfvars")
    self._fill_tfvars()
    logging.info('Start running terraform')
    self.tf.apply()
    # Fix: honor the `running` argument. Previously the parameter was dead
    # and `self.params.running` was consulted instead, so callers passing
    # running=False were ignored. The default of True keeps the common call
    # path's behavior.
    if running:
        utils.wait_till_nodes_are_ready(
            nodes_count=self.params.worker_count + self.params.master_count,
            network_name=self.params.libvirt_network_name)
def wait_until_nodes_are_registered_rest_api(
        inventory_client,
        cluster,
        nodes_details,
        is_ipv4,
        nodes_number,
):
    """Wait until the created nodes are registered via the REST API.

    Without an inventory client only the libvirt DHCP leases are awaited;
    with one, hosts must reach a registration status in the cluster.
    """
    # TODO: Check for only new nodes
    if not inventory_client:
        # No assisted-service connection: the only available signal is the
        # nodes' DHCP leases on the libvirt network.
        utils.wait_till_nodes_are_ready(
            nodes_count=nodes_number,
            network_name=nodes_details["libvirt_network_name"],
        )
        log.info("No inventory url, will not wait till nodes registration")
        return

    log.info("Wait till nodes will be registered")
    # Registration in the assisted service is checked instead of relying on
    # libvirt leases: this works around a libvirt bug where not all DHCP
    # leases are reported.
    expected_statuses = [
        consts.NodesStatus.INSUFFICIENT,
        consts.NodesStatus.PENDING_FOR_INPUT,
    ]
    if nodes_details['master_count'] == 1 or is_none_platform_mode():
        # Single-node and none-platform clusters may go straight to KNOWN.
        expected_statuses.append(consts.NodesStatus.KNOWN)

    if is_ipv4 and is_none_platform_mode(
    ) and nodes_details['master_count'] > 1:
        # Multi-node none-platform over IPv4 needs NAT rules for the bridge
        # (and its secondary "s"-prefixed counterpart).
        bridges = [args.network_bridge, f"s{args.network_bridge}"]
        NatController().add_nat_rules(bridges, args.ns_index)

    utils.wait_till_all_hosts_are_in_status(
        client=inventory_client,
        cluster_id=cluster.id,
        nodes_count=nodes_number,
        statuses=expected_statuses,
    )
def day2_nodes_flow(client, terraform_cluster_dir_prefix, tf_folder, cluster,
                    has_ipv_6, num_worker_nodes, api_vip_ip, api_vip_dnsname,
                    install_cluster_flag, day2_type_flag,
                    with_static_network_config, base_cluster_name):
    """Provision day-2 nodes with terraform, wait until they register with the
    assisted service, and (optionally) install them and wait until they join
    the existing OCP cluster.

    Note: api_vip_ip and api_vip_dnsname are accepted for interface
    compatibility but are not referenced in this body — TODO confirm whether
    callers still need them.
    """
    tf_network_name, total_num_nodes = get_network_num_nodes_from_tf(tf_folder)

    # Apply terraform under a file lock so concurrent flows don't race on the
    # shared terraform state.
    with utils.file_lock_context():
        utils.run_command(
            f'make _apply_terraform CLUSTER_NAME={terraform_cluster_dir_prefix}'
        )
    time.sleep(5)

    # "ocp" day2 waits on every node with the day-1 INSTALLED status;
    # otherwise only the new workers are awaited with DAY2_INSTALLED.
    if day2_type_flag == "ocp":
        num_nodes_to_wait = total_num_nodes
        installed_status = consts.NodesStatus.INSTALLED
    else:
        num_nodes_to_wait = num_worker_nodes
        installed_status = consts.NodesStatus.DAY2_INSTALLED

    utils.wait_till_nodes_are_ready(nodes_count=num_nodes_to_wait,
                                    network_name=tf_network_name)
    waiting.wait(
        lambda: utils.are_libvirt_nodes_in_cluster_hosts(
            client, cluster.id, num_nodes_to_wait),
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=10,
        waiting_for="Nodes to be registered in inventory service",
    )

    set_nodes_hostnames_if_needed(client, tf_folder,
                                  with_static_network_config, has_ipv_6,
                                  tf_network_name, cluster.id)

    utils.wait_till_all_hosts_are_in_status(
        client=client,
        cluster_id=cluster.id,
        nodes_count=num_worker_nodes,
        statuses=[consts.NodesStatus.KNOWN],
        interval=30,
    )

    if install_cluster_flag:
        log.info("Start installing all known nodes in the cluster %s",
                 cluster.id)
        kubeconfig = utils.get_kubeconfig_path(base_cluster_name)
        ocp_orig_ready_nodes = get_ocp_cluster_ready_nodes_num(kubeconfig)
        hosts = client.get_cluster_hosts(cluster.id)
        # Fix: use a plain loop instead of a list comprehension evaluated
        # only for its side effects.
        for host in hosts:
            if host["status"] == 'known':
                client.install_day2_host(cluster.id, host['id'])
        # Fix: repaired the previously garbled log message
        # ("installed( reached added-to-existing-clustertate)").
        log.info(
            "Start waiting until all nodes of cluster %s have been installed "
            "(reached added-to-existing-cluster state)", cluster.id)
        utils.wait_till_all_hosts_are_in_status(
            client=client,
            cluster_id=cluster.id,
            nodes_count=num_nodes_to_wait,
            statuses=[installed_status],
            interval=30,
        )
        log.info(
            "Start waiting until installed nodes has actually been added to "
            "the OCP cluster"
        )
        waiting.wait(
            lambda: wait_nodes_join_ocp_cluster(
                ocp_orig_ready_nodes, num_worker_nodes, day2_type_flag,
                kubeconfig),
            timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
            sleep_seconds=30,
            waiting_for="Day2 nodes to be added to OCP cluster",
            expected_exceptions=Exception,
        )
        log.info("%d worker nodes were successfully added to OCP cluster",
                 num_worker_nodes)