def update_config(
    tf_config: TerraformConfig = TerraformConfig(),
    cluster_config: ClusterConfig = ClusterConfig(),
    operators=None,
):
    if operators is None:
        operators = parse_olm_operators_from_env()

    tf_config.worker_memory = resource_param(tf_config.worker_memory, OperatorResource.WORKER_MEMORY_KEY, operators)
    tf_config.master_memory = resource_param(tf_config.master_memory, OperatorResource.MASTER_MEMORY_KEY, operators)
    tf_config.worker_vcpu = resource_param(tf_config.worker_vcpu, OperatorResource.WORKER_VCPU_KEY, operators)
    tf_config.master_vcpu = resource_param(tf_config.master_vcpu, OperatorResource.MASTER_VCPU_KEY, operators)
    tf_config.workers_count = resource_param(tf_config.workers_count, OperatorResource.WORKER_COUNT_KEY, operators)
    tf_config.worker_disk = resource_param(tf_config.worker_disk, OperatorResource.WORKER_DISK_KEY, operators)
    tf_config.master_disk = resource_param(tf_config.master_disk, OperatorResource.MASTER_DISK_KEY, operators)
    tf_config.master_disk_count = resource_param(tf_config.master_disk_count, OperatorResource.MASTER_DISK_COUNT_KEY, operators)
    tf_config.worker_disk_count = resource_param(tf_config.worker_disk_count, OperatorResource.WORKER_DISK_COUNT_KEY, operators)

    cluster_config.workers_count = resource_param(cluster_config.workers_count, OperatorResource.WORKER_COUNT_KEY, operators)
    cluster_config.nodes_count = cluster_config.masters_count + cluster_config.workers_count
    # operators is already a list here (from parse_olm_operators_from_env), so assign it
    # directly instead of wrapping it in another list.
    cluster_config.olm_operators = operators
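# Usage sketch (hedged): update_config mutates both config objects in place. The
# operator name "cnv" below is illustrative only and is not taken from this code.
example_tf_config = TerraformConfig()
example_cluster_config = ClusterConfig()
update_config(tf_config=example_tf_config, cluster_config=example_cluster_config, operators=["cnv"])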
def get_nodes_func(config: TerraformConfig = TerraformConfig()):
    if "nodes" in nodes_data:
        return nodes_data["nodes"]

    nodes_data["needs_nat"] = config.platform == consts.Platforms.NONE
    nodes_data["net_asset"] = NetworkAssets()
    config.net_asset = nodes_data["net_asset"].get()

    nodes = Nodes(TerraformController(config), config.private_ssh_key_path)
    nodes.prepare_nodes()

    if nodes_data["needs_nat"]:
        nodes.configure_nat()

    nodes_data["nodes"] = nodes
    return nodes
def get_nodes_func(config: TerraformConfig = TerraformConfig()):
    if "nodes" in nodes_data:
        return nodes_data["nodes"]

    net_asset = NetworkAssets()
    config.net_asset = net_asset.get()

    nodes = Nodes(
        TerraformController(config),
        config.private_ssh_key_path,
        net_asset,
        config.platform == consts.Platforms.NONE,
    )
    nodes.prepare_nodes()

    nodes_data["nodes"] = nodes
    return nodes
def test_kube_api_ipv4(self, kube_api_context, get_nodes):
    tf_config = TerraformConfig(masters_count=1, workers_count=0, master_vcpu=8, master_memory=35840)
    cluster_config = ClusterConfig()
    kube_api_test(kube_api_context, get_nodes(tf_config), cluster_config)
def test_kube_api_ipv4(self, kube_api_context, get_nodes):
    tf_config = TerraformConfig(
        masters_count=1,
        workers_count=0,
        bootstrap_in_place=True,
        master_vcpu=8,
        master_memory=35840,
    )
    kube_api_test(kube_api_context, get_nodes(tf_config))
def new_controller_configuration(self) -> BaseNodeConfig:
    """
    Creates the controller configuration object according to the platform.
    Override this fixture in your test class to provide a custom configuration object
    :rtype: new node controller configuration
    """
    if global_variables.platform == consts.Platforms.VSPHERE:
        return VSphereControllerConfig()

    return TerraformConfig()
def gather_sosreport_data(output_dir: str):
    sosreport_output = os.path.join(output_dir, "sosreport")
    recreate_folder(sosreport_output)

    controller = LibvirtController(config=TerraformConfig(), entity_config=ClusterConfig())
    run_concurrently(
        jobs=[(gather_sosreport_from_node, node, sosreport_output) for node in controller.list_nodes()],
        timeout=60 * 20,
    )
def test_olm_operator(self, get_nodes, get_cluster, operators, update_olm_config):
    new_cluster = get_cluster(
        cluster_config=ClusterConfig(olm_operators=[operators]),
        nodes=get_nodes(update_olm_config(config=TerraformConfig(), operators=operators)),
    )
    new_cluster.prepare_for_installation()
    new_cluster.start_install_and_wait_for_installed()

    assert new_cluster.is_operator_in_status(operators, OperatorStatus.AVAILABLE)
def create_controller(net_asset):
    return TerraformController(
        TerraformConfig(
            masters_count=1,
            workers_count=0,
            master_memory=45 * 1024,  # in megabytes
            master_vcpu=16,
            net_asset=net_asset,
            bootstrap_in_place=True,
            single_node_ip=net_asset.machine_cidr.replace("0/24", "10"),
        ),
        entity_config=ClusterConfig(cluster_name=ClusterName(prefix="test-infra-cluster", suffix="")),
    )
def create_controller(net_asset):
    return TerraformController(
        TerraformConfig(
            cluster_name="test-infra-cluster",
            masters_count=1,
            workers_count=0,
            master_memory=45 * 1024,  # in megabytes
            master_vcpu=16,
            net_asset=net_asset,
            iso_download_path="<TBD>",  # will be set later on
            bootstrap_in_place=True,
            single_node_ip=net_asset.machine_cidr.replace("0/24", "10"),
        )
    )
def new_controller_configuration(self, request: FixtureRequest) -> BaseNodeConfig:
    """
    Creates the controller configuration object according to the platform.
    Override this fixture in your test class to provide a custom configuration object
    :rtype: new node controller configuration
    """
    if global_variables.platform == consts.Platforms.VSPHERE:
        config = VSphereConfig()
    else:
        config = TerraformConfig()

    self.update_parameterized(request, config)
    yield config
def new_controller_configuration(self, request) -> BaseNodeConfig:
    # Adjust the Terraform configuration according to whether we're
    # doing SNO or SNO + Worker
    if request.function == self.test_bootstrap_in_place_sno:
        return TerraformConfig(
            masters_count=1,
            workers_count=0,
            master_memory=16 * consts.MiB_UNITS,
            master_vcpu=16,
            bootstrap_in_place=True,
        )
    elif request.function == self.test_bip_add_worker:
        return TerraformConfig(
            masters_count=1,
            workers_count=1,
            master_memory=16 * consts.MiB_UNITS,
            master_vcpu=16,
            worker_memory=8 * consts.MiB_UNITS,
            worker_vcpu=16,
            bootstrap_in_place=True,
            running=False,
        )
    else:
        raise ValueError(f"Unexpected test {request.function}")
def new_controller_configuration(self, request) -> BaseNodeConfig:
    """
    Creates the controller configuration object according to the platform.
    Override this fixture in your test class to provide a custom configuration object
    :rtype: new node controller configuration
    """
    if global_variables.platform == consts.Platforms.VSPHERE:
        config = VSphereControllerConfig()
    else:
        config = TerraformConfig()

    with suppress(FixtureLookupError):
        operators = request.getfixturevalue("olm_operators")
        self.update_olm_configuration(config, operators)

    return config
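# Override sketch (hedged): the docstring above says to override this fixture in a
# test class. The @pytest.fixture decorator, the BaseTest base class, and the
# topology values below are assumptions for illustration only.
import pytest

class TestCustomTopology(BaseTest):
    @pytest.fixture
    def new_controller_configuration(self) -> BaseNodeConfig:
        return TerraformConfig(masters_count=3, workers_count=2)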
def test_kube_api_ipv6(self, kube_api_context, proxy_server, get_nodes):
    tf_config = TerraformConfig(
        masters_count=1,
        workers_count=0,
        master_vcpu=8,
        master_memory=35840,
        is_ipv6=True,
    )
    cluster_config = ClusterConfig(
        service_network_cidr='2003:db8::/112',
        cluster_network_cidr='2002:db8::/53',
        cluster_network_host_prefix=64,
        is_ipv6=True,
    )
    kube_api_test(kube_api_context, get_nodes(tf_config), cluster_config, proxy_server, is_ipv4=False)
def get_nodes_func(tf_config: TerraformConfig, cluster_config: ClusterConfig):
    if "nodes" in nodes_data:
        return nodes_data["nodes"]

    nodes_data["configs"] = cluster_config, tf_config

    net_asset = LibvirtNetworkAssets()
    tf_config.net_asset = net_asset.get()
    nodes_data["net_asset"] = net_asset

    controller = TerraformController(tf_config, cluster_config=cluster_config)
    nodes = Nodes(controller, tf_config.private_ssh_key_path)
    nodes_data["nodes"] = nodes

    nodes.prepare_nodes()

    interfaces = BaseTest.nat_interfaces(tf_config)
    nat = NatController(interfaces, NatController.get_namespace_index(interfaces[0]))
    nat.add_nat_rules()
    nodes_data["nat"] = nat

    return nodes
def get_nodes_func(config: Optional[TerraformConfig] = None):
    if not config:
        config = TerraformConfig()

    if "nodes" in nodes_data:
        return nodes_data["nodes"]

    net_asset = LibvirtNetworkAssets()
    config.net_asset = net_asset.get()

    controller = TerraformController(config)
    nodes = Nodes(controller, config.private_ssh_key_path)
    nodes.prepare_nodes()

    interfaces = BaseTest.nat_interfaces(config)
    nat = NatController(interfaces, NatController.get_namespace_index(interfaces[0]))
    nat.add_nat_rules()

    nodes_data["nodes"] = nodes
    nodes_data["config"] = config
    nodes_data["net_asset"] = net_asset
    nodes_data["nat"] = nat

    return nodes
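# Usage sketch (hedged): calling get_nodes_func without arguments builds a default
# TerraformConfig; a second call returns the Nodes object cached in nodes_data.
nodes = get_nodes_func()
assert get_nodes_func() is nodes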
def configs(self) -> Tuple[ClusterConfig, TerraformConfig]:
    """
    Get configuration objects. When using the configs fixture, the cluster and
    Terraform configs are the shared defaults. To create a new config object,
    instantiate it explicitly, e.g. ClusterConfig(masters_count=1)
    """
    yield ClusterConfig(), TerraformConfig()
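# Usage sketch (hedged): a test method on the same class unpacks the yielded pair.
# The assertion below is illustrative only.
def test_configs_example(self, configs):
    cluster_config, tf_config = configs
    assert cluster_config.workers_count == tf_config.workers_count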
def terraform_config(self) -> TerraformConfig:
    yield TerraformConfig()
def test_olm_operator(self, get_nodes, get_cluster, olm_operator):
    new_cluster = get_cluster(
        cluster_config=ClusterConfig(olm_operators=[olm_operator]),
        nodes=get_nodes(TerraformConfig(olm_operators=[olm_operator])),
    )
    new_cluster.prepare_for_installation()
    new_cluster.start_install_and_wait_for_installed()
def start_install_and_wait_for_installed(self):
    cluster_name = self.config.day1_cluster_name
    # Running twice as a workaround for an issue with terraform not spawning a new node on first apply.
    for _ in range(2):
        with utils.file_lock_context():
            utils.run_command(
                f"make _apply_terraform CLUSTER_NAME={cluster_name} PLATFORM={consts.Platforms.BARE_METAL}"
            )
        time.sleep(5)

    num_nodes_to_wait = self.config.day2_workers_count
    installed_status = consts.NodesStatus.DAY2_INSTALLED

    tfvars = utils.get_tfvars(self.config.tf_folder)
    tf_network_name = tfvars["libvirt_network_name"]

    config = TerraformConfig()
    config.nodes_count = num_nodes_to_wait
    libvirt_controller = LibvirtController(config=config, entity_config=ClusterConfig())
    libvirt_controller.wait_till_nodes_are_ready(network_name=tf_network_name)

    # Wait for day2 nodes
    waiting.wait(
        lambda: self.are_libvirt_nodes_in_cluster_hosts(),
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=10,
        waiting_for="Nodes to be registered in inventory service",
    )
    self.set_nodes_hostnames_if_needed(tf_network_name)
    wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.config.cluster_id,
        nodes_count=self.config.day2_workers_count,
        statuses=[consts.NodesStatus.KNOWN],
        interval=30,
    )

    # Start day2 nodes installation
    log.info("Start installing all known nodes in the cluster %s", self.config.cluster_id)
    kubeconfig = utils.get_kubeconfig_path(self.config.day1_cluster_name)
    ocp_ready_nodes = self.get_ocp_cluster_ready_nodes_num(kubeconfig)
    hosts = self.api_client.get_cluster_hosts(self.config.cluster_id)
    for host in hosts:
        if host["status"] == "known":
            self.api_client.install_day2_host(self.config.infra_env_id, host["id"])

    log.info(
        "Waiting until all nodes of cluster %s have been installed (reached added-to-existing-cluster)",
        self.config.cluster_id,
    )
    wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.config.cluster_id,
        nodes_count=num_nodes_to_wait,
        statuses=[installed_status],
        interval=30,
    )

    log.info("Waiting until the installed nodes have actually been added to the OCP cluster")
    waiting.wait(
        lambda: self.wait_nodes_join_ocp_cluster(ocp_ready_nodes, self.config.day2_workers_count, kubeconfig),
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=30,
        waiting_for="Day2 nodes to be added to OCP cluster",
        expected_exceptions=Exception,
    )
    log.info("%d worker nodes were successfully added to OCP cluster", self.config.day2_workers_count)