def kube_api_test_prepare_late_binding_infraenv(
        self, kube_api_context: KubeAPIContext, nodes: Nodes, infraenv_config: InfraEnvConfig
    ):
        api_client = kube_api_context.api_client
        spoke_namespace = kube_api_context.spoke_namespace
        infraenv_name = infraenv_config.entity_name.get()
        infraenv_config.iso_download_path = utils.get_iso_download_path(infraenv_name)
        nodes.prepare_nodes()

        secret = Secret(api_client, f"{infraenv_name}-secret", spoke_namespace)
        secret.create(pull_secret=infraenv_config.pull_secret)

        ignition_config_override = None

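        # Late binding: the InfraEnv is created without a ClusterDeployment reference,
        # so the discovered agents are not bound to any cluster yet.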
        infra_env = InfraEnv(api_client, f"{infraenv_name}-infra-env", spoke_namespace)
        infra_env.create(
            cluster_deployment=None,
            ignition_config_override=ignition_config_override,
            secret=secret,
            proxy=None,
            ssh_pub_key=infraenv_config.ssh_public_key,
        )

        agents = self.start_nodes(nodes, infra_env, infraenv_config, infraenv_config.is_static_ip)

        log.info("Waiting for agent status verification")
        Agent.wait_for_agents_to_be_ready_for_install(agents)

        return infra_env
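
A minimal usage sketch for the late-binding helper above (the test name and fixture wiring are assumptions about the surrounding pytest harness, not taken from the source):

def test_late_binding_discovery(self, kube_api_context, infraenv_nodes, infraenv_config):
    # Boots the nodes, creates an unbound InfraEnv, and waits for the agents to be ready.
    infra_env = self.kube_api_test_prepare_late_binding_infraenv(
        kube_api_context, infraenv_nodes, infraenv_config
    )
    # The returned InfraEnv (and its ready agents) can later be bound to a ClusterDeployment.
    assert infra_env is not None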
Example #2
        def get_nodes_func(tf_config: BaseTerraformConfig,
                           infraenv_config: InfraEnvConfig):
            if "nodes" in nodes_data:
                return nodes_data["nodes"]

            nodes_data["configs"] = infraenv_config, tf_config

            net_asset = LibvirtNetworkAssets()
            tf_config.net_asset = net_asset.get()
            nodes_data["net_asset"] = net_asset

            controller = TerraformController(tf_config,
                                             entity_config=infraenv_config)
            nodes = Nodes(controller)
            nodes_data["nodes"] = nodes

            nodes.prepare_nodes()

            interfaces = self.nat_interfaces(tf_config)
            nat = NatController(
                interfaces, NatController.get_namespace_index(interfaces[0]))
            nat.add_nat_rules()

            nodes_data["nat"] = nat

            return nodes
Example #3
 def prepare_nodes(self, nodes: Nodes,
                   cluster_configuration: ClusterConfig) -> Nodes:
     try:
         nodes.prepare_nodes()
         yield nodes
     finally:
         if global_variables.test_teardown:
             log.info("--- TEARDOWN --- node controller\n")
             nodes.destroy_all_nodes()
             log.info(
                 f"--- TEARDOWN --- deleting iso file from: {cluster_configuration.iso_download_path}\n"
             )
             utils.run_command(
                 f"rm -f {cluster_configuration.iso_download_path}",
                 shell=True)
Example #4
 def prepare_infraenv_nodes(
         self, infraenv_nodes: Nodes,
         infra_env_configuration: InfraEnvConfig) -> Nodes:
     try:
         infraenv_nodes.prepare_nodes()
         yield infraenv_nodes
     finally:
         if global_variables.test_teardown:
             log.info("--- TEARDOWN --- node controller\n")
             infraenv_nodes.destroy_all_nodes()
             log.info(
                 f"--- TEARDOWN --- deleting iso file from: {infra_env_configuration.iso_download_path}\n"
             )
             utils.run_command(
                 f"rm -f {infra_env_configuration.iso_download_path}",
                 shell=True)
    def kube_api_test(
        self,
        kube_api_context: KubeAPIContext,
        nodes: Nodes,
        cluster_config: ClusterConfig,
        prepared_controller_configuration: BaseNodeConfig,
        infra_env_configuration: BaseInfraEnvConfig,
        proxy_server: Optional[Callable] = None,
        *,
        is_disconnected: bool = False,
    ):
        cluster_name = cluster_config.cluster_name.get()
        api_client = kube_api_context.api_client
        spoke_namespace = kube_api_context.spoke_namespace

        # TODO resolve it from the service if the node controller doesn't have this information
        #  (please see cluster.get_primary_machine_cidr())

        agent_cluster_install = AgentClusterInstall(
            api_client, f"{cluster_name}-agent-cluster-install", spoke_namespace
        )

        secret = Secret(api_client, f"{cluster_name}-secret", spoke_namespace)
        secret.create(pull_secret=cluster_config.pull_secret)

        cluster_deployment = ClusterDeployment(api_client, cluster_name, spoke_namespace)
        cluster_deployment.create(agent_cluster_install_ref=agent_cluster_install.ref, secret=secret)
        proxy = self.setup_proxy(nodes, cluster_config, proxy_server)

        if is_disconnected:
            log.info("getting ignition and install config override for disconnected install")
            ca_bundle = self.get_ca_bundle_from_hub(spoke_namespace)
            self.patch_install_config_with_ca_bundle(cluster_deployment, ca_bundle)
            ignition_config_override = self.get_ignition_config_override(ca_bundle)
        else:
            ignition_config_override = None

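        # Unlike the late-binding flow above, this InfraEnv references the ClusterDeployment,
        # so discovered agents are bound to the cluster as soon as they register.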
        infra_env = InfraEnv(api_client, f"{cluster_name}-infra-env", spoke_namespace)
        infraenv = infra_env.create(
            cluster_deployment, secret, proxy, ignition_config_override, ssh_pub_key=cluster_config.ssh_public_key
        )
        cluster_config.iso_download_path = utils.get_iso_download_path(infraenv.get("metadata", {}).get("name"))
        nodes.prepare_nodes()

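        # The AgentClusterInstall carries the cluster topology (control-plane/worker agent
        # counts) and networking (cluster/service CIDRs, proxy) used for the installation.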
        agent_cluster_install.create(
            cluster_deployment_ref=cluster_deployment.ref,
            image_set_ref=self.deploy_image_set(cluster_name, api_client),
            cluster_cidr=cluster_config.cluster_networks[0].cidr,
            host_prefix=cluster_config.cluster_networks[0].host_prefix,
            service_network=cluster_config.service_networks[0].cidr,
            ssh_pub_key=cluster_config.ssh_public_key,
            hyperthreading=cluster_config.hyperthreading,
            control_plane_agents=nodes.masters_count,
            worker_agents=nodes.workers_count,
            proxy=proxy.as_dict() if proxy else {},
        )

        agent_cluster_install.wait_to_be_ready(ready=False)

        if infra_env_configuration.is_static_ip:
            self.apply_static_network_config(kube_api_context, nodes, cluster_name)

        agents = self.start_nodes(nodes, infra_env, cluster_config, infra_env_configuration.is_static_ip)

        if len(nodes) == 1:
            # for a single node, set the machine CIDR and take the actual IP from the host;
            # the VIPs are the host's own IP
            self._set_agent_cluster_install_machine_cidr(agent_cluster_install, nodes)
            # wait till the ip is set for the node and read it from its inventory
            self.set_single_node_ip(cluster_deployment, nodes)
            api_vip = ingress_vip = get_ip_for_single_node(cluster_deployment, nodes.is_ipv4)
        else:
            # for multi-node, allocate two addresses at a safe distance from the beginning
            # of the available address block to leave enough addresses for workers
            access_vips = nodes.controller.get_ingress_and_api_vips()
            api_vip = access_vips["api_vip"]
            ingress_vip = access_vips["ingress_vip"]
            # patch the ACI with the VIPs; the CIDR will be derived from the range
            agent_cluster_install.set_api_vip(api_vip)
            agent_cluster_install.set_ingress_vip(ingress_vip)

        nodes.controller.set_dns(api_ip=api_vip, ingress_ip=ingress_vip)

        log.info("Waiting for install")
        self._wait_for_install(agent_cluster_install, agents)
    def capi_test(
        self,
        kube_api_context: KubeAPIContext,
        nodes: Nodes,
        cluster_config: ClusterConfig,
        is_static_ip: bool,
        proxy_server: Optional[Callable] = None,
        *,
        is_disconnected: bool = False,
    ):
        cluster_name = cluster_config.cluster_name.get()
        api_client = kube_api_context.api_client
        spoke_namespace = kube_api_context.spoke_namespace
        cluster_config.iso_download_path = utils.get_iso_download_path(cluster_name)
        nodes.prepare_nodes()

        secret = Secret(api_client, f"{cluster_name}-secret", spoke_namespace)
        secret.create(pull_secret=cluster_config.pull_secret)

        if is_disconnected:
            log.info("getting igntion and install config override for disconected install")
            ca_bundle = self.get_ca_bundle_from_hub(spoke_namespace)
            ignition_config_override = self.get_ignition_config_override(ca_bundle)
        else:
            ignition_config_override = None

        proxy = self.setup_proxy(nodes, cluster_config, proxy_server)

        infra_env = InfraEnv(api_client, f"{cluster_name}-infra-env", spoke_namespace)
        infra_env.create(
            cluster_deployment=None,
            ignition_config_override=ignition_config_override,
            secret=secret,
            proxy=proxy,
            ssh_pub_key=cluster_config.ssh_public_key,
        )
        self.start_nodes(nodes, infra_env, cluster_config, is_static_ip)
        hypershift = HyperShift(name=cluster_name, kube_api_client=api_client)

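        # HyperShift expects the SSH key as a file path, so the public key is written to a
        # temporary file that exists only for the duration of the create() call.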
        with utils.pull_secret_file() as ps:
            with tempfile.NamedTemporaryFile(mode="w") as f:
                f.write(cluster_config.ssh_public_key)
                f.flush()
                ssh_public_key_file = f.name
                hypershift.create(
                    pull_secret_file=ps,
                    agent_namespace=spoke_namespace,
                    provider_image=os.environ.get("PROVIDER_IMAGE", ""),
                    hypershift_cpo_image=os.environ.get("HYPERSHIFT_IMAGE", ""),
                    release_image=os.environ.get("OPENSHIFT_INSTALL_RELEASE_IMAGE", ""),
                    ssh_key=ssh_public_key_file,
                )

        hypershift.wait_for_control_plane_ready()
        # WORKAROUND for ovn on minikube
        secret = Secret(api_client, "ovn-master-metrics-cert", hypershift.namespace)
        secret.create_with_data(secret_data={"ca_cert": "dummy data, we only need this secret to exist"})

        cluster_deployment = ClusterDeployment(api_client, cluster_name, f"clusters-{cluster_name}")

        def _cluster_deployment_installed() -> bool:
            return cluster_deployment.get().get("spec", {}).get("installed")

        waiting.wait(
            _cluster_deployment_installed,
            sleep_seconds=1,
            timeout_seconds=60,
            waiting_for="clusterDeployment to get created",
            expected_exceptions=Exception,
        )
        hypershift.wait_for_control_plane_ready()
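        # Exercise NodePool scaling: grow the hosted cluster to 1 and then 2 ready nodes,
        # then scale back down to 1 and wait for the released agent to become unbound.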
        self.set_node_count_and_wait_for_ready_nodes(cluster_deployment, hypershift, spoke_namespace, node_count=1)
        self.set_node_count_and_wait_for_ready_nodes(cluster_deployment, hypershift, spoke_namespace, node_count=2)
        self.scale_down_nodepool_and_wait_for_unbounded_agent(
            cluster_deployment, hypershift, spoke_namespace, node_count=1
        )