def cluster(self, api_client: InventoryClient, request: FixtureRequest, infra_env_configuration: InfraEnvConfig,
            proxy_server, prepare_nodes_network: Nodes, cluster_configuration: ClusterConfig):
    """Yield a Cluster built on the prepared nodes; collect logs on failure and tear down afterwards.

    Sets up a proxy server first when the node topology requires one. On teardown,
    logs are gathered only for failed tests, and the cluster is cancelled/deleted
    only when global teardown is enabled.
    """
    logging.debug(f'--- SETUP --- Creating cluster for test: {request.node.name}\n')
    new_cluster = Cluster(
        api_client=api_client,
        config=cluster_configuration,
        infra_env_config=infra_env_configuration,
        nodes=prepare_nodes_network,
    )
    if self._does_need_proxy_server(prepare_nodes_network):
        self._set_up_proxy_server(new_cluster, cluster_configuration, proxy_server)

    yield new_cluster

    # --- teardown ---
    if BaseTest._is_test_failed(request):
        logging.info(f'--- TEARDOWN --- Collecting Logs for test: {request.node.name}\n')
        self.collect_test_logs(new_cluster, api_client, request, new_cluster.nodes)
    if global_variables.test_teardown:
        # An in-flight install must be cancelled before the cluster can be removed.
        if new_cluster.is_installing() or new_cluster.is_finalizing():
            new_cluster.cancel_install()
        # Best-effort delete: the cluster may already be gone on the service side.
        with suppress(ApiException):
            logging.info(f'--- TEARDOWN --- deleting created cluster {new_cluster.id}\n')
            new_cluster.delete()
def get_cluster_func(cluster_name: Optional[str] = None):
    """Create a Cluster (random name when none is given) and register it for cleanup."""
    name = cluster_name or random_name()
    new_cluster = Cluster(api_client=api_client, cluster_name=name)
    clusters.append(new_cluster)
    return new_cluster
def get_cluster_func(cluster_config: Optional[ClusterConfig] = None):
    """Create a Cluster from *cluster_config* and register it for cleanup.

    A fresh ``ClusterConfig`` is built per call when none is supplied. The previous
    signature used ``cluster_config: ClusterConfig = ClusterConfig()`` — a mutable
    default evaluated once at definition time; assigning ``cluster_config.cluster_name``
    below then leaked the generated name into every later default-argument call.

    :param cluster_config: optional cluster configuration; a name is generated
        (or taken from ``env_variables['cluster_name']``) when it has none.
    :return: the newly created ``Cluster``, also appended to ``clusters`` for teardown.
    """
    if cluster_config is None:
        cluster_config = ClusterConfig()
    if not cluster_config.cluster_name:
        cluster_config.cluster_name = env_variables.get(
            'cluster_name', infra_utils.get_random_name(length=10))
    res = Cluster(api_client=api_client, config=cluster_config)
    clusters.append(res)
    return res
def get_cluster_func(cluster_name: Optional[str] = None):
    """Create a Cluster, generating a random 10-char name when none is given."""
    name = cluster_name or infra_utils.get_random_name(length=10)
    new_cluster = Cluster(api_client=api_client, cluster_name=name)
    clusters.append(new_cluster)
    return new_cluster
def get_cluster_func(api_client, cluster_name: Optional[str] = None,
                     cluster_id: Optional[str] = None):
    """Create (or attach to, via *cluster_id*) a Cluster and register it for cleanup."""
    new_cluster = Cluster(
        api_client=api_client,
        cluster_name=cluster_name,
        cluster_id=cluster_id,
    )
    clusters.append(new_cluster)
    return new_cluster
def get_cluster_func(nodes: Nodes, cluster_config: ClusterConfig) -> Cluster:
    """Create a Cluster on *nodes*, adding a proxy server for IPv6 setups."""
    logging.debug(f'--- SETUP --- Creating cluster for test: {request.node.name}\n')
    new_cluster = Cluster(api_client=api_client, config=cluster_config, nodes=nodes)
    # IPv6 deployments cannot reach external registries directly; front them with a proxy.
    if cluster_config.is_ipv6:
        self._set_up_proxy_server(new_cluster, cluster_config, proxy_server)
    clusters.append(new_cluster)
    return new_cluster
def _set_up_proxy_server(cluster: Cluster, cluster_config, proxy_server):
    """Start a squid proxy next to the cluster network and point the cluster at it.

    Verifies afterwards that the generated install-config carries the proxy address.
    """
    proxy_name = "squid-" + cluster_config.cluster_name.suffix
    port = infra_utils.scan_for_free_port(consts.DEFAULT_PROXY_SERVER_PORT)
    machine_cidr = cluster.nodes.controller.get_machine_cidr()
    # The proxy host gets the first usable address of the machine network.
    host_ip = str(IPNetwork(machine_cidr).ip + 1)
    no_proxy = ",".join([
        machine_cidr,
        cluster_config.service_network_cidr,
        cluster_config.cluster_network_cidr,
        f".{str(cluster_config.cluster_name)}.redhat.com",
    ])
    # todo cluster.config will be property as part of MGMT-7060 - need to replace cluster._config.is_ipv6 with
    # cluster.config.is_ipv6
    proxy = proxy_server(name=proxy_name, port=port, dir=proxy_name,
                         host_ip=host_ip, is_ipv6=cluster._config.is_ipv6)
    cluster.set_proxy_values(http_proxy=proxy.address,
                             https_proxy=proxy.address,
                             no_proxy=no_proxy)
    proxy_details = cluster.get_install_config().get("proxy")
    assert proxy_details and proxy_details.get("httpProxy") == proxy.address
    assert proxy_details.get("httpsProxy") == proxy.address
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.DEFAULT_ADDITIONAL_NTP_SOURCE):
    """Create a Cluster with an optional NTP source; random name when none is given."""
    name = cluster_name or infra_utils.get_random_name(length=10)
    new_cluster = Cluster(
        api_client=api_client,
        cluster_name=name,
        additional_ntp_source=additional_ntp_source,
    )
    clusters.append(new_cluster)
    return new_cluster
def set_iptables_rules_for_nodes(
    cluster: Cluster,
    iptables_rules: List[IptableRule],
    given_nodes=None,
):
    """Insert *iptables_rules* scoped to the primary IPs of the given nodes.

    Nodes are booted briefly just to discover their addresses, then shut down
    again. Each applied rule is recorded in ``rules`` for later cleanup.
    """
    given_nodes = given_nodes or cluster.nodes.nodes
    if cluster.download_image:
        cluster.generate_and_download_infra_env(
            iso_download_path=cluster.iso_download_path)
    # Boot the nodes only to learn their IPs, then power them back off.
    cluster.nodes.start_given(given_nodes)
    given_node_ips = [node.ips[0] for node in given_nodes]
    cluster.nodes.shutdown_given(given_nodes)
    logging.info(f'Given node ips: {given_node_ips}')
    for _rule in iptables_rules:
        _rule.add_sources(given_node_ips)
        rules.append(_rule)
        _rule.insert()
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.DEFAULT_ADDITIONAL_NTP_SOURCE,
                     openshift_version: Optional[str] = env_variables['openshift_version']):
    """Create a Cluster with NTP source and OpenShift version; random name if unset."""
    name = cluster_name or infra_utils.get_random_name(length=10)
    new_cluster = Cluster(
        api_client=api_client,
        cluster_name=name,
        additional_ntp_source=additional_ntp_source,
        openshift_version=openshift_version,
    )
    clusters.append(new_cluster)
    return new_cluster
def _set_up_proxy_server(cluster: Cluster, cluster_config: ClusterConfig, proxy_server):
    """Start a squid proxy on the primary machine network and configure the cluster to use it.

    Builds a no-proxy list from all cluster/service networks plus the cluster's
    own domain, then asserts the install-config reflects the proxy address.
    """
    proxy_name = "squid-" + cluster_config.cluster_name.suffix
    port = infra_utils.scan_for_free_port(consts.DEFAULT_PROXY_SERVER_PORT)
    machine_cidr = cluster.get_primary_machine_cidr()
    # Proxy host lives at the first usable address of the machine network.
    host_ip = str(IPNetwork(machine_cidr).ip + 1)
    no_proxy_entries = (
        [str(net.cidr) for net in cluster_config.cluster_networks]
        + [str(net.cidr) for net in cluster_config.service_networks]
        + [machine_cidr, f".{str(cluster_config.cluster_name)}.redhat.com"]
    )
    no_proxy = ",".join(no_proxy_entries)
    proxy = proxy_server(name=proxy_name, port=port, dir=proxy_name,
                         host_ip=host_ip, is_ipv6=cluster.nodes.is_ipv6)
    proxy_values = models.Proxy(http_proxy=proxy.address,
                                https_proxy=proxy.address,
                                no_proxy=no_proxy)
    cluster.set_proxy_values(proxy_values=proxy_values)
    install_config = cluster.get_install_config()
    # Key casing has varied across service versions; accept either spelling.
    proxy_details = install_config.get("proxy") or install_config.get("Proxy")
    assert proxy_details, str(install_config)
    assert (
        proxy_details.get("httpsProxy") == proxy.address
    ), f"{proxy_details.get('httpsProxy')} should equal {proxy.address}"
def get_cluster_func(nodes: Nodes, cluster_config: ClusterConfig) -> Cluster:
    """Create a Cluster on *nodes* (with infra-env config), adding a proxy when needed."""
    logging.debug(f'--- SETUP --- Creating cluster for test: {request.node.name}\n')
    new_cluster = Cluster(
        api_client=api_client,
        config=cluster_config,
        nodes=nodes,
        infra_env_config=infra_env_configuration,
    )
    if self._does_need_proxy_server(nodes):
        self._set_up_proxy_server(new_cluster, cluster_config, proxy_server)
    clusters.append(new_cluster)
    return new_cluster
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.DEFAULT_ADDITIONAL_NTP_SOURCE,
                     openshift_version: Optional[str] = env_variables['openshift_version'],
                     user_managed_networking=False,
                     high_availability_mode=consts.HighAvailabilityMode.FULL,
                     olm_operators=env_variables['olm_operators']):
    """Create a fully-parameterized Cluster and register it for cleanup.

    The name falls back to ``env_variables['cluster_name']`` or a random 10-char name.
    """
    name = cluster_name or env_variables.get('cluster_name',
                                             infra_utils.get_random_name(length=10))
    new_cluster = Cluster(
        api_client=api_client,
        cluster_name=name,
        additional_ntp_source=additional_ntp_source,
        openshift_version=openshift_version,
        user_managed_networking=user_managed_networking,
        high_availability_mode=high_availability_mode,
        olm_operators=olm_operators,
    )
    clusters.append(new_cluster)
    return new_cluster