def set_hostnames(self, cluster):
    """Push valid hostnames to the cluster hosts when DHCP cannot provide them.

    With IPv6 on libvirt, hostnames are not handed out by DHCP; with static
    IPs the hosts come up as 'localhost'. In either case the real node names
    are recovered from terraform's tfstate and sent via the cluster API.
    """
    hostnames_needed = (env_variables.get('ipv6')
                        or env_variables.get('static_ips_config'))
    if not hostnames_needed:
        return

    net_name = self.controller.params.libvirt_network_name
    tf_nodes = utils.get_libvirt_nodes_from_tf_state(
        net_name, self.controller.tf.get_state())
    utils.update_hosts(cluster.api_client, cluster.id, tf_nodes,
                       update_hostnames=True)
def start_all(self):
    """Start every node.

    The static-IP configuration step is skipped unless the environment
    provides 'static_ips_config'; the boolean is passed straight through
    to run_for_all_nodes as its skip flag.
    """
    static_ips_config = env_variables.get('static_ips_config')
    # skip_ips is True exactly when no static-IP config was supplied —
    # the verbose if/else the original used reduces to a single `not`.
    self.run_for_all_nodes("start", not static_ips_config)
def prepare_for_install(
        self,
        nodes,
        iso_download_path=env_variables['iso_download_path'],
        iso_image_type=env_variables['iso_image_type'],
        ssh_key=env_variables['ssh_public_key'],
        nodes_count=env_variables['num_nodes'],
        vip_dhcp_allocation=env_variables['vip_dhcp_allocation'],
        download_image=True):
    """Bring the cluster to the ready-to-install state.

    Optionally generates/downloads the discovery image (with static network
    config taken from terraform when 'static_network_config' is set), boots
    all nodes, waits for discovery, assigns hostnames and roles (or the
    single-node IP in SNO mode), sets network parameters, and finally waits
    for ready-to-install.
    """
    if download_image:
        static_network_config = None
        if env_variables.get('static_network_config'):
            static_network_config = \
                static_network.generate_static_network_data_from_tf(
                    nodes.controller.tf_folder)
        self.generate_and_download_image(
            iso_download_path=iso_download_path,
            iso_image_type=iso_image_type,
            ssh_key=ssh_key,
            static_network_config=static_network_config)

    nodes.start_all()
    self.wait_until_hosts_are_discovered(nodes_count=nodes_count,
                                         allow_insufficient=True)
    nodes.set_hostnames(self)

    # SNO clusters get a single-node IP instead of role assignment.
    if self._high_availability_mode == consts.HighAvailabilityMode.NONE:
        nodes.set_single_node_ip(self)
    else:
        self.set_host_roles()

    self.set_network_params(
        controller=nodes.controller,
        vip_dhcp_allocation=vip_dhcp_allocation,
    )
    self.wait_for_ready_to_install()
def prepare_for_install(
        self,
        nodes,
        iso_download_path=env_variables['iso_download_path'],
        iso_image_type=env_variables['iso_image_type'],
        ssh_key=env_variables['ssh_public_key'],
        nodes_count=env_variables['num_nodes'],
        vip_dhcp_allocation=env_variables['vip_dhcp_allocation'],
        download_image=True):
    """Bring the cluster to the ready-to-install state (static-IPs variant).

    Optionally generates/downloads the discovery image (static IPs data is
    derived from terraform when 'static_ips_config' is set), boots the nodes,
    waits for host discovery, then sets hostnames, roles and network
    parameters before waiting for ready-to-install.
    """
    if download_image:
        static_ips_data = None
        if env_variables.get('static_ips_config'):
            static_ips_data = static_ips.generate_static_ips_data_from_tf(
                nodes.controller.tf_folder)
        self.generate_and_download_image(
            iso_download_path=iso_download_path,
            iso_image_type=iso_image_type,
            ssh_key=ssh_key,
            static_ips=static_ips_data)

    nodes.start_all()
    self.wait_until_hosts_are_discovered(nodes_count=nodes_count)
    nodes.set_hostnames(self)
    self.set_host_roles()
    self.set_network_params(
        controller=nodes.controller,
        vip_dhcp_allocation=vip_dhcp_allocation,
    )
    self.wait_for_ready_to_install()
def get_cluster_func(cluster_config: ClusterConfig = None):
    """Create a Cluster from *cluster_config* and register it for teardown.

    A missing cluster name is filled in from the 'cluster_name' env variable,
    falling back to a random name.

    Bug fix: the original used `cluster_config=ClusterConfig()` as the
    default — a mutable default evaluated once at definition time and then
    mutated here (`cluster_config.cluster_name = ...`), so every no-argument
    call shared the same config object and kept the first random name.
    A None sentinel creates a fresh config per call; explicit callers are
    unaffected.
    """
    if cluster_config is None:
        cluster_config = ClusterConfig()
    if not cluster_config.cluster_name:
        cluster_config.cluster_name = env_variables.get(
            'cluster_name', infra_utils.get_random_name(length=10))
    res = Cluster(api_client=api_client, config=cluster_config)
    clusters.append(res)  # tracked so the fixture can clean up afterwards
    return res
def __init__(self, api_client, cluster_name=None, additional_ntp_source=None,
             openshift_version="4.7", cluster_id=None,
             user_managed_networking=False,
             high_availability_mode=consts.HighAvailabilityMode.FULL,
             olm_operators=None):
    """Wrap an existing cluster (when *cluster_id* is given) or create one.

    Without a cluster_id, a cluster is created via the API using
    *cluster_name* (defaulting to the 'cluster_name' env variable, then to
    "test-infra-cluster").
    """
    self.api_client = api_client
    self._high_availability_mode = high_availability_mode

    if not cluster_id:
        # No existing cluster — create one and record its id.
        cluster_name = cluster_name or env_variables.get(
            'cluster_name', "test-infra-cluster")
        created = self._create(
            cluster_name,
            additional_ntp_source,
            openshift_version,
            user_managed_networking=user_managed_networking,
            high_availability_mode=high_availability_mode,
            olm_operators=olm_operators)
        self.id = created.id
    else:
        self.id = cluster_id

    # NOTE: when attaching to an existing cluster_id with no cluster_name
    # argument, name stays None — matches the original behavior.
    self.name = cluster_name
def collect_test_logs(self, cluster, api_client, test: pytest.Function, nodes: Nodes):
    """Collect cluster API logs plus virsh and journalctl logs for a test.

    Logs land under <log_folder>/<test name>. API-side collection is
    best-effort: any ApiException from fetching cluster details or
    downloading logs is suppressed so node-side collection still runs.
    """
    log_dir_name = f"{env_variables['log_folder']}/{test.name}"
    with suppress(ApiException):
        # dumps/loads round-trip with default=str normalizes non-JSON
        # values (e.g. datetimes) in the details dict to strings.
        cluster_details = json.loads(json.dumps(cluster.get_details().to_dict(),
                                                sort_keys=True, default=str))
        download_logs(api_client, cluster_details, log_dir_name,
                      test.result_call.failed,
                      pull_secret=env_variables.get("pull_secret"))
    # NOTE(review): original formatting was lost; these two calls are placed
    # outside the suppress block (they do not raise ApiException) — confirm
    # against upstream if exact suppress scope matters.
    self._collect_virsh_logs(nodes, log_dir_name)
    self._collect_journalctl(nodes, log_dir_name)
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.DEFAULT_ADDITIONAL_NTP_SOURCE,
                     openshift_version: Optional[str] = env_variables['openshift_version'],
                     user_managed_networking=False,
                     high_availability_mode=consts.HighAvailabilityMode.FULL,
                     olm_operators=env_variables['olm_operators']):
    """Create a Cluster with the given parameters and register it for teardown.

    A missing *cluster_name* falls back to the 'cluster_name' env variable,
    then to a random 10-character name.
    """
    cluster_name = cluster_name or env_variables.get(
        'cluster_name', infra_utils.get_random_name(length=10))
    new_cluster = Cluster(
        api_client=api_client,
        cluster_name=cluster_name,
        additional_ntp_source=additional_ntp_source,
        openshift_version=openshift_version,
        user_managed_networking=user_managed_networking,
        high_availability_mode=high_availability_mode,
        olm_operators=olm_operators)
    clusters.append(new_cluster)  # tracked so the fixture can clean up afterwards
    return new_cluster
from tests.conftest import env_variables

# CRD/Hive API coordinates used when talking to the kube API.
CRD_API_GROUP = 'agent-install.openshift.io'
CRD_API_VERSION = 'v1beta1'
HIVE_API_GROUP = 'hive.openshift.io'
HIVE_API_VERSION = 'v1'

# Network defaults, overridable through the test environment; VIP/machine-CIDR
# values default to empty strings when not configured.
DEFAULT_API_VIP = env_variables.get('api_vip', '')
DEFAULT_API_VIP_DNS_NAME = env_variables.get('api_vip_dns_name', '')
DEFAULT_INGRESS_VIP = env_variables.get('ingress_vip', '')
DEFAULT_MACHINE_CIDR = env_variables.get('machine_cidr', '')
DEFAULT_CLUSTER_CIDR = env_variables.get('cluster_cidr', '172.30.0.0/16')
DEFAULT_SERVICE_CIDR = env_variables.get('service_cidr', '10.128.0.0/14')

# Timeouts (seconds).
_MINUTE = 60
_HOUR = 60 * _MINUTE
DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT = 5 * _MINUTE
DEFAULT_WAIT_FOR_CRD_STATE_TIMEOUT = 5 * _MINUTE
DEFAULT_WAIT_FOR_AGENTS_TIMEOUT = 5 * _MINUTE
DEFAULT_WAIT_FOR_INSTALLATION_COMPLETE_TIMEOUT = 2 * _HOUR
DEFAULT_WAIT_FOR_ISO_URL_TIMEOUT = 5 * _MINUTE