def get_cluster_func(cluster_config: Optional[ClusterConfig] = None):
    """Build, register and track a new Cluster for the current test.

    Args:
        cluster_config: optional pre-built configuration. When omitted, a
            fresh ``ClusterConfig`` is created per call. If the config has
            no ``cluster_name``, one is taken from the ``cluster_name``
            env variable, falling back to a random 10-char name.

    Returns:
        The created ``Cluster``, also appended to ``clusters`` so the
        fixture teardown can clean it up.
    """
    # BUG FIX: the previous default `cluster_config: ClusterConfig = ClusterConfig()`
    # was evaluated once at definition time, so all default-arg calls shared
    # (and mutated) the same config object. Use the None-sentinel idiom to
    # create a fresh instance on every call instead.
    if cluster_config is None:
        cluster_config = ClusterConfig()
    if not cluster_config.cluster_name:
        cluster_config.cluster_name = env_variables.get(
            'cluster_name', infra_utils.get_random_name(length=10))
    res = Cluster(api_client=api_client, config=cluster_config)
    clusters.append(res)
    return res
def get_cluster_func(cluster_name: Optional[str] = None):
    """Register a new Cluster with the service and track it for cleanup.

    A random 10-character name is generated when no ``cluster_name``
    (or an empty one) is given. The cluster is recorded in ``clusters``
    so the surrounding fixture can tear it down later.
    """
    effective_name = cluster_name or infra_utils.get_random_name(length=10)
    cluster = Cluster(api_client=api_client, cluster_name=effective_name)
    clusters.append(cluster)
    return cluster
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.
                     DEFAULT_ADDITIONAL_NTP_SOURCE):
    """Register a new Cluster with the service and track it for cleanup.

    Falls back to a random 10-character name when ``cluster_name`` is
    empty or omitted; ``additional_ntp_source`` is forwarded unchanged.
    The cluster is appended to ``clusters`` for fixture teardown.
    """
    effective_name = cluster_name or infra_utils.get_random_name(length=10)
    cluster = Cluster(api_client=api_client,
                      cluster_name=effective_name,
                      additional_ntp_source=additional_ntp_source)
    clusters.append(cluster)
    return cluster
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.
                     DEFAULT_ADDITIONAL_NTP_SOURCE,
                     openshift_version: Optional[str] = env_variables[
                         'openshift_version']):
    """Register a new Cluster with the service and track it for cleanup.

    Falls back to a random 10-character name when ``cluster_name`` is
    empty or omitted. ``additional_ntp_source`` and ``openshift_version``
    are forwarded to the Cluster unchanged. The cluster is appended to
    ``clusters`` for fixture teardown.
    """
    effective_name = cluster_name or infra_utils.get_random_name(length=10)
    cluster = Cluster(api_client=api_client,
                      cluster_name=effective_name,
                      additional_ntp_source=additional_ntp_source,
                      openshift_version=openshift_version)
    clusters.append(cluster)
    return cluster
def get_cluster_func(cluster_name: Optional[str] = None,
                     additional_ntp_source: Optional[str] = consts.DEFAULT_ADDITIONAL_NTP_SOURCE,
                     openshift_version: Optional[str] = env_variables['openshift_version'],
                     user_managed_networking=False,
                     high_availability_mode=consts.HighAvailabilityMode.FULL,
                     olm_operators=env_variables['olm_operators']):
    """Register a new Cluster with the service and track it for cleanup.

    When ``cluster_name`` is empty or omitted, the name comes from the
    ``cluster_name`` env variable, falling back to a random 10-character
    name. All remaining parameters are forwarded to the Cluster unchanged.
    The cluster is appended to ``clusters`` for fixture teardown.
    """
    if not cluster_name:
        cluster_name = env_variables.get('cluster_name',
                                         infra_utils.get_random_name(length=10))
    cluster = Cluster(api_client=api_client,
                      cluster_name=cluster_name,
                      additional_ntp_source=additional_ntp_source,
                      openshift_version=openshift_version,
                      user_managed_networking=user_managed_networking,
                      high_availability_mode=high_availability_mode,
                      olm_operators=olm_operators)
    clusters.append(cluster)
    return cluster
def cluster_deployment_context(kube_api_client: ApiClient,
                               name: Optional[str] = None,
                               **kwargs) -> ContextManager[ClusterDeployment]:
    """
    Used by tests as pytest fixture, this contextmanager function yields a
    ClusterDeployment CRD that is deployed and registered to assisted
    service, alongside to a Secret resource. When exiting context the
    resources are deleted and deregistered from the service.
    """
    deployment_name = name or get_random_name(length=8)
    cluster_deployment = deploy_default_cluster_deployment(kube_api_client,
                                                           deployment_name,
                                                           **kwargs)
    try:
        yield cluster_deployment
    finally:
        # Always clean up, even if the test body raised.
        delete_cluster_deployment(cluster_deployment)
utils.get_env('CLUSTER_NAME', f'{consts.CLUSTER_PREFIX}'),
    # NOTE(review): this span continues a dict literal whose opening
    # (presumably `env_variables = {`) is outside this view — confirm
    # against the full file. Each entry reads an env var with a default.
    "private_ssh_key_path": utils.get_env('PRIVATE_KEY_PATH',
                                          private_ssh_key_path_default),
    "kubeconfig_path": utils.get_env('KUBECONFIG', ''),
    "log_folder": utils.get_env('LOG_FOLDER', consts.LOG_FOLDER),
    "service_cidr": utils.get_env('SERVICE_CIDR', '172.30.0.0/16'),
    "cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'),
    "host_prefix": int(utils.get_env('HOST_PREFIX', '23'))
}
# Random suffix used to make per-run artifacts (ISO, kubeconfig) unique.
cluster_mid_name = infra_utils.get_random_name()
# Tests running on terraform parallel must have unique ISO file
if not qe_env:
    # Non-QE runs: embed the random suffix in the ISO filename and point
    # kubeconfig at a per-run temp file.
    image = utils.get_env(
        'ISO',
        os.path.join(
            consts.IMAGE_FOLDER,
            f'{env_variables["cluster_name"]}-{cluster_mid_name}-'
            f'installer-image.iso')).strip()
    env_variables[
        "kubeconfig_path"] = f'/tmp/test_kubeconfig_{cluster_mid_name}'
else:
    # QE runs: deterministic ISO path with no random suffix.
    image = utils.get_env('ISO',
                          os.path.join(consts.IMAGE_FOLDER,
                                       f'{env_variables["cluster_name"]}-installer-image.iso')).\
        strip()