def add_nodes(self): """ Add new nodes to the cluster """ # create separate directory for scale-up terraform data scaleup_terraform_data_dir = os.path.join( self.cluster_path, constants.TERRAFORM_DATA_DIR, constants.SCALEUP_TERRAFORM_DATA_DIR) create_directory_path(scaleup_terraform_data_dir) logger.info( f"scale-up terraform data directory: {scaleup_terraform_data_dir}") # git clone repo from openshift-misc clone_repo(constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path) # modify scale-up repo self.modify_scaleup_repo() config.ENV_DATA['vsphere_resource_pool'] = config.ENV_DATA.get( "cluster_name") # sync guest time with host if config.ENV_DATA.get('sync_time_with_host'): sync_time_with_host(constants.SCALEUP_VSPHERE_MACHINE_CONF, True) # get the RHCOS worker list self.rhcos_ips = get_node_ips() logger.info(f"RHCOS IP's: {json.dumps(self.rhcos_ips)}") # generate terraform variable for scaling nodes self.generate_terraform_vars_for_scaleup() # Add nodes using terraform scaleup_terraform = Terraform(constants.SCALEUP_VSPHERE_DIR) previous_dir = os.getcwd() os.chdir(scaleup_terraform_data_dir) scaleup_terraform.initialize() scaleup_terraform.apply(self.scale_up_terraform_var) scaleup_terraform_tfstate = os.path.join(scaleup_terraform_data_dir, "terraform.tfstate") out = scaleup_terraform.output(scaleup_terraform_tfstate, "rhel_worker") rhel_worker_nodes = json.loads(out)['value'] logger.info(f"RHEL worker nodes: {rhel_worker_nodes}") os.chdir(previous_dir) # Install OCP on rhel nodes rhel_install = OCPINSTALLRHEL(rhel_worker_nodes) rhel_install.prepare_rhel_nodes() rhel_install.execute_ansible_playbook() # Giving some time to settle down the new nodes time.sleep(self.wait_time) # wait for nodes to be in READY state wait_for_nodes_status(timeout=300)
class OCPDeployment(BaseOCPDeployment):
    """
    OCP deployment for vSphere UPI.

    Drives ignition generation, terraform provisioning and the
    openshift-installer wait phases.  Relies on module-level helpers
    (clone_openshift_installer, update_machine_conf, ...) and on
    ``config.ENV_DATA`` being populated by the framework.
    """

    def __init__(self):
        super(VSPHEREUPI.OCPDeployment, self).__init__()
        self.public_key = {}
        # local checkout location of the openshift installer repo
        self.upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR, 'installer'
        )
        # remembered so deploy() can chdir back after running terraform
        self.previous_dir = os.getcwd()
        self.terraform_data_dir = os.path.join(self.cluster_path, constants.TERRAFORM_DATA_DIR)
        create_directory_path(self.terraform_data_dir)
        self.terraform_work_dir = constants.VSPHERE_DIR
        self.terraform = Terraform(self.terraform_work_dir)

    def deploy_prereq(self):
        """
        Pre-Requisites for vSphere UPI Deployment
        """
        super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
        # create ignitions
        self.create_ignitions()
        self.kubeconfig = os.path.join(self.cluster_path, config.RUN.get('kubeconfig_location'))

        # git clone repo from openshift installer
        clone_openshift_installer()

        # upload bootstrap ignition to public access server so the
        # bootstrap VM can fetch it over HTTP
        bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
        remote_path = os.path.join(
            config.ENV_DATA.get('path_to_upload'),
            f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}"
        )
        upload_file(
            config.ENV_DATA.get('httpd_server'),
            bootstrap_path,
            remote_path,
            config.ENV_DATA.get('httpd_server_user'),
            config.ENV_DATA.get('httpd_server_password')
        )

        # generate bootstrap ignition url
        # (strip the docroot prefix to build the public URL)
        path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
        bootstrap_ignition_url = (
            f"http://{config.ENV_DATA.get('httpd_server')}/"
            f"{path_to_bootstrap_on_remote}"
        )
        logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
        config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

        # load master and worker ignitions to variables
        # (consumed later by the terraform tfvars template)
        master_ignition_path = os.path.join(
            config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN
        )
        master_ignition = read_file_as_str(f"{master_ignition_path}")
        config.ENV_DATA['control_plane_ignition'] = master_ignition

        worker_ignition_path = os.path.join(
            config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN
        )
        worker_ignition = read_file_as_str(f"{worker_ignition_path}")
        config.ENV_DATA['compute_ignition'] = worker_ignition

        cluster_domain = (
            f"{config.ENV_DATA.get('cluster_name')}."
            f"{config.ENV_DATA.get('base_domain')}"
        )
        config.ENV_DATA['cluster_domain'] = cluster_domain

        # generate terraform variables from template
        logger.info("Generating terraform variables")
        _templating = Templating()
        terraform_var_template = "terraform.tfvars.j2"
        terraform_var_template_path = os.path.join(
            "ocp-deployment", terraform_var_template
        )
        terraform_config_str = _templating.render_template(
            terraform_var_template_path, config.ENV_DATA
        )

        terraform_var_yaml = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            "terraform.tfvars.yaml"
        )
        with open(terraform_var_yaml, "w") as f:
            f.write(terraform_config_str)
        # terraform consumes tfvars, not YAML — convert the rendered file
        self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

        # update the machine configurations
        update_machine_conf()

        # sync guest time with host
        if config.ENV_DATA.get('sync_time_with_host'):
            sync_time_with_host(constants.INSTALLER_MACHINE_CONF, True)

    def create_config(self):
        """
        Creates the OCP deploy config for the vSphere
        """
        # Generate install-config from template
        _templating = Templating()
        ocp_install_template = (
            f"install-config-{self.deployment_platform}-"
            f"{self.deployment_type}.yaml.j2"
        )
        ocp_install_template_path = os.path.join(
            "ocp-deployment", ocp_install_template
        )
        install_config_str = _templating.render_template(
            ocp_install_template_path, config.ENV_DATA
        )

        # Parse the rendered YAML so that we can manipulate the object directly
        install_config_obj = yaml.safe_load(install_config_str)
        install_config_obj['pullSecret'] = self.get_pull_secret()
        install_config_obj['sshKey'] = self.get_ssh_key()
        install_config_str = yaml.safe_dump(install_config_obj)
        install_config = os.path.join(self.cluster_path, "install-config.yaml")
        with open(install_config, "w") as f:
            f.write(install_config_str)

    def create_ignitions(self):
        """
        Creates the ignition files
        """
        logger.info("creating ignition files for the cluster")
        run_cmd(
            f"{self.installer} create ignition-configs "
            f"--dir {self.cluster_path} "
        )

    def configure_storage_for_image_registry(self, kubeconfig):
        """
        Configures storage for the image registry

        Args:
            kubeconfig (str): path to the kubeconfig used by ``oc``
        """
        logger.info("configuring storage for image registry")
        # emptyDir backing — non-persistent; sufficient for test clusters
        patch = " '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}' "
        run_cmd(
            f"oc --kubeconfig {kubeconfig} patch "
            f"configs.imageregistry.operator.openshift.io "
            f"cluster --type merge --patch {patch}"
        )

    def deploy(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on this platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")
        """
        logger.info("Deploying OCP cluster for vSphere platform")
        logger.info(
            f"Openshift-installer will be using loglevel:{log_cli_level}"
        )
        # terraform must be run from the terraform data directory
        os.chdir(self.terraform_data_dir)
        self.terraform.initialize()
        self.terraform.apply(self.terraform_var)
        os.chdir(self.previous_dir)
        logger.info("waiting for bootstrap to complete")
        try:
            run_cmd(
                f"{self.installer} wait-for bootstrap-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=3600
            )
        except CommandFailed as e:
            # if the installer suggests gathering bootstrap logs,
            # do so best-effort, then re-raise the original failure
            if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                try:
                    gather_bootstrap()
                except Exception as ex:
                    logger.error(ex)
            raise e

        if not config.DEPLOYMENT['preserve_bootstrap_node']:
            logger.info("removing bootstrap node")
            os.chdir(self.terraform_data_dir)
            self.terraform.apply(
                self.terraform_var, bootstrap_complete=True
            )
            os.chdir(self.previous_dir)

        OCP.set_kubeconfig(self.kubeconfig)

        # wait for all nodes to generate CSR
        # From OCP version 4.4 and above, we have to approve CSR manually
        # for all the nodes
        ocp_version = get_ocp_version()
        if Version.coerce(ocp_version) >= Version.coerce('4.4'):
            wait_for_all_nodes_csr_and_approve(timeout=1200, sleep=30)

        # wait for image registry to show-up
        co = "image-registry"
        wait_for_co(co)

        # patch image registry to null
        self.configure_storage_for_image_registry(self.kubeconfig)

        # wait for install to complete
        logger.info("waiting for install to complete")
        run_cmd(
            f"{self.installer} wait-for install-complete "
            f"--dir {self.cluster_path} "
            f"--log-level {log_cli_level}",
            timeout=1800
        )

        # Approving CSRs here in-case if any exists
        approve_pending_csr()

        self.test_cluster()
class OCPDeployment(BaseOCPDeployment):
    """
    OCP deployment for vSphere UPI (installer-repo variant).

    This variant clones the vSphere installer repo directly and patches
    its terraform files (gateway, DNS, memory, CPU counts) before
    provisioning with terraform.
    """

    def __init__(self):
        super(VSPHEREUPI.OCPDeployment, self).__init__()
        self.public_key = {}
        self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR, 'installer')
        # remembered so deploy() can chdir back after running terraform
        self.previous_dir = os.getcwd()
        self.terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR)
        create_directory_path(self.terraform_data_dir)
        self.terraform_work_dir = constants.VSPHERE_DIR
        self.terraform = Terraform(self.terraform_work_dir)

    def deploy_prereq(self):
        """
        Pre-Requisites for vSphere UPI Deployment
        """
        super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
        # create ignitions
        self.create_ignitions()
        self.kubeconfig = os.path.join(
            self.cluster_path, config.RUN.get('kubeconfig_location'))

        # git clone repo from openshift installer
        clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

        # upload bootstrap ignition to public access server so the
        # bootstrap VM can fetch it over HTTP
        bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                      constants.BOOTSTRAP_IGN)
        remote_path = os.path.join(
            config.ENV_DATA.get('path_to_upload'),
            f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}")
        upload_file(config.ENV_DATA.get('httpd_server'),
                    bootstrap_path,
                    remote_path,
                    config.ENV_DATA.get('httpd_server_user'),
                    config.ENV_DATA.get('httpd_server_password'))

        # generate bootstrap ignition url
        # (strip the docroot prefix to build the public URL)
        path_to_bootstrap_on_remote = remote_path.replace(
            "/var/www/html/", "")
        bootstrap_ignition_url = (
            f"http://{config.ENV_DATA.get('httpd_server')}/"
            f"{path_to_bootstrap_on_remote}")
        logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
        config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

        # load master and worker ignitions to variables
        # (consumed later by the terraform tfvars template)
        master_ignition_path = os.path.join(
            config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
        master_ignition = read_file_as_str(f"{master_ignition_path}")
        config.ENV_DATA['control_plane_ignition'] = master_ignition

        worker_ignition_path = os.path.join(
            config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
        worker_ignition = read_file_as_str(f"{worker_ignition_path}")
        config.ENV_DATA['compute_ignition'] = worker_ignition

        cluster_domain = (f"{config.ENV_DATA.get('cluster_name')}."
                          f"{config.ENV_DATA.get('base_domain')}")
        config.ENV_DATA['cluster_domain'] = cluster_domain

        # generate terraform variables from template
        logger.info("Generating terraform variables")
        _templating = Templating()
        terraform_var_template = "terraform.tfvars.j2"
        terraform_var_template_path = os.path.join("ocp-deployment",
                                                   terraform_var_template)
        terraform_config_str = _templating.render_template(
            terraform_var_template_path, config.ENV_DATA)

        terraform_var_yaml = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR,
                                          "terraform.tfvars.yaml")
        with open(terraform_var_yaml, "w") as f:
            f.write(terraform_config_str)
        # terraform consumes tfvars, not YAML — convert the rendered file
        self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

        # update gateway and DNS in the installer's ignition terraform
        if config.ENV_DATA.get('gateway'):
            replace_content_in_file(constants.INSTALLER_IGNITION,
                                    '${cidrhost(var.machine_cidr,1)}',
                                    f"{config.ENV_DATA.get('gateway')}")
        if config.ENV_DATA.get('dns'):
            replace_content_in_file(constants.INSTALLER_IGNITION,
                                    constants.INSTALLER_DEFAULT_DNS,
                                    f"{config.ENV_DATA.get('dns')}")

        # update the zone in route
        if config.ENV_DATA.get('region'):
            def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get(
                'region')
            # "xyz" is the placeholder string in the route53 terraform file
            replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz",
                                    def_zone)

        # increase memory
        if config.ENV_DATA.get('memory'):
            replace_content_in_file(constants.INSTALLER_MACHINE_CONF,
                                    '${var.memory}',
                                    config.ENV_DATA.get('memory'))

        # increase CPUs
        worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
        master_num_cpus = config.ENV_DATA.get('master_num_cpus')
        if worker_num_cpus or master_num_cpus:
            with open(constants.VSPHERE_MAIN, 'r') as fd:
                obj = hcl.load(fd)
                if worker_num_cpus:
                    obj['module']['compute']['num_cpu'] = worker_num_cpus
                if master_num_cpus:
                    obj['module']['control_plane'][
                        'num_cpu'] = master_num_cpus
            # Dump data to json file since hcl module
            # doesn't support dumping of data in HCL format
            # (terraform also accepts main.tf.json; the original .tf
            # is kept aside as a .backup)
            dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
            os.rename(constants.VSPHERE_MAIN,
                      f"{constants.VSPHERE_MAIN}.backup")

    def create_config(self):
        """
        Creates the OCP deploy config for the vSphere
        """
        # Generate install-config from template
        _templating = Templating()
        ocp_install_template = (
            f"install-config-{self.deployment_platform}-"
            f"{self.deployment_type}.yaml.j2")
        ocp_install_template_path = os.path.join("ocp-deployment",
                                                 ocp_install_template)
        install_config_str = _templating.render_template(
            ocp_install_template_path, config.ENV_DATA)

        # Parse the rendered YAML so that we can manipulate the object directly
        install_config_obj = yaml.safe_load(install_config_str)
        install_config_obj['pullSecret'] = self.get_pull_secret()
        install_config_obj['sshKey'] = self.get_ssh_key()
        install_config_str = yaml.safe_dump(install_config_obj)
        install_config = os.path.join(self.cluster_path, "install-config.yaml")
        with open(install_config, "w") as f:
            f.write(install_config_str)

    def create_ignitions(self):
        """
        Creates the ignition files
        """
        logger.info("creating ignition files for the cluster")
        run_cmd(f"{self.installer} create ignition-configs "
                f"--dir {self.cluster_path} ")

    def configure_storage_for_image_registry(self, kubeconfig):
        """
        Configures storage for the image registry

        Args:
            kubeconfig (str): path to the kubeconfig used by ``oc``
        """
        logger.info("configuring storage for image registry")
        # emptyDir backing — non-persistent; sufficient for test clusters
        patch = " '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}' "
        run_cmd(f"oc --kubeconfig {kubeconfig} patch "
                f"configs.imageregistry.operator.openshift.io "
                f"cluster --type merge --patch {patch}")

    def deploy(self, log_cli_level='DEBUG'):
        """
        Deployment specific to OCP cluster on this platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")
        """
        logger.info("Deploying OCP cluster for vSphere platform")
        logger.info(
            f"Openshift-installer will be using loglevel:{log_cli_level}")
        # terraform must be run from the terraform data directory
        os.chdir(self.terraform_data_dir)
        self.terraform.initialize()
        self.terraform.apply(self.terraform_var)
        os.chdir(self.previous_dir)
        logger.info("waiting for bootstrap to complete")
        run_cmd(
            f"{self.installer} wait-for bootstrap-complete "
            f"--dir {self.cluster_path} "
            f"--log-level {log_cli_level}",
            timeout=3600)
        # NOTE(review): bootstrap node is removed unconditionally here
        # (no preserve_bootstrap_node check in this variant)
        logger.info("removing bootstrap node")
        os.chdir(self.terraform_data_dir)
        self.terraform.apply(self.terraform_var, bootstrap_complete=True)
        os.chdir(self.previous_dir)

        OCP.set_kubeconfig(self.kubeconfig)

        # wait for image registry to show-up
        co = "image-registry"
        wait_for_co(co)

        # patch image registry to null
        self.configure_storage_for_image_registry(self.kubeconfig)

        # wait for install to complete
        logger.info("waiting for install to complete")
        run_cmd(
            f"{self.installer} wait-for install-complete "
            f"--dir {self.cluster_path} "
            f"--log-level {log_cli_level}",
            timeout=1800)

        self.test_cluster()
class OCPDeployment(BaseOCPDeployment):
    """
    OCP deployment for vSphere UPI (version-aware variant).

    Downloads a pinned terraform binary (and, for OCP >= 4.6, the
    terraform ignition provider), generates manifests before ignitions,
    and handles the OCP >= 4.5 "folder structure" terraform layout,
    including load-balancer cleanup of the bootstrap node.
    """

    def __init__(self):
        super(VSPHEREUPI.OCPDeployment, self).__init__()
        self.public_key = {}
        self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR, "installer")
        # remembered so deploy() can chdir back after running terraform
        self.previous_dir = os.getcwd()

        # get OCP version
        ocp_version = get_ocp_version()

        # create terraform_data directory
        self.terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR)
        create_directory_path(self.terraform_data_dir)

        # Download terraform binary based on ocp version and
        # update the installer path in ENV_DATA
        # use "0.11.14" for releases below OCP 4.5
        terraform_version = config.DEPLOYMENT["terraform_version"]
        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA["terraform_installer"] = terraform_installer

        # Download terraform ignition provider
        # ignition provider dependancy from OCP 4.6
        if Version.coerce(ocp_version) >= Version.coerce("4.6"):
            get_terraform_ignition_provider(self.terraform_data_dir)

        # Initialize Terraform
        self.terraform_work_dir = constants.VSPHERE_DIR
        self.terraform = Terraform(self.terraform_work_dir)

        # OCP >= 4.5 uses the new terraform folder structure; the flag
        # is published to ENV_DATA for use by add_nodes/scale-up code
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce("4.5"):
            self.folder_structure = True
        config.ENV_DATA["folder_structure"] = self.folder_structure

    def deploy_prereq(self):
        """
        Pre-Requisites for vSphere UPI Deployment
        """
        super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
        # generate manifests
        self.generate_manifests()
        # create ignitions
        self.create_ignitions()
        self.kubeconfig = os.path.join(
            self.cluster_path, config.RUN.get("kubeconfig_location"))
        self.terraform_var = os.path.join(
            config.ENV_DATA["cluster_path"],
            constants.TERRAFORM_DATA_DIR,
            "terraform.tfvars",
        )

        # git clone repo from openshift installer
        clone_openshift_installer()

        # generate terraform variable file
        generate_terraform_vars_and_update_machine_conf()

        # sync guest time with host; the machine conf file to patch
        # depends on the terraform folder structure in use
        vm_file = (constants.VM_MAIN if self.folder_structure
                   else constants.INSTALLER_MACHINE_CONF)
        if config.ENV_DATA.get("sync_time_with_host"):
            sync_time_with_host(vm_file, True)

    def create_config(self):
        """
        Creates the OCP deploy config for the vSphere
        """
        # Generate install-config from template
        _templating = Templating()
        ocp_install_template = (
            f"install-config-{self.deployment_platform}-"
            f"{self.deployment_type}.yaml.j2")
        ocp_install_template_path = os.path.join("ocp-deployment",
                                                 ocp_install_template)
        install_config_str = _templating.render_template(
            ocp_install_template_path, config.ENV_DATA)

        # Parse the rendered YAML so that we can manipulate the object directly
        install_config_obj = yaml.safe_load(install_config_str)
        install_config_obj["pullSecret"] = self.get_pull_secret()
        install_config_obj["sshKey"] = self.get_ssh_key()
        install_config_str = yaml.safe_dump(install_config_obj)
        install_config = os.path.join(self.cluster_path, "install-config.yaml")
        with open(install_config, "w") as f:
            f.write(install_config_str)

    def generate_manifests(self):
        """
        Generates manifest files
        """
        logger.info("creating manifest files for the cluster")
        run_cmd(
            f"{self.installer} create manifests --dir {self.cluster_path}")

        # remove machines and machinesets
        # Some of the manifests produced are for creating machinesets
        # and machine objects. We should remove these, because we don't
        # want to involve the machine-API operator and
        # machine-api-operator during install.
        manifest_files_path = os.path.join(self.cluster_path, "openshift")
        files_to_remove = glob.glob(
            f"{manifest_files_path}/99_openshift-cluster-api_"
            f"master-machines-*.yaml")
        files_to_remove.extend(
            glob.glob(f"{manifest_files_path}/99_openshift-cluster-api_"
                      f"worker-machineset-*.yaml"))
        logger.debug(
            f"Removing machines and machineset files: {files_to_remove}")
        for each_file in files_to_remove:
            os.remove(each_file)

    def create_ignitions(self):
        """
        Creates the ignition files
        """
        logger.info("creating ignition files for the cluster")
        run_cmd(f"{self.installer} create ignition-configs "
                f"--dir {self.cluster_path} ")

    # the registry operator may not be patchable immediately after
    # install — retry on CommandFailed up to 10 times, 30s apart
    @retry(exceptions.CommandFailed, tries=10, delay=30, backoff=1)
    def configure_storage_for_image_registry(self, kubeconfig):
        """
        Configures storage for the image registry

        Args:
            kubeconfig (str): path to the kubeconfig used by ``oc``
        """
        logger.info("configuring storage for image registry")
        # emptyDir backing — non-persistent; sufficient for test clusters
        patch = ' \'{"spec":{"storage":{"emptyDir":{}}}}\' '
        run_cmd(f"oc --kubeconfig {kubeconfig} patch "
                f"configs.imageregistry.operator.openshift.io "
                f"cluster --type merge --patch {patch}")

    def deploy(self, log_cli_level="DEBUG"):
        """
        Deployment specific to OCP cluster on this platform

        Args:
            log_cli_level (str): openshift installer's log level
                (default: "DEBUG")
        """
        logger.info("Deploying OCP cluster for vSphere platform")
        logger.info(
            f"Openshift-installer will be using loglevel:{log_cli_level}")
        # terraform must be run from the terraform data directory
        os.chdir(self.terraform_data_dir)
        self.terraform.initialize()
        self.terraform.apply(self.terraform_var)
        os.chdir(self.previous_dir)
        logger.info("waiting for bootstrap to complete")
        try:
            run_cmd(
                f"{self.installer} wait-for bootstrap-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=3600,
            )
        except CommandFailed as e:
            # if the installer suggests gathering bootstrap logs,
            # do so best-effort, then re-raise the original failure
            if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                try:
                    gather_bootstrap()
                except Exception as ex:
                    logger.error(ex)
            raise e

        if self.folder_structure:
            # comment bootstrap module
            comment_bootstrap_in_lb_module()

            # remove bootstrap IP in load balancer and
            # restart haproxy
            # NOTE(review): "boostrap" spelling matches the external
            # LoadBalancer API — do not "fix" the call site alone
            lb = LoadBalancer()
            lb.remove_boostrap_in_proxy()
            lb.restart_haproxy()

        # remove bootstrap node
        if not config.DEPLOYMENT["preserve_bootstrap_node"]:
            logger.info("removing bootstrap node")
            os.chdir(self.terraform_data_dir)
            if self.folder_structure:
                self.terraform.destroy_module(self.terraform_var,
                                              constants.BOOTSTRAP_MODULE)
            else:
                self.terraform.apply(self.terraform_var,
                                     bootstrap_complete=True)
            os.chdir(self.previous_dir)

        OCP.set_kubeconfig(self.kubeconfig)

        # wait for all nodes to generate CSR
        # From OCP version 4.4 and above, we have to approve CSR manually
        # for all the nodes
        ocp_version = get_ocp_version()
        if Version.coerce(ocp_version) >= Version.coerce("4.4"):
            wait_for_all_nodes_csr_and_approve(timeout=1200, sleep=30)

        # wait for image registry to show-up
        co = "image-registry"
        wait_for_co(co)

        # patch image registry to null
        self.configure_storage_for_image_registry(self.kubeconfig)

        # wait for install to complete
        logger.info("waiting for install to complete")
        run_cmd(
            f"{self.installer} wait-for install-complete "
            f"--dir {self.cluster_path} "
            f"--log-level {log_cli_level}",
            timeout=1800,
        )

        # Approving CSRs here in-case if any exists
        approve_pending_csr()

        self.test_cluster()
def add_nodes(self):
    """
    Add new nodes to the cluster.

    Version-aware scale-up: for OCP >= 4.5 the cluster-launcher repo
    and its terraform layout are used (``RHEL_WORKER_LIST`` output);
    older releases use the scale-up repo (``rhel-worker`` JSON output).
    Side effects: clones two repos, mutates ``config.ENV_DATA``, runs
    terraform in a dedicated data directory and temporarily changes
    the process CWD.
    """
    # create separate directory for scale-up terraform data
    # (kept apart from the base-cluster terraform state)
    scaleup_terraform_data_dir = os.path.join(
        self.cluster_path,
        constants.TERRAFORM_DATA_DIR,
        constants.SCALEUP_TERRAFORM_DATA_DIR,
    )
    create_directory_path(scaleup_terraform_data_dir)
    logger.info(
        f"scale-up terraform data directory: {scaleup_terraform_data_dir}")

    # git clone repo from openshift-misc
    clone_repo(constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path)

    # git clone repo from cluster-launcher
    # NOTE(review): attribute name "cluster_launcer_repo_path" carries a
    # typo from its definition elsewhere — kept for compatibility
    clone_repo(constants.VSPHERE_CLUSTER_LAUNCHER,
               self.cluster_launcer_repo_path)

    helpers = VSPHEREHELPERS()
    helpers.modify_scaleup_repo()

    # scale-up terraform expects the resource pool to be named
    # after the cluster
    config.ENV_DATA["vsphere_resource_pool"] = config.ENV_DATA.get(
        "cluster_name")

    # sync guest time with host; with the new folder structure the
    # machine conf lives in the cluster-launcher tree
    sync_time_with_host_file = constants.SCALEUP_VSPHERE_MACHINE_CONF
    if config.ENV_DATA["folder_structure"]:
        sync_time_with_host_file = os.path.join(
            constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
            f"aos-{get_ocp_version(seperator='_')}",
            constants.CLUSTER_LAUNCHER_MACHINE_CONF,
        )
    if config.ENV_DATA.get("sync_time_with_host"):
        sync_time_with_host(sync_time_with_host_file, True)

    # get the RHCOS worker list
    rhcos_ips = get_node_ips()
    logger.info(f"RHCOS IP's: {json.dumps(rhcos_ips)}")

    # generate terraform variable for scaling nodes
    self.scale_up_terraform_var = helpers.generate_terraform_vars_for_scaleup(
        rhcos_ips)

    # choose the vsphere_dir based on OCP version
    # generate cluster_info and config yaml files
    # for OCP version greater than 4.4
    vsphere_dir = constants.SCALEUP_VSPHERE_DIR
    rhel_module = "rhel-worker"
    if Version.coerce(self.ocp_version) >= Version.coerce("4.5"):
        vsphere_dir = os.path.join(
            constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
            f"aos-{get_ocp_version('_')}",
            "vsphere",
        )
        helpers.generate_cluster_info()
        helpers.generate_config_yaml()
        rhel_module = "RHEL_WORKER_LIST"

    # Add nodes using terraform; terraform must run from the
    # scale-up data directory, hence the chdir dance
    scaleup_terraform = Terraform(vsphere_dir)
    previous_dir = os.getcwd()
    os.chdir(scaleup_terraform_data_dir)
    scaleup_terraform.initialize()
    scaleup_terraform.apply(self.scale_up_terraform_var)
    scaleup_terraform_tfstate = os.path.join(scaleup_terraform_data_dir,
                                             "terraform.tfstate")
    out = scaleup_terraform.output(scaleup_terraform_tfstate, rhel_module)
    if config.ENV_DATA["folder_structure"]:
        # new layout emits a comma-separated, quoted list
        rhel_worker_nodes = out.strip().replace('"', "").split(",")
    else:
        # old layout emits JSON with the list under "value"
        rhel_worker_nodes = json.loads(out)["value"]
    logger.info(f"RHEL worker nodes: {rhel_worker_nodes}")
    os.chdir(previous_dir)

    # Install OCP on rhel nodes
    rhel_install = OCPINSTALLRHEL(rhel_worker_nodes)
    rhel_install.prepare_rhel_nodes()
    rhel_install.execute_ansible_playbook()

    # Giving some time to settle down the new nodes
    time.sleep(self.wait_time)

    # wait for nodes to be in READY state
    wait_for_nodes_status(timeout=300)