def comment_bootstrap_in_lb_module():
    """
    Comments out the bootstrap module reference in vSphere main.tf
    """
    logger.debug(f"Commenting bootstrap module in {constants.VSPHERE_MAIN}")
    replace_str = "module.ipam_bootstrap.ip_addresses[0]"
    replace_content_in_file(constants.VSPHERE_MAIN, replace_str, f"//{replace_str}")
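# Nearly every function in this module calls replace_content_in_file(), which is
# defined elsewhere in ocs-ci. The helper below is only a minimal sketch of its
# assumed behaviour (read the file, substitute a literal string, write it back);
# the real implementation and signature may differ.
def replace_content_in_file_sketch(file_path, old, new):
    """Replace every occurrence of ``old`` with ``new`` in ``file_path`` (assumed behaviour)."""
    with open(file_path, "r") as fd:
        content = fd.read()
    content = content.replace(old, new)
    with open(file_path, "w") as fd:
        fd.write(content)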
def modify_scaleup_repo(self):
    """
    Modify the scale-up repo. For a better user experience, remove the
    AWS access and secret keys and the corresponding variables from the
    relevant locations in the scale-up repo.
    """
    # remove access and secret key from constants.SCALEUP_VSPHERE_MAIN
    access_key = 'access_key = "${var.aws_access_key}"'
    secret_key = 'secret_key = "${var.aws_secret_key}"'
    replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN, access_key, " ")
    replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN, secret_key, " ")

    # remove access and secret key from constants.SCALEUP_VSPHERE_ROUTE53
    route53_access_key = 'access_key = "${var.access_key}"'
    route53_secret_key = 'secret_key = "${var.secret_key}"'
    replace_content_in_file(
        constants.SCALEUP_VSPHERE_ROUTE53, route53_access_key, " "
    )
    replace_content_in_file(
        constants.SCALEUP_VSPHERE_ROUTE53, route53_secret_key, " "
    )
    replace_content_in_file(
        constants.SCALEUP_VSPHERE_ROUTE53,
        "us-east-1",
        f"{config.ENV_DATA.get('region')}",
    )

    # remove access and secret variables from scale-up repo
    remove_keys_from_tf_variable_file(
        constants.SCALEUP_VSPHERE_VARIABLES, ['aws_access_key', 'aws_secret_key']
    )
    remove_keys_from_tf_variable_file(
        constants.SCALEUP_VSPHERE_ROUTE53_VARIABLES, ['access_key', 'secret_key']
    )
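# remove_keys_from_tf_variable_file() is another ocs-ci utility used above. The
# sketch below illustrates the assumed idea only: strip the ``variable "<key>"``
# blocks for the given keys from a variables.tf file. It is a hypothetical,
# regex-based example; the real helper may parse HCL instead.
import re

def remove_keys_from_tf_variable_file_sketch(tf_file, keys):
    """Drop ``variable "<key>" { ... }`` blocks for the given keys (assumed behaviour)."""
    with open(tf_file, "r") as fd:
        content = fd.read()
    for key in keys:
        # non-greedy match of the whole variable block, assuming no nested braces
        content = re.sub(
            rf'variable "{key}"\s*{{.*?}}\n?', "", content, flags=re.DOTALL
        )
    with open(tf_file, "w") as fd:
        fd.write(content)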
def generate_terraform_vars_for_scaleup(self, rhcos_ips):
    """
    Generates the terraform variables file for scaling nodes

    Args:
        rhcos_ips (list): RHCOS node IPs to append to the variable file

    Returns:
        str: path to the scale-up terraform variable file
    """
    self.scale_up_terraform_dir = os.path.join(
        self.cluster_path,
        constants.TERRAFORM_DATA_DIR,
        constants.SCALEUP_TERRAFORM_DATA_DIR,
    )
    scale_up_terraform_var_yaml = os.path.join(
        self.scale_up_terraform_dir, "scale_up_terraform.tfvars.yaml"
    )
    config.ENV_DATA["cluster_info_path"] = self.scale_up_terraform_dir
    config.ENV_DATA["credentials_path"] = self.scale_up_terraform_dir

    if self.folder_structure:
        logger.info(
            "Generating terraform variables for scaling nodes with folder structure"
        )
        scale_up_terraform_var_template_with_folder_structure = (
            "scale_up_terraform_with_folder_structure.tfvars.j2"
        )
        scale_up_terraform_var_template_path_with_folder_structure = os.path.join(
            "ocp-deployment",
            scale_up_terraform_var_template_with_folder_structure,
        )
        scale_up_terraform_config_str_with_folder_structure = (
            self._templating.render_template(
                scale_up_terraform_var_template_path_with_folder_structure,
                config.ENV_DATA,
            )
        )
        with open(scale_up_terraform_var_yaml, "w") as f:
            f.write(scale_up_terraform_config_str_with_folder_structure)

        scale_up_terraform_var = convert_yaml2tfvars(scale_up_terraform_var_yaml)
        replace_content_in_file(scale_up_terraform_var, "None", "")
    else:
        logger.info(
            "Generating terraform variables for scaling nodes without folder structure"
        )
        scale_up_terraform_var_template = "scale_up_terraform.tfvars.j2"
        scale_up_terraform_var_template_path = os.path.join(
            "ocp-deployment", scale_up_terraform_var_template
        )
        scale_up_terraform_config_str = self._templating.render_template(
            scale_up_terraform_var_template_path, config.ENV_DATA
        )
        with open(scale_up_terraform_var_yaml, "w") as f:
            f.write(scale_up_terraform_config_str)

        scale_up_terraform_var = convert_yaml2tfvars(scale_up_terraform_var_yaml)

    # append the RHCOS IP list to the terraform variable file
    with open(scale_up_terraform_var, "a+") as fd:
        fd.write(f"rhcos_list = {json.dumps(rhcos_ips)}")

    logger.info(f"scale-up terraform variable file: {scale_up_terraform_var}")
    return scale_up_terraform_var
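# convert_yaml2tfvars() comes from ocs-ci utilities. The sketch below shows the
# assumed transformation only: read the rendered ``*.tfvars.yaml`` file and emit
# a sibling ``*.tfvars`` file with ``key = value`` lines terraform accepts. The
# quoting and key-handling rules of the real helper may differ.
import os
import yaml

def convert_yaml2tfvars_sketch(yaml_file):
    """Convert a flat YAML mapping to a terraform .tfvars file (assumed behaviour)."""
    with open(yaml_file, "r") as fd:
        data = yaml.safe_load(fd)
    tfvars_file = os.path.splitext(yaml_file)[0]  # strips the trailing ".yaml"
    with open(tfvars_file, "w") as fd:
        for key, value in data.items():
            if isinstance(value, bool):
                fd.write(f"{key} = {str(value).lower()}\n")
            elif isinstance(value, (int, float)):
                fd.write(f"{key} = {value}\n")
            else:
                fd.write(f'{key} = "{value}"\n')
    return tfvars_file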
def update_zone():
    """
    Updates the zone in constants.INSTALLER_ROUTE53
    """
    # update the zone in route53
    if config.ENV_DATA.get('region'):
        def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get('region')
        replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz", def_zone)
def update_path():
    """
    Updates path to var.folder in the vsphere_folder resource
    """
    logger.debug(f"Updating path to var.folder in {constants.VSPHERE_MAIN}")
    replace_str = "path = var.cluster_id"
    replace_content_in_file(constants.VSPHERE_MAIN, replace_str, "path = var.folder")
def modify_haproxyservice():
    """
    Add ExecStop in haproxy service
    """
    to_change = "TimeoutStartSec=0"
    execstop = f"{to_change}\nExecStop=/bin/podman rm -f haproxy"
    replace_content_in_file(constants.TERRAFORM_HAPROXY_SERVICE, to_change, execstop)
def update_dns():
    """
    Updates the DNS
    """
    # update DNS
    if config.ENV_DATA.get('dns'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            constants.INSTALLER_DEFAULT_DNS,
            f"{config.ENV_DATA.get('dns')}"
        )
def sync_time_with_host(machine_file, enable=False):
    """
    Syncs the guest time with host

    Args:
        machine_file (str): machine file to sync the guest time with host
        enable (bool): True to sync guest time with host
    """
    to_change = 'enable_disk_uuid = "true"'
    sync_time = f"{to_change} sync_time_with_host = \"{enable}\""
    replace_content_in_file(machine_file, to_change, sync_time)
def update_gw(str_to_replace, config_file):
    """
    Updates the gateway

    Args:
        str_to_replace (str): string to replace in config file
        config_file (str): file to replace the string
    """
    # update gateway
    if config.ENV_DATA.get("gateway"):
        replace_content_in_file(
            config_file, str_to_replace, f"{config.ENV_DATA.get('gateway')}"
        )
def change_vm_root_disk_size(machine_file):
    """
    Change the root disk size of VM from constants.CURRENT_VM_ROOT_DISK_SIZE
    to constants.VM_ROOT_DISK_SIZE

    Args:
        machine_file (str): machine file to change the disk size
    """
    disk_size_prefix = "size = "
    current_vm_root_disk_size = f"{disk_size_prefix}{constants.CURRENT_VM_ROOT_DISK_SIZE}"
    vm_root_disk_size = f"{disk_size_prefix}{constants.VM_ROOT_DISK_SIZE}"
    replace_content_in_file(machine_file, current_vm_root_disk_size, vm_root_disk_size)
def sync_time_with_host(machine_file, enable=False):
    """
    Syncs the guest time with host

    Args:
        machine_file (str): machine file to sync the guest time with host
        enable (bool): True to sync guest time with host
    """
    # terraform supports only lowercase booleans
    enable = str(enable).lower()
    to_change = 'enable_disk_uuid = "true"'
    sync_time = f'{to_change}\n sync_time_with_host = "{enable}"'
    replace_content_in_file(machine_file, to_change, sync_time)
def update_gw_and_dns():
    """
    Updates the gateway and DNS
    """
    # update gateway
    if config.ENV_DATA.get('gateway'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            '${cidrhost(var.machine_cidr,1)}',
            f"{config.ENV_DATA.get('gateway')}"
        )
    # update DNS
    if config.ENV_DATA.get('dns'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            constants.INSTALLER_DEFAULT_DNS,
            f"{config.ENV_DATA.get('dns')}"
        )
def deploy_prereq(self):
    """
    Pre-Requisites for vSphere UPI Deployment
    """
    super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
    # create ignitions
    self.create_ignitions()
    self.kubeconfig = os.path.join(
        self.cluster_path, config.RUN.get('kubeconfig_location')
    )

    # git clone repo from openshift installer
    clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

    # upload bootstrap ignition to public access server
    bootstrap_path = os.path.join(
        config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN
    )
    remote_path = os.path.join(
        config.ENV_DATA.get('path_to_upload'),
        f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}"
    )
    upload_file(
        config.ENV_DATA.get('httpd_server'),
        bootstrap_path,
        remote_path,
        config.ENV_DATA.get('httpd_server_user'),
        config.ENV_DATA.get('httpd_server_password')
    )

    # generate bootstrap ignition url
    path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
    bootstrap_ignition_url = (
        f"http://{config.ENV_DATA.get('httpd_server')}/"
        f"{path_to_bootstrap_on_remote}"
    )
    logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
    config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

    # load master and worker ignitions to variables
    master_ignition_path = os.path.join(
        config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN
    )
    master_ignition = read_file_as_str(f"{master_ignition_path}")
    config.ENV_DATA['control_plane_ignition'] = master_ignition

    worker_ignition_path = os.path.join(
        config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN
    )
    worker_ignition = read_file_as_str(f"{worker_ignition_path}")
    config.ENV_DATA['compute_ignition'] = worker_ignition

    cluster_domain = (
        f"{config.ENV_DATA.get('cluster_name')}."
        f"{config.ENV_DATA.get('base_domain')}"
    )
    config.ENV_DATA['cluster_domain'] = cluster_domain

    # generate terraform variables from template
    logger.info("Generating terraform variables")
    _templating = Templating()
    terraform_var_template = "terraform.tfvars.j2"
    terraform_var_template_path = os.path.join(
        "ocp-deployment", terraform_var_template
    )
    terraform_config_str = _templating.render_template(
        terraform_var_template_path, config.ENV_DATA
    )

    terraform_var_yaml = os.path.join(
        self.cluster_path, constants.TERRAFORM_DATA_DIR, "terraform.tfvars.yaml"
    )
    with open(terraform_var_yaml, "w") as f:
        f.write(terraform_config_str)
    self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

    # update gateway and DNS
    if config.ENV_DATA.get('gateway'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            '${cidrhost(var.machine_cidr,1)}',
            f"{config.ENV_DATA.get('gateway')}"
        )
    if config.ENV_DATA.get('dns'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            constants.INSTALLER_DEFAULT_DNS,
            f"{config.ENV_DATA.get('dns')}"
        )

    # update the zone in route
    if config.ENV_DATA.get('region'):
        def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get('region')
        replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz", def_zone)

    # increase memory
    if config.ENV_DATA.get('memory'):
        replace_content_in_file(
            constants.INSTALLER_MACHINE_CONF,
            '${var.memory}',
            config.ENV_DATA.get('memory')
        )

    # increase CPUs
    worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
    master_num_cpus = config.ENV_DATA.get('master_num_cpus')
    if worker_num_cpus or master_num_cpus:
        with open(constants.VSPHERE_MAIN, 'r') as fd:
            obj = hcl.load(fd)
            if worker_num_cpus:
                obj['module']['compute']['num_cpu'] = worker_num_cpus
            if master_num_cpus:
                obj['module']['control_plane']['num_cpu'] = master_num_cpus
        # Dump data to json file since hcl module doesn't support
        # dumping of data in HCL format
        dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
        os.rename(constants.VSPHERE_MAIN, f"{constants.VSPHERE_MAIN}.backup")
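# dump_data_to_json() is used above because the ``hcl`` parser can read main.tf
# but cannot serialize back to HCL, while terraform happily loads a ``*.tf.json``
# file; renaming the original main.tf to ``.backup`` leaves only the JSON copy
# for terraform to pick up. A minimal sketch of the assumed helper:
import json

def dump_data_to_json_sketch(obj, json_file):
    """Write the parsed HCL dictionary as terraform-readable JSON (assumed behaviour)."""
    with open(json_file, "w") as fd:
        json.dump(obj, fd, indent=4)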
def generate_cluster_info(self):
    """
    Generates the cluster information file
    """
    logger.info("Generating cluster information file")

    # get kubeconfig and upload to httpd server
    kubeconfig = os.path.join(
        self.cluster_path, config.RUN.get('kubeconfig_location')
    )
    remote_path = os.path.join(
        config.ENV_DATA.get('path_to_upload'),
        f"{config.RUN.get('run_id')}_kubeconfig"
    )
    upload_file(
        config.ENV_DATA.get('httpd_server'),
        kubeconfig,
        remote_path,
        config.ENV_DATA.get('httpd_server_user'),
        config.ENV_DATA.get('httpd_server_password')
    )

    # Form the kubeconfig url path
    kubeconfig_url_path = os.path.join(
        'http://',
        config.ENV_DATA.get('httpd_server'),
        remote_path.lstrip('/var/www/html/')
    )
    config.ENV_DATA['kubeconfig_url'] = kubeconfig_url_path

    # get the infra_id
    infra_id = get_infra_id(self.cluster_path)
    config.ENV_DATA['infra_id'] = infra_id

    # get the cluster id
    cluster_id = get_cluster_id(self.cluster_path)
    config.ENV_DATA['cluster_id'] = cluster_id

    # fetch the installer version
    installer_version_str = run_cmd(
        f"{config.RUN['bin_dir']}/openshift-install version"
    )
    installer_version = installer_version_str.split()[1]
    config.ENV_DATA['installer_version'] = installer_version

    # get the major and minor version of OCP
    version_obj = Version(installer_version)
    ocp_version_x = version_obj.major
    ocp_version_y = version_obj.minor
    config.ENV_DATA['ocp_version_x'] = ocp_version_x
    config.ENV_DATA['ocp_version_y'] = ocp_version_y

    # generate the cluster info yaml file
    terraform_var_template = "cluster_info.yaml.j2"
    terraform_var_template_path = os.path.join(
        "ocp-deployment", terraform_var_template
    )
    terraform_config_str = self._templating.render_template(
        terraform_var_template_path, config.ENV_DATA
    )
    terraform_var_yaml = os.path.join(
        self.cluster_path,
        constants.TERRAFORM_DATA_DIR,
        constants.SCALEUP_TERRAFORM_DATA_DIR,
        "cluster_info.yaml"
    )
    with open(terraform_var_yaml, "w") as f:
        f.write(terraform_config_str)

    # config.ENV_DATA['dns_server'] = config.ENV_DATA['dns']
    template_vars = (
        f"\"dns_server: {config.ENV_DATA['dns']}"
        f"\\nremove_rhcos_worker: 'yes'\\n\""
    )
    replace_content_in_file(terraform_var_yaml, "PLACEHOLDER", template_vars)
    logger.info(f"cluster yaml file: {terraform_var_yaml}")
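# get_infra_id() and get_cluster_id() are ocs-ci helpers. They are assumed to
# read the installer-generated metadata.json in the cluster directory, roughly
# as sketched below; the real implementations may add error handling.
import json
import os

def get_infra_id_sketch(cluster_path):
    """Return ``infraID`` from the installer's metadata.json (assumed behaviour)."""
    with open(os.path.join(cluster_path, "metadata.json")) as fd:
        return json.load(fd)["infraID"]

def get_cluster_id_sketch(cluster_path):
    """Return ``clusterID`` from the installer's metadata.json (assumed behaviour)."""
    with open(os.path.join(cluster_path, "metadata.json")) as fd:
        return json.load(fd)["clusterID"]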
def modify_scaleup_repo(self):
    """
    Modify the scale-up repo. For a better user experience, remove the
    AWS access and secret keys and the corresponding variables from the
    relevant locations in the scale-up repo.
    """
    # importing here to avoid circular dependency
    from ocs_ci.deployment.vmware import change_vm_root_disk_size

    if self.folder_structure:
        logger.info("Modifying scaleup repo for folder structure")
        # modify default_map.yaml
        default_map_path = os.path.join(
            constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
            f"aos-{self.ocp_version}",
            "default_map.yaml"
        )
        dict_data = load_yaml(default_map_path)
        dict_data['cluster_domain'] = config.ENV_DATA['base_domain']
        dict_data['vsphere']['vcsa-qe']['datacenter'] = (
            config.ENV_DATA['vsphere_datacenter']
        )
        dict_data['vsphere']['vcsa-qe']['datastore'] = (
            config.ENV_DATA['vsphere_datastore']
        )
        dict_data['vsphere']['vcsa-qe']['network'] = (
            config.ENV_DATA['vm_network']
        )
        dict_data['vsphere']['vcsa-qe']['cpus'] = (
            config.ENV_DATA['rhel_num_cpus']
        )
        dict_data['vsphere']['vcsa-qe']['memory'] = (
            config.ENV_DATA['rhel_memory']
        )
        dict_data['vsphere']['vcsa-qe']['root_volume_size'] = (
            config.ENV_DATA.get('root_disk_size', '120')
        )
        dict_data['vsphere']['vcsa-qe']['image'] = (
            config.ENV_DATA['rhel_template']
        )
        dump_data_to_temp_yaml(dict_data, default_map_path)
    else:
        # remove access and secret key from constants.SCALEUP_VSPHERE_MAIN
        access_key = 'access_key = "${var.aws_access_key}"'
        secret_key = 'secret_key = "${var.aws_secret_key}"'
        replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN, access_key, " ")
        replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN, secret_key, " ")

        # remove access and secret key from constants.SCALEUP_VSPHERE_ROUTE53
        route53_access_key = 'access_key = "${var.access_key}"'
        route53_secret_key = 'secret_key = "${var.secret_key}"'
        replace_content_in_file(
            constants.SCALEUP_VSPHERE_ROUTE53, route53_access_key, " "
        )
        replace_content_in_file(
            constants.SCALEUP_VSPHERE_ROUTE53, route53_secret_key, " "
        )
        replace_content_in_file(
            constants.SCALEUP_VSPHERE_ROUTE53,
            "us-east-1",
            f"{config.ENV_DATA.get('region')}"
        )

        # remove access and secret variables from scale-up repo
        remove_keys_from_tf_variable_file(
            constants.SCALEUP_VSPHERE_VARIABLES,
            ['aws_access_key', 'aws_secret_key']
        )
        remove_keys_from_tf_variable_file(
            constants.SCALEUP_VSPHERE_ROUTE53_VARIABLES,
            ['access_key', 'secret_key']
        )

        # change root disk size
        change_vm_root_disk_size(constants.SCALEUP_VSPHERE_MACHINE_CONF)
def modify_scaleup_repo(self):
    """
    Modify the scale-up repo. For a better user experience, remove the
    AWS access and secret keys and the corresponding variables from the
    relevant locations in the scale-up repo.
    """
    # importing here to avoid circular dependency
    from ocs_ci.deployment.vmware import change_vm_root_disk_size

    if self.folder_structure:
        logger.info("Modifying scaleup repo for folder structure")
        # modify default_map.yaml
        default_map_path = os.path.join(
            constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
            f"aos-{self.ocp_version}",
            "default_map.yaml",
        )
        dict_data = load_yaml(default_map_path)
        dict_data["cluster_domain"] = config.ENV_DATA["base_domain"]
        dict_data["vsphere"]["vcsa-qe"]["datacenter"] = config.ENV_DATA[
            "vsphere_datacenter"
        ]
        dict_data["vsphere"]["vcsa-qe"]["datastore"] = config.ENV_DATA[
            "vsphere_datastore"
        ]
        dict_data["vsphere"]["vcsa-qe"]["network"] = config.ENV_DATA["vm_network"]
        dict_data["vsphere"]["vcsa-qe"]["cpus"] = config.ENV_DATA["rhel_num_cpus"]
        dict_data["vsphere"]["vcsa-qe"]["memory"] = config.ENV_DATA["rhel_memory"]
        dict_data["vsphere"]["vcsa-qe"]["root_volume_size"] = config.ENV_DATA.get(
            "root_disk_size", "120"
        )

        if version.get_semantic_ocp_version_from_config() >= version.VERSION_4_9:
            dict_data["vsphere"]["vcsa-qe"]["image_7"] = config.ENV_DATA[
                "rhel_template"
            ]
        else:
            dict_data["vsphere"]["vcsa-qe"]["image"] = config.ENV_DATA[
                "rhel_template"
            ]

        dump_data_to_temp_yaml(dict_data, default_map_path)
    else:
        # remove access and secret key from constants.SCALEUP_VSPHERE_MAIN
        access_key = 'access_key = "${var.aws_access_key}"'
        secret_key = 'secret_key = "${var.aws_secret_key}"'
        replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN, access_key, " ")
        replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN, secret_key, " ")

        # remove access and secret key from constants.SCALEUP_VSPHERE_ROUTE53
        route53_access_key = 'access_key = "${var.access_key}"'
        route53_secret_key = 'secret_key = "${var.secret_key}"'
        replace_content_in_file(
            constants.SCALEUP_VSPHERE_ROUTE53, route53_access_key, " "
        )
        replace_content_in_file(
            constants.SCALEUP_VSPHERE_ROUTE53, route53_secret_key, " "
        )
        replace_content_in_file(
            constants.SCALEUP_VSPHERE_ROUTE53,
            "us-east-1",
            f"{config.ENV_DATA.get('region')}",
        )

        # remove access and secret variables from scale-up repo
        remove_keys_from_tf_variable_file(
            constants.SCALEUP_VSPHERE_VARIABLES,
            ["aws_access_key", "aws_secret_key"],
        )
        remove_keys_from_tf_variable_file(
            constants.SCALEUP_VSPHERE_ROUTE53_VARIABLES,
            ["access_key", "secret_key"],
        )

        # change root disk size
        change_vm_root_disk_size(constants.SCALEUP_VSPHERE_MACHINE_CONF)
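# dump_data_to_temp_yaml() is an ocs-ci utility used by both versions of
# modify_scaleup_repo() above. The sketch below captures its assumed effect:
# serialize the modified dictionary back to default_map.yaml. The real helper
# may route the data through a temporary file first.
import yaml

def dump_data_to_temp_yaml_sketch(data, yaml_file):
    """Write ``data`` back to ``yaml_file`` as YAML (assumed behaviour)."""
    with open(yaml_file, "w") as fd:
        yaml.safe_dump(data, fd, default_flow_style=False)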