def change_mem_and_cpu():
    """
    Increase CPUs and memory for nodes
    """
    # Map (terraform module, attribute) -> requested override value.
    # A value of None/empty means "keep the terraform default".
    requested = {
        ('compute', 'num_cpu'): config.ENV_DATA.get('worker_num_cpus'),
        ('control_plane', 'num_cpu'): config.ENV_DATA.get('master_num_cpus'),
        ('compute', 'memory'): config.ENV_DATA.get('compute_memory'),
        ('control_plane', 'memory'): config.ENV_DATA.get('master_memory'),
    }
    if not any(requested.values()):
        return
    with open(constants.VSPHERE_MAIN, 'r') as fd:
        obj = hcl.load(fd)
    for (module_name, attribute), value in requested.items():
        if value:
            obj['module'][module_name][attribute] = value
    # Dump data to json file since hcl module
    # doesn't support dumping of data in HCL format
    dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
    os.rename(constants.VSPHERE_MAIN, f"{constants.VSPHERE_MAIN}.backup")
def remove_keys_from_tf_variable_file(tf_file, keys):
    """
    Removes the keys from the tf files and convert to json format

    Args:
        tf_file (str): path to tf file
        keys (list): list of keys to remove

    """
    # importing here to avoid dependencies
    from ocs_ci.utility.templating import dump_data_to_json
    with open(tf_file, 'r') as fd:
        obj = hcl.load(fd)
    for key in keys:
        # Use a default so an already-absent key is skipped instead of
        # aborting the whole conversion with a KeyError
        obj['variable'].pop(key, None)
    # hcl module doesn't support dumping data back in HCL format, so the
    # stripped variables are written as JSON and the original is kept as backup
    dump_data_to_json(obj, f"{tf_file}.json")
    os.rename(tf_file, f"{tf_file}.backup")
def change_mem_and_cpu():
    """
    Increase CPUs and memory for nodes
    """
    # Optional per-role overrides from the run configuration; each may be
    # None, in which case the corresponding terraform value is left alone.
    # NOTE(review): the worker memory key is "compute_memory" while the CPU
    # key is "worker_num_cpus" -- confirm against the config schema.
    worker_num_cpus = config.ENV_DATA.get("worker_num_cpus")
    master_num_cpus = config.ENV_DATA.get("master_num_cpus")
    worker_memory = config.ENV_DATA.get("compute_memory")
    master_memory = config.ENV_DATA.get("master_memory")
    # Only rewrite the terraform main file when at least one override is set
    if worker_num_cpus or master_num_cpus or master_memory or worker_memory:
        with open(constants.VSPHERE_MAIN, "r") as fd:
            obj = hcl.load(fd)
        if worker_num_cpus:
            obj["module"]["compute"]["num_cpu"] = worker_num_cpus
        if master_num_cpus:
            obj["module"]["control_plane"]["num_cpu"] = master_num_cpus
        if worker_memory:
            obj["module"]["compute"]["memory"] = worker_memory
        if master_memory:
            obj["module"]["control_plane"]["memory"] = master_memory
        # Dump data to json file since hcl module
        # doesn't support dumping of data in HCL format
        dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
        os.rename(constants.VSPHERE_MAIN, f"{constants.VSPHERE_MAIN}.backup")
def deploy_prereq(self):
    """
    Pre-Requisites for vSphere UPI Deployment

    Creates ignition configs, clones the openshift installer repo, uploads
    the bootstrap ignition to an HTTP server, renders the terraform
    variables, and patches installer files with values from config.ENV_DATA.
    The steps are order-dependent: ignitions must exist before they are
    uploaded/read, and the installer repo must be cloned before its files
    are edited in place.
    """
    super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
    # create ignitions
    self.create_ignitions()
    self.kubeconfig = os.path.join(
        self.cluster_path, config.RUN.get('kubeconfig_location'))

    # git clone repo from openshift installer
    clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

    # upload bootstrap ignition to public access server so the bootstrap
    # node can fetch it over HTTP; the remote name is prefixed with the
    # run id to keep concurrent runs from clobbering each other
    bootstrap_path = os.path.join(
        config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
    remote_path = os.path.join(
        config.ENV_DATA.get('path_to_upload'),
        f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}")
    upload_file(
        config.ENV_DATA.get('httpd_server'),
        bootstrap_path,
        remote_path,
        config.ENV_DATA.get('httpd_server_user'),
        config.ENV_DATA.get('httpd_server_password'))

    # generate bootstrap ignition url
    # NOTE(review): assumes the httpd document root is /var/www/html/ --
    # confirm this matches the server pointed to by 'httpd_server'
    path_to_bootstrap_on_remote = remote_path.replace(
        "/var/www/html/", "")
    bootstrap_ignition_url = (
        f"http://{config.ENV_DATA.get('httpd_server')}/"
        f"{path_to_bootstrap_on_remote}")
    logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
    config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

    # load master and worker ignitions to variables (consumed later by
    # the terraform variable template via config.ENV_DATA)
    master_ignition_path = os.path.join(
        config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
    master_ignition = read_file_as_str(f"{master_ignition_path}")
    config.ENV_DATA['control_plane_ignition'] = master_ignition

    worker_ignition_path = os.path.join(
        config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
    worker_ignition = read_file_as_str(f"{worker_ignition_path}")
    config.ENV_DATA['compute_ignition'] = worker_ignition

    cluster_domain = (
        f"{config.ENV_DATA.get('cluster_name')}."
        f"{config.ENV_DATA.get('base_domain')}")
    config.ENV_DATA['cluster_domain'] = cluster_domain

    # generate terraform variables from template
    logger.info("Generating terraform variables")
    _templating = Templating()
    terraform_var_template = "terraform.tfvars.j2"
    terraform_var_template_path = os.path.join(
        "ocp-deployment", terraform_var_template)
    terraform_config_str = _templating.render_template(
        terraform_var_template_path, config.ENV_DATA)

    terraform_var_yaml = os.path.join(
        self.cluster_path, constants.TERRAFORM_DATA_DIR,
        "terraform.tfvars.yaml")
    with open(terraform_var_yaml, "w") as f:
        f.write(terraform_config_str)
    self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

    # update gateway and DNS in the installer's ignition template,
    # replacing the terraform interpolation defaults in place
    if config.ENV_DATA.get('gateway'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            '${cidrhost(var.machine_cidr,1)}',
            f"{config.ENV_DATA.get('gateway')}")

    if config.ENV_DATA.get('dns'):
        replace_content_in_file(
            constants.INSTALLER_IGNITION,
            constants.INSTALLER_DEFAULT_DNS,
            f"{config.ENV_DATA.get('dns')}")

    # update the zone in route
    # "xyz" is the placeholder in the route53 template file
    if config.ENV_DATA.get('region'):
        def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get(
            'region')
        replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz", def_zone)

    # increase memory
    if config.ENV_DATA.get('memory'):
        replace_content_in_file(
            constants.INSTALLER_MACHINE_CONF,
            '${var.memory}',
            config.ENV_DATA.get('memory'))

    # increase CPUs
    worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
    master_num_cpus = config.ENV_DATA.get('master_num_cpus')
    if worker_num_cpus or master_num_cpus:
        with open(constants.VSPHERE_MAIN, 'r') as fd:
            obj = hcl.load(fd)
            if worker_num_cpus:
                obj['module']['compute']['num_cpu'] = worker_num_cpus
            if master_num_cpus:
                obj['module']['control_plane'][
                    'num_cpu'] = master_num_cpus
            # Dump data to json file since hcl module
            # doesn't support dumping of data in HCL format
            dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
            os.rename(
                constants.VSPHERE_MAIN,
                f"{constants.VSPHERE_MAIN}.backup")
def run_amq_benchmark(
    self,
    benchmark_pod_name="benchmark",
    kafka_namespace=constants.AMQ_NAMESPACE,
    tiller_namespace=AMQ_BENCHMARK_NAMESPACE,
    num_of_clients=8,
    worker=None,
    timeout=1800,
    amq_workload_yaml=None,
    run_in_bg=False,
):
    """
    Run benchmark pod and get the results

    Args:
        benchmark_pod_name (str): Name of the benchmark pod
        kafka_namespace (str): Namespace where kafka cluster created
        tiller_namespace (str): Namespace where tiller pod needs to be created
        num_of_clients (int): Number of clients to be created
        worker (str) : Loads to create on workloads separated with commas
            e.g http://benchmark-worker-0.benchmark-worker:8080,
            http://benchmark-worker-1.benchmark-worker:8080
        timeout (int): Time to complete the run
        amq_workload_yaml (dict): Contains amq workloads information keys and values
            :name (str): Name of the workloads
            :topics (int): Number of topics created
            :partitions_per_topic (int): Number of partitions per topic
            :message_size (int): Message size
            :payload_file (str): Load to run on workload
            :subscriptions_per_topic (int): Number of subscriptions per topic
            :consumer_per_subscription (int): Number of consumers per subscription
            :producers_per_topic (int): Number of producers per topic
            :producer_rate (int): Producer rate
            :consumer_backlog_sizegb (int): Size of block in gb
            :test_duration_minutes (int): Time to run the workloads
        run_in_bg (bool): On true the workload will run in background

    Return:
        result (str/Thread obj): Returns benchmark run information if
            run_in_bg is False. Otherwise a thread of the amq workload
            execution
    """
    # Namespace for helm/tiller; tolerate the namespace already existing
    # (re-runs), but re-raise any other creation failure
    try:
        self.create_namespace(tiller_namespace)
    except CommandFailed as ef:
        if (
            f'project.project.openshift.io "{tiller_namespace}" already exists'
            not in str(ef)
        ):
            raise ef

    # Create rbac file: first document is the service account, second the
    # cluster role binding; both are pointed at the tiller namespace
    try:
        sa_tiller = list(
            templating.load_yaml(constants.AMQ_RBAC_YAML, multi_document=True)
        )
        sa_tiller[0]["metadata"]["namespace"] = tiller_namespace
        sa_tiller[1]["subjects"][0]["namespace"] = tiller_namespace
        self.sa_tiller = OCS(**sa_tiller[0])
        self.crb_tiller = OCS(**sa_tiller[1])
        self.sa_tiller.create()
        self.crb_tiller.create()
    except (CommandFailed, CalledProcessError) as cf:
        log.error("Failed during creation of service account tiller")
        raise cf

    # Install helm cli (v2, as we need the tiller component; the untar
    # expects the helm-v2.16.1 tarball -- URL constant must match)
    # And create tiller pods
    wget_cmd = f"wget -c --read-timeout=5 --tries=0 {URL}"
    untar_cmd = "tar -zxvf helm-v2.16.1-linux-amd64.tar.gz"
    tiller_cmd = (
        f"linux-amd64/helm init --tiller-namespace {tiller_namespace}"
        f" --service-account {tiller_namespace}"
    )
    exec_cmd(cmd=wget_cmd, cwd=self.dir)
    exec_cmd(cmd=untar_cmd, cwd=self.dir)
    exec_cmd(cmd=tiller_cmd, cwd=self.dir)

    # Validate tiller pod is running; fixed grace period before checking
    log.info("Waiting for 30s for tiller pod to come up")
    time.sleep(30)
    if self.is_amq_pod_running(
        pod_pattern="tiller", expected_pods=1, namespace=tiller_namespace
    ):
        log.info("Tiller pod is running")
    else:
        raise ResourceWrongStatusException("Tiller pod is not in running state")

    # Create benchmark pods
    log.info("Create benchmark pods")
    values = templating.load_yaml(constants.AMQ_BENCHMARK_VALUE_YAML)
    values["numWorkers"] = num_of_clients
    benchmark_cmd = (
        f"linux-amd64/helm install {constants.AMQ_BENCHMARK_POD_YAML}"
        f" --name {benchmark_pod_name} --tiller-namespace {tiller_namespace}"
    )
    exec_cmd(cmd=benchmark_cmd, cwd=self.dir)

    # Making sure the benchmark pod and clients are running
    # (1 driver pod + num_of_clients worker pods)
    if self.is_amq_pod_running(
        pod_pattern="benchmark",
        expected_pods=(1 + num_of_clients),
        namespace=tiller_namespace,
    ):
        log.info("All benchmark pod is up and running")
    else:
        raise ResourceWrongStatusException(
            "Benchmark pod is not getting to running state"
        )

    # Update commonConfig with kafka-bootstrap server details and copy the
    # resulting driver config into the driver pod
    driver_kafka = templating.load_yaml(constants.AMQ_DRIVER_KAFKA_YAML)
    driver_kafka[
        "commonConfig"
    ] = f"bootstrap.servers=my-cluster-kafka-bootstrap.{kafka_namespace}.svc.cluster.local:9092"
    json_file = f"{self.dir}/driver_kafka"
    templating.dump_data_to_json(driver_kafka, json_file)
    cmd = f"cp {json_file} {benchmark_pod_name}-driver:/"
    self.pod_obj.exec_oc_cmd(cmd)

    # Update the workload yaml (caller-supplied dict wins over the default
    # template) and copy it into the driver pod as well
    if not amq_workload_yaml:
        amq_workload_yaml = templating.load_yaml(constants.AMQ_WORKLOAD_YAML)
    yaml_file = f"{self.dir}/amq_workload.yaml"
    templating.dump_data_to_temp_yaml(amq_workload_yaml, yaml_file)
    cmd = f"cp {yaml_file} {benchmark_pod_name}-driver:/"
    self.pod_obj.exec_oc_cmd(cmd)

    # Flag used elsewhere to signal a benchmark run is in flight
    self.benchmark = True

    # Run the benchmark
    if worker:
        cmd = f"bin/benchmark --drivers /driver_kafka --workers {worker} /amq_workload.yaml"
    else:
        cmd = "bin/benchmark --drivers /driver_kafka /amq_workload.yaml"
    log.info(f"Run benchmark and running command {cmd} inside the benchmark pod ")

    if run_in_bg:
        # Hand the long-running workload off to a single worker thread and
        # return the Future so the caller can collect the result later
        executor = ThreadPoolExecutor(1)
        result = executor.submit(
            self.run_amq_workload,
            cmd,
            benchmark_pod_name,
            tiller_namespace,
            timeout,
        )
        return result

    # Foreground run: execute inside the driver pod and return raw output
    pod_obj = get_pod_obj(
        name=f"{benchmark_pod_name}-driver", namespace=tiller_namespace
    )
    result = pod_obj.exec_cmd_on_pod(
        command=cmd, out_yaml_format=False, timeout=timeout
    )
    return result