def cleanup(self):
    """
    Cleanup the backend resources in case of external (vault) KMS

    """
    if not self.vault_server:
        self.gather_init_vault_conf()
    self.update_vault_env_vars()
    try:
        # We need to set the vault namespace in the env
        # so that path, policy and token are accessed
        # within the namespace context
        if config.ENV_DATA.get("use_vault_namespace"):
            self.get_vault_namespace()
            os.environ["VAULT_NAMESPACE"] = self.vault_namespace
        # get vault path
        self.get_vault_backend_path()
        # from token secret get token
        self.get_vault_path_token()
        # from token get policy
        if not self.cluster_id:
            self.cluster_id = get_running_cluster_id()
        self.get_vault_policy()
    except (CommandFailed, IndexError):
        logger.error(
            "Error occurred during KMS resource info gathering, "
            "skipping vault cleanup"
        )
        return

    # Delete the policy and backend path from vault;
    # we need the root token of vault in the env
    self.remove_vault_backend_path()
    self.remove_vault_policy()
    if self.vault_namespace:
        self.remove_vault_namespace()
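# A minimal standalone sketch (not part of the framework) of the namespace
# scoping used above: the `vault` CLI reads VAULT_NAMESPACE from the
# environment, so exporting it before running any command scopes path, policy
# and token operations to that namespace. The namespace name below is
# illustrative only.
import os
import shlex
import subprocess

os.environ["VAULT_NAMESPACE"] = "demo-namespace"  # hypothetical namespace
# every subsequent vault CLI call inherits the namespace from the environment
out = subprocess.check_output(shlex.split("vault secrets list"))
print(out.decode())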
def vault_create_backend_path(self):
    """
    Create vault path to be used by OCS

    Raises:
        VaultOperationError: if the path creation fails

    """
    if config.ENV_DATA.get("VAULT_BACKEND_PATH"):
        self.vault_backend_path = config.ENV_DATA.get("VAULT_BACKEND_PATH")
    else:
        # Generate the backend path name using the prefix "ocs":
        # "ocs-<cluster-id>-<cluster-name>"
        self.cluster_id = get_running_cluster_id()
        self.vault_backend_path = (
            f"{constants.VAULT_DEFAULT_PATH_PREFIX}-{self.cluster_id}-"
            f"{get_cluster_name(config.ENV_DATA['cluster_path'])}"
        )
    cmd = f"vault secrets enable -path={self.vault_backend_path} kv"
    out = subprocess.check_output(shlex.split(cmd))
    if "Success" in out.decode():
        logger.info(f"vault path {self.vault_backend_path} created")
    else:
        raise VaultOperationError(
            f"Failed to create path {self.vault_backend_path}"
        )
    self.vault_create_policy()
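# Hedged usage sketch (illustrative, not framework code): `vault secrets
# disable` is the documented CLI inverse of the `vault secrets enable` call
# above, so a cleanup counterpart can drop the KV mount the same way. The
# backend path below is a made-up "<prefix>-<cluster-id>-<cluster-name>".
import shlex
import subprocess

backend_path = "ocs-1234-mycluster"  # hypothetical path, for illustration only
subprocess.check_output(shlex.split(f"vault secrets disable {backend_path}"))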
def setup(self):
    """
    Setting up the environment for each performance and scale test

    """
    log.info("Setting up test environment")
    self.crd_data = None  # place holder for the Benchmark CRD data
    self.es = None  # place holder for the in-cluster elasticsearch deployment
    self.es_backup = None  # place holder for the elasticsearch backup
    self.main_es = None  # place holder for the main elasticsearch object
    self.benchmark_obj = None  # place holder for the benchmark object
    self.client_pod = None  # place holder for the client pod object
    self.dev_mode = config.RUN["cli_params"].get("dev_mode")
    self.pod_obj = OCP(kind="pod", namespace=benchmark_operator.BMO_NAME)

    # Place holders for the test results file (all sub-tests together)
    self.results_path = ""
    self.results_file = ""

    # Collecting all Environment configuration, Software & Hardware,
    # for the performance report.
    self.environment = get_environment_info()
    self.environment["clusterID"] = get_running_cluster_id()

    self.get_osd_info()
    self.get_node_info(node_type="master")
    self.get_node_info(node_type="worker")
def setup(self):
    """
    Setting up the environment for each performance and scale test

    """
    log.info("Setting up test environment")
    self.es = None  # place holder for the in-cluster elasticsearch deployment
    self.es_backup = None  # place holder for the elasticsearch backup
    self.main_es = None  # place holder for the main elasticsearch object
    self.benchmark_obj = None  # place holder for the benchmark object
    self.client_pod = None  # place holder for the client pod object
    self.dev_mode = config.RUN["cli_params"].get("dev_mode")
    self.pod_obj = OCP(kind="pod", namespace=benchmark_operator.BMO_NAME)
    self.initialize_test_crd()

    # Place holder for the test results file (all sub-tests together)
    self.results_file = ""

    # All tests need a uuid for the ES results; the benchmark-operator base
    # test will overwrite it with the uuid pulled from the benchmark pod
    self.uuid = uuid4().hex

    # Getting the full path for the test logs
    self.full_log_path = os.environ.get("PYTEST_CURRENT_TEST").split(" ")[0]
    self.full_log_path = (
        self.full_log_path.replace("::", "/").replace("[", "-").replace("]", "")
    )
    self.full_log_path = os.path.join(ocsci_log_path(), self.full_log_path)
    log.info(f"Logs file path name is : {self.full_log_path}")

    # Getting the results path as a list
    self.results_path = self.full_log_path.split("/")
    self.results_path.pop()

    # List of test(s) for checking the results
    self.workloads = []

    # Collecting all Environment configuration, Software & Hardware,
    # for the performance report.
    self.environment = get_environment_info()
    self.environment["clusterID"] = get_running_cluster_id()

    self.ceph_cluster = CephCluster()
    self.used_capacity = self.get_cephfs_data()

    self.get_osd_info()
    self.get_node_info(node_type="master")
    self.get_node_info(node_type="worker")
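# Worked example (illustrative values only) of the log-path derivation above.
# pytest sets PYTEST_CURRENT_TEST to something like
# "tests/test_fio.py::TestFIO::test_fio[cephfs] (call)"; splitting on the
# first space drops the phase suffix, then "::", "[" and "]" are normalized
# into path-safe characters.
raw = "tests/test_fio.py::TestFIO::test_fio[cephfs] (call)"
path = raw.split(" ")[0].replace("::", "/").replace("[", "-").replace("]", "")
assert path == "tests/test_fio.py/TestFIO/test_fio-cephfs"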
def cleanup(self):
    """
    Cleanup the backend resources in case of external (vault) KMS

    """
    if not self.vault_server:
        self.gather_init_vault_conf()
    self.update_vault_env_vars()
    # get vault path
    self.get_vault_backend_path()
    # from token secret get token
    self.get_vault_path_token()
    # from token get policy
    if not self.cluster_id:
        self.cluster_id = get_running_cluster_id()
    self.get_vault_policy()

    # Delete the policy and backend path from vault;
    # we need the root token of vault in the env
    self.remove_vault_backend_path()
    self.remove_vault_policy()
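# Hedged note (assumption, not framework code): the removal calls above are
# root-protected vault operations, so the CLI expects a privileged token in
# the environment. With the vault CLI that is the standard VAULT_TOKEN (plus
# VAULT_ADDR) variables; the values below are placeholders.
import os

os.environ["VAULT_ADDR"] = "https://vault.example.com:8200"  # illustrative address
os.environ["VAULT_TOKEN"] = "<root-or-admin-token>"  # placeholder, never hardcode real tokens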
def setup(self):
    """
    Setting up the environment for each performance and scale test

    """
    log.info("Setting up test environment")
    self.crd_data = None  # place holder for the Benchmark CRD data
    self.es = None  # place holder for the in-cluster elasticsearch deployment
    self.es_backup = None  # place holder for the elasticsearch backup
    self.main_es = None  # place holder for the main elasticsearch object
    self.benchmark_obj = None  # place holder for the benchmark object
    self.client_pod = None  # place holder for the client pod object
    self.dev_mode = config.RUN["cli_params"].get("dev_mode")
    self.pod_obj = OCP(kind="pod", namespace=benchmark_operator.BMO_NAME)

    # Place holders for the test results file (all sub-tests together)
    self.results_path = ""
    self.results_file = ""

    # Getting the full path for the test logs
    self.full_log_path = os.environ.get("PYTEST_CURRENT_TEST").split("]")[0]
    self.full_log_path = self.full_log_path.replace("::", "/").replace("[", "-")
    log.info(f"Logs file path name is : {self.full_log_path}")

    # Collecting all Environment configuration, Software & Hardware,
    # for the performance report.
    self.environment = get_environment_info()
    self.environment["clusterID"] = get_running_cluster_id()

    self.ceph_cluster = CephCluster()
    self.used_capacity = self.get_cephfs_data()

    self.get_osd_info()
    self.get_node_info(node_type="master")
    self.get_node_info(node_type="worker")
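# Note (observation on the variant above, illustrative value): splitting on
# "]" instead of the first space yields the same relative path for a
# parametrized test, e.g.
#   "tests/test_fio.py::TestFIO::test_fio[cephfs] (call)"
#   -> "tests/test_fio.py/TestFIO/test_fio-cephfs"
# but when a test has no parameters there is no "]", so the " (call)" phase
# suffix survives in the path; the space-split form handles both cases.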
def setup(self):
    """
    Setting up the environment for each performance and scale test

    """
    log.info("Setting up test environment")
    self.crd_data = None  # place holder for the Benchmark CRD data
    self.es_backup = None  # place holder for the elasticsearch backup
    self.main_es = None  # place holder for the main elasticsearch object
    self.benchmark_obj = None  # place holder for the benchmark object
    self.client_pod = None  # place holder for the client pod object
    self.dev_mode = config.RUN["cli_params"].get("dev_mode")
    self.pod_obj = OCP(kind="pod", namespace=benchmark_operator.BMO_NAME)

    # Collecting all Environment configuration, Software & Hardware,
    # for the performance report.
    self.environment = get_environment_info()
    self.environment["clusterID"] = get_running_cluster_id()

    self.get_osd_info()
    self.get_node_info(node_type="master")
    self.get_node_info(node_type="worker")