def deploy_prereq(self):
    """
    Perform generic prereqs before calling openshift-installer.

    This method performs all the basic steps necessary before invoking
    the installer: it validates the --deploy/--teardown CLI flags against
    the state of the cluster path, then generates the installer config.

    Raises:
        Failed: via ``pytest.fail`` when the flag/cluster-path
            combination is inconsistent (teardown of a non-accessible
            cluster, neither flag given for an unreachable cluster, or
            deploy requested into a non-empty cluster path).
    """
    deploy = config.RUN['cli_params']['deploy']
    teardown = config.RUN['cli_params']['teardown']
    if teardown and not deploy:
        # Teardown was requested but the cluster path does not point at
        # an accessible cluster — nothing we could tear down.
        msg = (
            "Attempting teardown of non-accessible cluster: "
            f"{self.cluster_path}"
        )
        pytest.fail(msg)
    elif not deploy and not teardown:
        msg = (
            f"The given cluster can not be connected to: {self.cluster_path}. "
            "Provide a valid --cluster-path or use --deploy to "
            "deploy a new cluster"
        )
        pytest.fail(msg)
    elif not system.is_path_empty(self.cluster_path) and deploy:
        # Refuse to deploy over leftovers from a previous cluster.
        msg = (
            f"The given cluster path is not empty: {self.cluster_path}. "
            "Provide an empty --cluster-path and --deploy to deploy "
            "a new cluster"
        )
        pytest.fail(msg)
    else:
        # Lazy %-style args so the message is only formatted when emitted.
        logger.info(
            "A testing cluster will be deployed and cluster information "
            "stored at: %s",
            self.cluster_path,
        )
    self.create_config()
def deploy_prereq(self):
    """
    Perform generic prereqs before calling openshift-installer.

    Validates the --deploy/--teardown CLI flags against the state of the
    cluster path and, for non-flexy deployments whose deployment type is
    not "managed", generates the installer configuration.

    Raises:
        Failed: via ``pytest.fail`` when the flag/cluster-path
            combination is inconsistent.
    """
    cli_params = config.RUN["cli_params"]
    deploy = cli_params["deploy"]
    teardown = cli_params["teardown"]

    # Guard clauses: each inconsistent flag combination fails fast.
    if teardown and not deploy:
        pytest.fail(
            "Attempting teardown of non-accessible cluster: "
            f"{self.cluster_path}"
        )
    if not deploy and not teardown:
        pytest.fail(
            f"The given cluster can not be connected to: {self.cluster_path}. "
            "Provide a valid --cluster-path or use --deploy to "
            "deploy a new cluster"
        )
    if deploy and not system.is_path_empty(self.cluster_path):
        pytest.fail(
            f"The given cluster path is not empty: {self.cluster_path}. "
            "Provide an empty --cluster-path and --deploy to deploy "
            "a new cluster"
        )

    logger.info(
        "A testing cluster will be deployed and cluster information "
        "stored at: %s",
        self.cluster_path,
    )
    # Flexy and managed deployments bring their own configuration.
    is_managed = config.ENV_DATA["deployment_type"] == "managed"
    if not self.flexy_deployment and not is_managed:
        self.create_config()
def deploy_ocp_prereq(self):
    """
    Perform generic prereqs before calling openshift-installer.

    Validates the --deploy/--teardown flags against the state of the
    cluster path, then renders ``install-config.yaml`` from its template,
    injects the pull secret and writes the result into the cluster path.

    Raises:
        Failed: via ``pytest.fail`` when the flag/cluster-path
            combination is inconsistent.
    """
    if self.teardown and not self.deploy:
        # Teardown requested but the cluster path is not accessible.
        msg = (
            "Attempting teardown of non-accessible cluster: "
            f"{self.cluster_path}"
        )
        pytest.fail(msg)
    elif not self.deploy and not self.teardown:
        msg = (
            f"The given cluster can not be connected to: {self.cluster_path}. "
            "Provide a valid --cluster-path or use --deploy to "
            "deploy a new cluster"
        )
        pytest.fail(msg)
    elif not system.is_path_empty(self.cluster_path) and self.deploy:
        # Refuse to deploy over leftovers from a previous cluster.
        msg = (
            f"The given cluster path is not empty: {self.cluster_path}. "
            "Provide an empty --cluster-path and --deploy to deploy "
            "a new cluster"
        )
        pytest.fail(msg)
    else:
        logger.info(
            "A testing cluster will be deployed and cluster information "
            "stored at: %s",
            self.cluster_path,
        )

    # Generate install-config from template
    logger.info("Generating install-config")
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
    _templating = templating.Templating()
    install_config_str = _templating.render_template(
        "install-config.yaml.j2", config.ENV_DATA
    )
    # Log the install config *before* adding the pull secret,
    # so we don't leak sensitive data.
    logger.info("Install config: \n%s", install_config_str)
    # Parse the rendered YAML so that we can manipulate the object directly
    install_config_obj = yaml.safe_load(install_config_str)
    with open(pull_secret_path, "r") as f:
        # Parse, then unparse, the JSON file.
        # We do this for two reasons: to ensure it is well-formatted, and
        # also to ensure it ends up as a single line.
        install_config_obj['pullSecret'] = json.dumps(json.loads(f.read()))
    install_config_str = yaml.safe_dump(install_config_obj)
    install_config = os.path.join(self.cluster_path, "install-config.yaml")
    with open(install_config, "w") as f:
        f.write(install_config_str)
def cluster(request):
    """
    Deploy an OCP cluster and install OCS (rook-ceph) resources on it.

    Validates the --deploy/--teardown CLI flags against the cluster path,
    renders and writes ``install-config.yaml``, runs openshift-installer,
    creates EBS volumes for the workers, applies the rook/ceph/CSI
    manifests, and finally waits for ceph HEALTH_OK.

    Args:
        request: pytest fixture request object — used to register the
            ``cluster_teardown`` finalizer when --teardown was given.
    """
    log.info(f"All logs located at {log_path}")
    log.info("Running OCS basic installation")
    cluster_path = config.ENV_DATA['cluster_path']
    deploy = config.RUN['cli_params']['deploy']
    teardown = config.RUN['cli_params']['teardown']
    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:
        request.addfinalizer(cluster_teardown)
        log.info("Will teardown cluster because --teardown was provided")
    # Test cluster access and if exist just skip the deployment.
    if is_cluster_running(cluster_path):
        log.info("The installation is skipped because the cluster is running")
        return
    elif teardown and not deploy:
        # Nothing to deploy; the finalizer registered above handles teardown.
        log.info("Attempting teardown of non-accessible cluster: %s",
                 cluster_path)
        return
    elif not deploy and not teardown:
        msg = "The given cluster can not be connected to: {}. ".format(
            cluster_path)
        msg += "Provide a valid --cluster-path or use --deploy to deploy a new cluster"
        pytest.fail(msg)
    elif not system.is_path_empty(cluster_path) and deploy:
        # Refuse to deploy over leftovers from a previous cluster.
        msg = "The given cluster path is not empty: {}. ".format(cluster_path)
        msg += "Provide an empty --cluster-path and --deploy to deploy a new cluster"
        pytest.fail(msg)
    else:
        log.info(
            "A testing cluster will be deployed and cluster information stored at: %s",
            cluster_path)

    # Generate install-config from template
    log.info("Generating install-config")
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")

    # TODO: check for supported platform and raise the exception if not
    # supported. Currently we support just AWS.

    _templating = templating.Templating()
    install_config_str = _templating.render_template("install-config.yaml.j2",
                                                     config.ENV_DATA)
    # Log the install config *before* adding the pull secret, so we don't leak
    # sensitive data.
    log.info(f"Install config: \n{install_config_str}")
    # Parse the rendered YAML so that we can manipulate the object directly
    install_config_obj = yaml.safe_load(install_config_str)
    with open(pull_secret_path, "r") as f:
        # Parse, then unparse, the JSON file.
        # We do this for two reasons: to ensure it is well-formatted, and
        # also to ensure it ends up as a single line.
        install_config_obj['pullSecret'] = json.dumps(json.loads(f.read()))
    install_config_str = yaml.safe_dump(install_config_obj)
    install_config = os.path.join(cluster_path, "install-config.yaml")
    with open(install_config, "w") as f:
        f.write(install_config_str)

    # Download installer
    installer = get_openshift_installer(config.DEPLOYMENT['installer_version'])
    # Download client
    get_openshift_client()

    # Deploy cluster
    log.info("Deploying cluster")
    run_cmd(f"{installer} create cluster "
            f"--dir {cluster_path} "
            f"--log-level debug")

    # Test cluster access
    if not OCP.set_kubeconfig(
            os.path.join(cluster_path, config.RUN.get('kubeconfig_location'))):
        pytest.fail("Cluster is not available!")

    # TODO: Create cluster object, add to config.ENV_DATA for other tests to
    # utilize.
    # Determine worker pattern and create ebs volumes
    # NOTE(review): reads the installer-generated terraform.tfvars; assumes
    # it is JSON-formatted with a 'cluster_id' key — TODO confirm for the
    # installer version in use.
    with open(os.path.join(cluster_path, "terraform.tfvars")) as f:
        tfvars = json.load(f)

    cluster_id = tfvars['cluster_id']
    worker_pattern = f'{cluster_id}-worker*'
    log.info(f'Worker pattern: {worker_pattern}')
    create_ebs_volumes(worker_pattern, region_name=config.ENV_DATA['region'])

    # render templates and create resources
    create_oc_resource('common.yaml', cluster_path, _templating,
                       config.ENV_DATA)
    run_cmd(f'oc label namespace {config.ENV_DATA["cluster_namespace"]} '
            f'"openshift.io/cluster-monitoring=true"')
    run_cmd(f"oc policy add-role-to-user view "
            f"system:serviceaccount:openshift-monitoring:prometheus-k8s "
            f"-n {config.ENV_DATA['cluster_namespace']}")
    # CSI RBAC for both rbd and cephfs plugins/provisioners.
    apply_oc_resource('csi-nodeplugin-rbac_rbd.yaml', cluster_path,
                      _templating, config.ENV_DATA,
                      template_dir="ocs-deployment/csi/rbd/")
    apply_oc_resource('csi-provisioner-rbac_rbd.yaml', cluster_path,
                      _templating, config.ENV_DATA,
                      template_dir="ocs-deployment/csi/rbd/")
    apply_oc_resource('csi-nodeplugin-rbac_cephfs.yaml', cluster_path,
                      _templating, config.ENV_DATA,
                      template_dir="ocs-deployment/csi/cephfs/")
    apply_oc_resource('csi-provisioner-rbac_cephfs.yaml', cluster_path,
                      _templating, config.ENV_DATA,
                      template_dir="ocs-deployment/csi/cephfs/")
    # Increased to 15 seconds as 10 is not enough
    # TODO: do the sampler function and check if resource exist
    wait_time = 15
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)
    create_oc_resource('operator-openshift-with-csi.yaml', cluster_path,
                       _templating, config.ENV_DATA)
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)
    run_cmd(f"oc wait --for condition=ready pod "
            f"-l app=rook-ceph-operator "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s")
    run_cmd(f"oc wait --for condition=ready pod "
            f"-l app=rook-discover "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s")
    create_oc_resource('cluster.yaml', cluster_path, _templating,
                       config.ENV_DATA)

    # OCP API helpers scoped to the cluster namespace for pod / CephFS
    # status queries below.
    POD = ocp.OCP(kind=constants.POD,
                  namespace=config.ENV_DATA['cluster_namespace'])
    CFS = ocp.OCP(kind=constants.CEPHFILESYSTEM,
                  namespace=config.ENV_DATA['cluster_namespace'])

    # Check for the Running status of Ceph Pods
    run_cmd(f"oc wait --for condition=ready pod "
            f"-l app=rook-ceph-agent "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s")
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mon',
                                 resource_count=3, timeout=600)
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mgr',
                                 timeout=600)
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-osd',
                                 resource_count=3, timeout=600)

    create_oc_resource('toolbox.yaml', cluster_path, _templating,
                       config.ENV_DATA)
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)
    create_oc_resource('storage-manifest.yaml', cluster_path, _templating,
                       config.ENV_DATA)
    create_oc_resource("service-monitor.yaml", cluster_path, _templating,
                       config.ENV_DATA)
    create_oc_resource("prometheus-rules.yaml", cluster_path, _templating,
                       config.ENV_DATA)
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)

    # Create MDS pods for CephFileSystem
    fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    ceph_obj = OCS(**fs_data)
    ceph_obj.create()
    assert POD.wait_for_resource(condition=constants.STATUS_RUNNING,
                                 selector='app=rook-ceph-mds',
                                 resource_count=2, timeout=600)

    # Check for CephFilesystem creation in ocp
    cfs_data = CFS.get()
    cfs_name = cfs_data['items'][0]['metadata']['name']
    if helpers.validate_cephfilesystem(cfs_name):
        log.info(f"MDS deployment is successful!")
        defaults.CEPHFILESYSTEM_NAME = cfs_name
    else:
        # Best-effort: failure is logged but does not abort the fixture.
        log.error(f"MDS deployment Failed! Please check logs!")

    # Verify health of ceph cluster
    # TODO: move destroy cluster logic to new CLI usage pattern?
    log.info("Done creating rook resources, waiting for HEALTH_OK")
    assert ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'])