def validate_absent_sb(context, sb_name):
    """Assert that no ServiceBinding named *sb_name* exists in the scenario namespace."""
    openshift = Openshift()
    found = openshift.search_resource_in_namespace(
        "servicebindings", sb_name, context.namespace.name)
    assert found is None, f"Service Binding {sb_name} is present in namespace '{context.namespace.name}'"
class DbOperator():
    """Helper for installing and checking the db-operators (PostgreSQL) operator.

    Installs an OperatorSource and an operator Subscription, and checks that
    the operator pod is running in the target namespace.
    """

    openshift = Openshift()
    cmd = Command()
    pod_name_pattern = "{name}.*"
    name = ""
    namespace = ""
    operator_source_name = "db-operators"
    operator_registry_namespace = "pmacik"
    operator_registry_channel = "stable"
    package_name = "db-operators"

    def __init__(self, name="postgresql-operator", namespace="openshift-operators"):
        self.name = name
        self.namespace = namespace

    def is_running(self, wait=False):
        """Return the operator pod's status (truthy when running), or False when no pod is found."""
        if wait:
            pod_name = self.openshift.wait_for_pod(
                self.pod_name_pattern.format(name=self.name), self.namespace)
        else:
            pod_name = self.openshift.search_pod_in_namespace(
                self.pod_name_pattern.format(name=self.name), self.namespace)
        if pod_name is not None:
            operator_pod_status = self.openshift.check_pod_status(
                pod_name, self.namespace)
            print("The pod {} is running: {}".format(self.name, operator_pod_status))
            return operator_pod_status
        else:
            return False

    def install_operator_source(self):
        """Create the OperatorSource and wait for its package manifest to appear."""
        install_src_output = self.openshift.create_operator_source(
            self.operator_source_name, self.operator_registry_namespace)
        if not re.search(
                r'.*operatorsource.operators.coreos.com/%s\s(unchanged|created)'
                % self.operator_source_name, install_src_output):
            print("Failed to create {} operator source".format(
                self.operator_source_name))
            return False
        return self.openshift.wait_for_package_manifest(
            self.package_name, self.operator_source_name,
            self.operator_registry_channel)

    def install_operator_subscription(self):
        """Create the operator Subscription and report whether `oc` confirmed it."""
        install_sub_output = self.openshift.create_operator_subscription(
            self.package_name, self.operator_source_name,
            self.operator_registry_channel)
        # BUG FIX: the match pattern previously interpolated operator_source_name;
        # the subscription is created for package_name (same value here, but the
        # subscription regex should track the package being subscribed to).
        return re.search(
            r'.*subscription.operators.coreos.com/%s\s(unchanged|created)'
            % self.package_name, install_sub_output)

    def get_package_manifest(self):
        """Return the package manifest name, asserting it matches the expected package.

        BUG FIX: the original referenced the undefined attribute `self.pkgManifest`
        (guaranteed AttributeError); the intended attribute is `self.package_name`.
        """
        cmd = 'oc get packagemanifest %s -o "jsonpath={.metadata.name}"' % self.package_name
        manifest = self.cmd.run_check_for_status(cmd, status=self.package_name)
        manifest | should_not.be_equal_to(None)
        manifest | should.equal(self.package_name)
        return manifest
def create_cr(context, cr_name):
    """Apply the CR YAML from the scenario text and assert it was created/updated.

    BUG FIX: the `.desc(...)` message was missing the f-prefix, so the literal
    text "{cr_name}" appeared in failure output instead of the CR's name.
    """
    openshift = Openshift()
    yaml = context.text
    output = openshift.oc_apply(yaml)
    result = re.search(rf'.*{cr_name}.*(created|unchanged|configured)', output)
    result | should_not.be_none.desc(f"CR {cr_name} Created/Updated")
def check_secret_key(context, sb_name, key):
    """Poll until the given key of the secret's data resolves to an empty string.

    NOTE(review): the success condition is `== ""` — i.e. the key's value is
    empty/absent. Confirm this is the intended step semantics (vs. `!= ""`).
    """
    openshift = Openshift()
    json_path = f'{{.data.{key}}}'

    def value_is_empty():
        value = openshift.get_resource_info_by_jsonpath(
            "secrets", sb_name, context.namespace.name, json_path)
        return value == ""

    # binascii.Error can be raised while the (base64) data is not yet decodable.
    polling2.poll(value_is_empty, step=5, timeout=120,
                  ignore_exceptions=(binascii.Error,))
def service_binding_is_deleted(context, sb_name):
    """Delete the named ServiceBinding from the scenario namespace."""
    Openshift().delete_service_binding(sb_name, context.namespace.name)
def get_sbr_secret_name(context):
    """Read the bound secret's name from the ServiceBinding status and return it."""
    openshift = Openshift()
    secret_name = openshift.get_resource_info_by_jsonpath(
        "servicebindings", context.sbr_name, context.namespace.name,
        "{.status.secret}")
    assert secret_name is not None, "Failed to fetch secret name from ServiceBinding"
    return secret_name
class NodeJSApp(object):
    """Node.js REST CRUD sample application deployed from a container image."""

    nodesj_app_image = "quay.io/pmacik/nodejs-rest-http-crud"
    api_end_point = 'http://{route_url}/api/status/dbNameCM'
    openshift = Openshift()
    # Matches the application pods while excluding "-build" pods.
    pod_name_pattern = "{name}.*$(?<!-build)"
    name = ""
    namespace = ""

    def __init__(self, name, namespace):
        self.cmd = Command()
        self.name = name
        self.namespace = namespace

    def is_running(self, wait=False):
        """Return True when an app pod is Running AND its deployment exists."""
        if wait:
            pod_name = self.openshift.wait_for_pod(
                self.get_pod_name_pattern(), self.namespace, timeout=300)
        else:
            pod_name = self.openshift.search_pod_in_namespace(
                self.get_pod_name_pattern(), self.namespace)
        if pod_name is None:
            return False
        pod_running = self.openshift.check_pod_status(
            pod_name, self.namespace, wait_for_status="Running")
        print("The pod {} is running: {}".format(pod_name, pod_running))
        deployment = self.openshift.search_resource_in_namespace(
            "deployments", f"{self.name}.*", self.namespace)
        has_deployment = deployment is not None
        if has_deployment:
            print("deployment is {}".format(deployment))
        return bool(pod_running and has_deployment)

    def install(self):
        """Create the app via `oc new-app` from the image, expose a route, and wait for it."""
        create_new_app_output, exit_code = self.cmd.run(
            f"oc new-app --docker-image={self.nodesj_app_image} --name={self.name} -n {self.namespace}"
        )
        assert exit_code == 0, f"Non-zero exit code ({exit_code}) returned when attempting to create a new app: {create_new_app_output}"
        assert re.search(
            f'imagestream.image.openshift.io.*{self.name}.*created',
            create_new_app_output) is not None, f"Unable to create imagestream: {create_new_app_output}"
        assert re.search(
            f'deployment.apps.*{self.name}.*created',
            create_new_app_output) is not None, f"Unable to create deployment: {create_new_app_output}"
        assert re.search(
            f'service.*{self.name}.*created',
            create_new_app_output) is not None, f"Unable to create service: {create_new_app_output}"
        assert self.openshift.expose_service_route(
            self.name, self.namespace) is not None, "Unable to expose service route"
        return self.is_running(wait=True)

    def get_db_name_from_api(self, wait=False, interval=10, timeout=300):
        """Query the app's status endpoint for the DB name, polling up to *timeout* seconds.

        With wait=True an 'N/A' placeholder response is not accepted.
        Returns None when the route is missing or polling times out.
        """
        route_url = self.openshift.get_route_host(self.name, self.namespace)
        if route_url is None:
            return None
        url = self.api_end_point.format(route_url=route_url)
        for _ in range(timeout // interval):
            response = requests.get(url=url)
            if response.status_code == 200 and (not wait or response.text != 'N/A'):
                return response.text
            time.sleep(interval)
        return None

    def get_observed_generation(self):
        """Return the deployment's observed generation from its status."""
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", self.name, self.namespace,
            "{.status.observedGeneration}")

    def get_running_pod_name(self, interval=5, timeout=300):
        """Poll for a Running pod whose name matches the app's pod pattern."""
        for _ in range(timeout // interval):
            for pod in self.openshift.get_pod_lst(self.namespace).split(" "):
                if re.fullmatch(self.get_pod_name_pattern(), pod) is not None:
                    if self.openshift.get_pod_status(pod, self.namespace) == "Running":
                        return pod
            time.sleep(interval)
        return None

    def get_redeployed_pod_name(self, old_pod_name, interval=5, timeout=300):
        """Poll for a Running app pod whose name differs from *old_pod_name*."""
        for _ in range(timeout // interval):
            for pod in self.openshift.get_pod_lst(self.namespace).split(" "):
                if pod != old_pod_name and re.fullmatch(
                        self.get_pod_name_pattern(), pod) is not None:
                    if self.openshift.get_pod_status(pod, self.namespace) == "Running":
                        return pod
            time.sleep(interval)
        return None

    def get_pod_name_pattern(self):
        """Return the pod-name regex specialized for this app's name."""
        return self.pod_name_pattern.format(name=self.name)

    def is_redeployed(self, old_generation, interval=5, timeout=300):
        """Return a Running app pod once the deployment generation exceeds *old_generation*."""
        for _ in range(timeout // interval):
            current_generation = self.get_generation()
            for pod in self.openshift.get_pod_lst(self.namespace).split(" "):
                if (current_generation > old_generation) and (re.fullmatch(
                        self.get_pod_name_pattern(), pod) is not None):
                    if self.openshift.get_pod_status(pod, self.namespace) == "Running":
                        return pod
            time.sleep(interval)
        return None

    def get_generation(self):
        """Return the deployment's metadata generation."""
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", self.name, self.namespace,
            "{.metadata.generation}")

    def format_pattern(self, pattern):
        """Specialize *pattern* with this app's name."""
        return pattern.format(name=self.name)
def then_envFrom_contains(context, app_name, sbr_name1, sbr_name2):
    """Assert the app deployment's envFrom references the two binding secrets.

    Generalized: the expected envFrom rendering is now built from the step's
    secret names instead of hard-coding "binding-request-1"/"binding-request-2"
    (backward compatible for the existing scenarios, which pass exactly those
    names).
    """
    # Give the operator time to reconcile the deployment before checking.
    time.sleep(60)
    openshift = Openshift()
    result = openshift.get_deployment_envFrom_info(app_name, context.namespace.name)
    expected = f"[map[secretRef:map[name:{sbr_name1}]] map[secretRef:map[name:{sbr_name2}]]]"
    result | should.be_equal_to(expected)\
        .desc(f'{app_name} deployment should contain secretRef: {sbr_name1} and {sbr_name2}')
def get_servicebinding_info_by_jsonpath(self, servicebinding_name, namespace, json_path):
    """Fetch a field of the named ServiceBinding via a JSONPath expression."""
    return Openshift().get_resource_info_by_jsonpath(
        "servicebinding", servicebinding_name, namespace, json_path)
class PostgresDB(object):
    """PostgreSQL Database custom resource managed by the db operator."""

    cmd = Command()
    openshift = Openshift()
    pod_name_pattern = "{name}.*"
    name = ""
    namespace = ""
    database_yaml_template = """---
apiVersion: postgresql.baiju.dev/v1alpha1
kind: Database
metadata:
  name: {name}
  namespace: {namespace}
spec:
  image: docker.io/postgres
  imageName: postgres
  dbName: {name}
"""

    def __init__(self, name, namespace):
        self.name = name
        self.namespace = namespace

    def create(self):
        """Apply the Database CR and report whether `oc` confirmed creation."""
        apply_output = self.openshift.apply(
            self.database_yaml_template.format(name=self.name,
                                               namespace=self.namespace))
        return re.search(
            r'.*database.postgresql.baiju.dev/%s\s(created|unchanged)' % self.name,
            apply_output)

    def is_running(self, wait=False):
        """Return True when the DB pod exists and the CR reports a connection IP."""
        if wait:
            pod_name = self.openshift.wait_for_pod(
                self.pod_name_pattern.format(name=self.name), self.namespace,
                timeout=120)
        else:
            pod_name = self.openshift.search_pod_in_namespace(
                self.pod_name_pattern.format(name=self.name), self.namespace)
        if pod_name is None:
            return False
        pod_status = self.openshift.check_pod_status(pod_name, self.namespace)
        print("The pod {} is running: {}".format(self.name, pod_status))
        output, exit_code = self.cmd.run(
            f'{ctx.cli} get db {self.name} -n {self.namespace} -o jsonpath="{{.status.dbConnectionIP}}"'
        )
        if exit_code == 0 and re.search(r'\d+\.\d+\.\d+\.\d+', output):
            print(f"The DB {self.name} is up and listening at {output}.")
            return True
        return False

    def get_connection_ip(self):
        """Run `oc get db` for the connection IP; returns the raw Command.run result."""
        query = f'{ctx.cli} get db {self.name} -n {self.namespace} -o jsonpath="{{.status.dbConnectionIP}}"'
        return self.cmd.run(query)

    def check_pod_status(self, status="Running"):
        """Check this DB's pod against the given status (default "Running")."""
        return self.openshift.check_pod_status(self.name, self.namespace,
                                               wait_for_status=status)
class QuarkusApplication(object):
    """Quarkus sample application deployed as a Knative service."""

    cmd = Command()
    image_name_with_tag = "quay.io/pmacik/using-spring-data-jqa-quarkus:latest"
    openshift = Openshift()
    name = ""
    namespace = ""
    deployment_name_pattern = "{name}-\\w+-deployment"

    def __init__(self, name, namespace):
        self.name = name
        self.namespace = namespace

    def install(self):
        """Create the Knative service for the app image and assert it succeeded."""
        knative_service_output = self.openshift.create_knative_service(
            self.name, self.namespace, self.image_name_with_tag)
        output = re.search(
            r'.*service.serving.knative.dev/%s\s(created|configured|unchanged)'
            % self.name, knative_service_output)
        assert output is not None, f"Knative serving is not created as the result is {knative_service_output}"
        return True

    def get_pod_name_running(self, pod_name_pattern, wait=False):
        """Find (optionally wait for) a pod matching *pod_name_pattern* in the namespace."""
        if wait:
            pod_name = self.openshift.wait_for_pod(
                self.format_pattern(pod_name_pattern), self.namespace,
                timeout=500)
        else:
            pod_name = self.openshift.search_pod_in_namespace(
                self.format_pattern(pod_name_pattern), self.namespace)
        return pod_name

    def is_imported(self, wait=False, interval=5, timeout=600):
        """Return True when the app's deployment exists and has > 0 replicas."""
        deployment_name = self.openshift.get_deployment_name_in_namespace(
            self.format_pattern(self.deployment_name_pattern), self.namespace,
            wait=wait, timeout=timeout)
        if deployment_name is None:
            return False
        deployment_replicas = self.openshift.get_resource_info_by_jsonpath(
            "deployment", deployment_name, self.namespace,
            "{.status.replicas}")
        # BUG FIX: these assertion messages were missing the f-prefix (and a
        # closing quote), so the placeholders were never interpolated.
        assert deployment_replicas.isnumeric(), \
            f"Number of replicas of deployment '{deployment_name}' should be a numerical value, but is actually: '{deployment_replicas}'"
        assert int(str(deployment_replicas)) > 0, \
            f"Number of replicas of deployment '{deployment_name}' should be greater than 0, but is actually: '{deployment_replicas}'."
        return True

    def get_response_from_api(self, endpoint, wait=False, interval=5, timeout=300):
        """GET *endpoint* on the app's Knative route; optionally poll up to *timeout* seconds.

        Returns the response body on HTTP 200, else None.
        """
        route_url = self.openshift.get_knative_route_host(self.name, self.namespace)
        if route_url is None:
            return None
        url = f"{route_url}{endpoint}"
        if wait:
            start = 0
            while ((start + interval) <= timeout):
                db_name = requests.get(url)
                if db_name.status_code == 200:
                    return db_name.text
                time.sleep(interval)
                start += interval
        else:
            db_name = requests.get(url)
            if db_name.status_code == 200:
                return db_name.text
        return None

    def get_observed_generation(self):
        """Return the app deployment's observed generation from its status."""
        deployment_name = self.openshift.get_deployment_name_in_namespace(
            self.format_pattern(self.deployment_name_pattern), self.namespace)
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", deployment_name, self.namespace,
            "{.status.observedGeneration}")

    def format_pattern(self, pattern):
        """Specialize *pattern* with this app's name."""
        return pattern.format(name=self.name)

    def get_redeployed_rev_name(self, old_rev_name, interval=5, timeout=300):
        """Poll for a Ready Knative revision whose name differs from *old_rev_name*."""
        start = 0
        while ((start + interval) <= timeout):
            revisions = self.openshift.get_revisions(self.namespace)
            for rev in revisions:
                if rev != old_rev_name and re.match(self.name, rev) is not None:
                    new_revision = self.openshift.get_last_revision_status(
                        rev, self.namespace)
                    if new_revision == 'True':
                        return rev
            time.sleep(interval)
            start += interval
        return None

    def get_rev_name_redeployed_by_generation(self, old_generation, interval=5, timeout=300):
        """Poll for a Ready revision once the deployment generation exceeds *old_generation*."""
        start = 0
        while ((start + interval) <= timeout):
            current_generation = self.get_generation()
            revisions = self.openshift.get_revisions(self.namespace)
            for rev in revisions:
                if (current_generation > old_generation) and (re.match(
                        self.name, rev) is not None):
                    new_revision = self.openshift.get_last_revision_status(
                        rev, self.namespace)
                    if new_revision == 'True':
                        return rev
            time.sleep(interval)
            start += interval
        return None

    def get_generation(self):
        """Return the app deployment's metadata generation."""
        deployment_name = self.openshift.get_deployment_name_in_namespace(
            self.format_pattern(self.deployment_name_pattern), self.namespace)
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", deployment_name, self.namespace,
            "{.metadata.generation}")

    def get_deployment_with_intermediate_secret(self, intermediate_secret_name):
        """Return the app deployment that mounts the given intermediate secret."""
        return self.openshift.get_deployment_with_intermediate_secret_of_given_pattern(
            intermediate_secret_name,
            self.format_pattern(self.deployment_name_pattern), self.namespace,
            wait=True, timeout=120)
def resource_jsonpath_value(context, json_path, res_name, json_value=""):
    """Poll until the JSONPath of the "<crd>/<name>" resource equals *json_value*."""
    openshift = Openshift()
    crd_name, resource_name = res_name.split("/")

    def value_matches():
        return openshift.get_resource_info_by_jsonpath(
            crd_name, resource_name, context.namespace.name, json_path) == json_value

    # JSONDecodeError can occur while the resource's status is not yet populated.
    polling2.poll(value_matches, step=5, timeout=800,
                  ignore_exceptions=(json.JSONDecodeError,))
def verify_injected_secretRef(context, secret_ref, cr_name, crd_name, json_path):
    """Assert that *secret_ref* was injected into the CR at *json_path*."""
    # Give the operator time to reconcile before inspecting the CR.
    time.sleep(60)
    openshift = Openshift()
    injected = openshift.get_resource_info_by_jsonpath(
        crd_name, cr_name, context.namespace.name, json_path,
        wait=True, timeout=180)
    injected | should.be_equal_to(secret_ref).desc(f'Failed to inject secretRef "{secret_ref}" in "{cr_name}" at path "{json_path}"')
def service_binding_is_deleted(context, sb_name):
    """Delete the named ServiceBinding, remembering its secret name on the context."""
    openshift = Openshift()
    # Capture the bound secret before deletion so later steps can assert on it.
    context.sb_secret = get_sbr_secret_name(context, sb_name)
    openshift.delete_service_binding(sb_name, context.namespace.name)
def service_binding_is_deleted(context, sb_name):
    """Delete the named ServiceBinding and assert the deletion succeeded."""
    outcome = Openshift().delete_service_binding(sb_name)
    assert outcome is not None, f"Unable to delete service binding '{sb_name}'"
def then_sbo_jq_is(context, jq_expression, sbr_name, json_value_regex):
    """Assert the SBR exists and its jq-extracted value matches *json_value_regex*.

    BUG FIX: both `.desc(...)` messages were missing the f-prefix, so literal
    "{sbr_name}" / "{result}" placeholders appeared in failure output instead
    of the actual values.
    """
    openshift = Openshift()
    openshift.search_resource_in_namespace("servicebindingrequests", sbr_name, context.namespace.name) | should_not.be_none.desc(f"SBR {sbr_name} exists")
    result = openshift.get_resource_info_by_jq("sbr", sbr_name, context.namespace.name, jq_expression, wait=True, timeout=600)
    result | should_not.be_none.desc("jq result")
    re.fullmatch(json_value_regex, result) | should_not.be_none.desc(f"SBO jq result \"{result}\" should match \"{json_value_regex}\"")
def operator_manifest_installed(context, backend_service):
    """Apply the backend service's operator manifest from the acceptance resources dir."""
    manifest_path = os.path.join(os.getcwd(), "test/acceptance/resources/",
                                 backend_service + ".operator.manifest.yaml")
    _ = Openshift().oc_apply_yaml_file(manifest_path)
class NodeJSApp(object):
    """Node.js REST CRUD sample application built from source via `oc new-app`."""

    nodejs_app = "https://github.com/pmacik/nodejs-rest-http-crud"
    api_end_point = 'http://{route_url}/api/status/dbNameCM'
    openshift = Openshift()
    # Matches the application pods while excluding "-build" pods.
    pod_name_pattern = "{name}.*$(?<!-build)"
    name = ""
    namespace = ""

    def __init__(self, name, namespace):
        self.cmd = Command()
        self.name = name
        self.namespace = namespace

    def is_running(self, wait=False):
        """Return True when an app pod is Running AND its deployment exists."""
        if wait:
            pod_name = self.openshift.wait_for_pod(
                self.get_pod_name_pattern(), self.namespace, timeout=180)
        else:
            pod_name = self.openshift.search_pod_in_namespace(
                self.get_pod_name_pattern(), self.namespace)
        if pod_name is None:
            return False
        pod_running = self.openshift.check_pod_status(
            pod_name, self.namespace, wait_for_status="Running")
        print("The pod {} is running: {}".format(pod_name, pod_running))
        deployment = self.openshift.search_resource_in_namespace(
            "deployments", f"{self.name}.*", self.namespace)
        has_deployment = deployment is not None
        if has_deployment:
            print("deployment is {}".format(deployment))
        return bool(pod_running and has_deployment)

    def install(self):
        """Build and deploy the app from source and expose its route.

        Returns False on any failed step rather than raising.
        """
        nodejs_app_arg = "nodejs~" + self.nodejs_app
        command = f"oc new-app {nodejs_app_arg} --name {self.name} -n {self.namespace}"
        (create_new_app_output, exit_code) = self.cmd.run(command)
        if exit_code != 0:
            return False
        expected_patterns = (
            f'imagestream.image.openshift.io\\s\"{self.name}\"\\screated',
            f'deployment.apps\\s\"{self.name}\"\\screated',
            f'service\\s\"{self.name}\"\\screated',
        )
        if not all(re.search(p, create_new_app_output) for p in expected_patterns):
            return False
        if not self.openshift.expose_service_route(self.name, self.namespace):
            print("Unable to expose the service with build config")
            return False
        return True

    def get_db_name_from_api(self, interval=5, timeout=20):
        """Query the app's status endpoint for the DB name, polling up to *timeout* seconds."""
        route_url = self.openshift.get_route_host(self.name, self.namespace)
        if route_url is None:
            return None
        url = self.api_end_point.format(route_url=route_url)
        for _ in range(timeout // interval):
            response = requests.get(url=url)
            if response.status_code == 200:
                return response.text
            time.sleep(interval)
        return None

    def get_observed_generation(self):
        """Return the deployment's observed generation from its status."""
        return self.openshift.get_resource_info_by_jsonpath(
            "deployment", self.name, self.namespace,
            "{.status.observedGeneration}")

    def get_running_pod_name(self, interval=5, timeout=60):
        """Poll for a Running pod whose name matches the app's pod pattern."""
        for _ in range(timeout // interval):
            for pod in self.openshift.get_pod_lst(self.namespace).split(" "):
                if re.fullmatch(self.get_pod_name_pattern(), pod):
                    if self.openshift.get_pod_status(pod, self.namespace) == "Running":
                        return pod
            time.sleep(interval)
        return None

    def get_redeployed_pod_name(self, old_pod_name, interval=5, timeout=60):
        """Poll for a Running app pod whose name differs from *old_pod_name*."""
        for _ in range(timeout // interval):
            for pod in self.openshift.get_pod_lst(self.namespace).split(" "):
                if pod != old_pod_name and re.fullmatch(
                        self.get_pod_name_pattern(), pod):
                    if self.openshift.get_pod_status(pod, self.namespace) == "Running":
                        return pod
            time.sleep(interval)
        return None

    def get_pod_name_pattern(self):
        """Return the pod-name regex specialized for this app's name."""
        return self.pod_name_pattern.format(name=self.name)
def check_secret_key_with_ip_value(context, secret_name, secret_key):
    """Poll until the secret key's value parses as an IP address.

    NOTE(review): this assumes get_resource_info_by_jsonpath returns the value
    in a form `ipaddress.ip_address` accepts (i.e. already decoded from the
    secret's base64 data) — confirm against the helper.
    """
    openshift = Openshift()
    json_path = f'{{.data.{secret_key}}}'

    def value_is_ip():
        value = openshift.get_resource_info_by_jsonpath(
            "secrets", secret_name, context.namespace.name, json_path)
        return ipaddress.ip_address(value)

    # ValueError is raised while the value is not (yet) a valid IP address.
    polling2.poll(value_is_ip, step=5, timeout=120,
                  ignore_exceptions=(ValueError,))
def sbo_jq_is(context, jq_expression, sbr_name, json_value):
    """Poll until the jq-extracted ServiceBinding value (parsed as JSON) equals *json_value*."""
    openshift = Openshift()

    def extracted_value_matches():
        raw = openshift.get_resource_info_by_jq(
            "servicebinding", sbr_name, context.namespace.name,
            jq_expression, wait=False)
        return json.loads(raw) == json_value

    # JSONDecodeError can occur while the binding's status is not yet populated.
    polling2.poll(extracted_value_matches, step=5, timeout=800,
                  ignore_exceptions=(json.JSONDecodeError,))
def verify_injected_secretRef(context, secret_ref, cr_name, crd_name, json_path):
    """Poll until *secret_ref* appears in the CR at *json_path*."""
    openshift = Openshift()

    def secret_ref_injected():
        return openshift.get_resource_info_by_jsonpath(
            crd_name, cr_name, context.namespace.name, json_path) == secret_ref

    polling2.poll(secret_ref_injected, step=5, timeout=400)
def verify_injected_secretRef(context, cr_name, crd_name, json_path):
    """Wait for the ServiceBinding's secret name, then poll until it is injected at *json_path*."""
    openshift = Openshift()
    # First wait for the binding to publish a (non-None) secret name.
    secret = polling2.poll(lambda: get_sbr_secret_name(context),
                           step=100, timeout=1000,
                           ignore_exceptions=(ValueError,),
                           check_success=lambda v: v is not None)

    def secret_injected():
        return openshift.get_resource_info_by_jsonpath(
            crd_name, cr_name, context.namespace.name, json_path) == secret

    polling2.poll(secret_injected, step=5, timeout=400)
def create_crd(context, crd_name):
    """Apply the CRD YAML from the scenario text and assert it was created.

    BUG FIX: the `.desc(...)` message was missing the f-prefix, so the literal
    text "{crd_name}" appeared in failure output instead of the CRD's name.
    """
    openshift = Openshift()
    yaml = context.text
    output = openshift.oc_apply(yaml)
    result = re.search(rf'.*customresourcedefinition.apiextensions.k8s.io/{crd_name}.*(created|unchanged)', output)
    result | should_not.be_none.desc(f"CRD {crd_name} Created")