def test_sha256sum_properly_ignores_some_params():
    """Two fixture resources differing only in ignored params hash identically."""
    resources = fxt.get_anymarkup("ignores_params.yml")
    first = OR(resources[0], TEST_INT, TEST_INT_VER).annotate()
    second = OR(resources[1], TEST_INT, TEST_INT_VER).annotate()
    assert first.sha256sum() == second.sha256sum()
def test_sha256sum():
    """sha256sum is stable across annotation and detects a tampered annotation."""
    expected = "1366d8ef31f0d83419d25b446e61008b16348b9efee2216873856c49cede6965"
    resource = fxt.get_anymarkup("sha256sum.yml")
    openshift_resource = OR(resource, TEST_INT, TEST_INT_VER)
    assert openshift_resource.sha256sum() == expected
    annotated = openshift_resource.annotate()
    # annotating must not change the computed checksum
    assert annotated.sha256sum() == expected
    assert annotated.has_valid_sha256sum()
    # tamper with the stored annotation: the recomputed checksum is
    # unchanged, but the stored-vs-computed validity check must now fail
    annotated.body["metadata"]["annotations"]["qontract.sha256sum"] = "test"
    assert annotated.sha256sum() == expected
    assert not annotated.has_valid_sha256sum()
def construct_user_oc_resource(role, user):
    """Build a ClusterRoleBinding granting `role` to `user`; returns (OR, name)."""
    name = f"{role}-{user}"
    # Note: In OpenShift 4.x this resource is in rbac.authorization.k8s.io/v1
    body = {
        "apiVersion": "rbac.authorization.k8s.io/v1",
        "kind": "ClusterRoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role, "kind": "ClusterRole"},
        "subjects": [{"kind": "User", "name": user}],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def test_invalid_container_name_too_long():
    """Constructing a resource with an over-long container name must raise.

    The original asserted on verify_valid_k8s_object() *after* the OR(...)
    call inside the pytest.raises block; since the constructor raises, that
    assertion was unreachable dead code — replaced with a plain call so the
    raise may come from either the constructor or the explicit verification.
    """
    resource = fxt.get_anymarkup(
        "invalid_resource_container_name_too_long.yml")
    with pytest.raises(ConstructResourceError):
        openshift_resource = OR(resource, TEST_INT, TEST_INT_VER)
        openshift_resource.verify_valid_k8s_object()
def fetch_current_state(oc, ri, cluster, namespace, resource_type,
                        resource_type_override=None, resource_names=None):
    """Fetch current resources of `resource_type` into the inventory `ri`.

    `resource_type_override`, when given, replaces the type used for the
    cluster query, but inventory entries are still registered under
    `resource_type`. Returns early when `oc` is None or the cluster does
    not expose the API resource.
    """
    global _log_lock
    resource_type_to_use = resource_type_override or resource_type
    msg = "Fetching {}s from {}/{}".format(
        resource_type_to_use, cluster, namespace)
    # hold the lock via a context manager: the previous explicit
    # acquire()/release() pair leaked the lock if logging.debug raised
    with _log_lock:
        logging.debug(msg)
    if oc is None:
        return
    # some resource types may be used explicitly (<kind>.<api_group>).
    # we only take the first token as oc.api_resources contains only the kind.
    # this is the case created by using `managedResourceTypeOverrides`.
    if oc.api_resources and \
            resource_type_to_use.split('.')[0].lower() \
            not in [a.lower() for a in oc.api_resources]:
        msg = \
            f"[{cluster}] cluster has no API resource {resource_type_to_use}."
        logging.warning(msg)
        return
    for item in oc.get_items(resource_type_to_use, namespace=namespace,
                             resource_names=resource_names):
        openshift_resource = OR(item, QONTRACT_INTEGRATION,
                                QONTRACT_INTEGRATION_VERSION)
        ri.add_current(cluster, namespace, resource_type,
                       openshift_resource.name, openshift_resource)
def construct_oc_resource(name, source_ns):
    """NetworkPolicy `name` allowing ingress from the `source_ns` namespace."""
    from_selector = {"namespaceSelector": {"matchLabels": {"name": source_ns}}}
    body = {
        "apiVersion": "networking.k8s.io/v1",
        "kind": "NetworkPolicy",
        "metadata": {"name": name},
        "spec": {
            "ingress": [{"from": [from_selector]}],
            "podSelector": {},
            "policyTypes": ["Ingress"],
        },
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=name)
def populate_current_state(spec, ri, integration, integration_version):
    """Load the current cluster resources described by `spec` into `ri`."""
    oc = spec.oc
    if oc is None:
        return
    # skip clusters that do not expose this API resource at all
    if oc.api_resources and spec.resource not in oc.api_resources:
        logging.warning(
            f"[{spec.cluster}] cluster has no API resource {spec.resource}.")
        return
    try:
        items = oc.get_items(spec.resource, namespace=spec.namespace,
                             resource_names=spec.resource_names)
        for item in items:
            resource = OR(item, integration, integration_version)
            ri.add_current(spec.cluster, spec.namespace, spec.resource,
                           resource.name, resource)
    except StatusCodeError:
        # the cluster query failed; record it against the cluster
        ri.register_error(cluster=spec.cluster)
def construct_oc_resource(self, name, data):
    """Build an Opaque Secret `name` from `data`, base64-encoding its values."""
    encoded = {}
    for key, value in data.items():
        # keys carrying the integration prefix are internal bookkeeping
        if self.integration_prefix in key:
            continue
        if value == "":
            encoded[key] = None
        else:
            # convert to str to maintain compatibility
            # as ports are now ints and not strs
            encoded[key] = base64.b64encode(str(value).encode()).decode('utf-8')
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {
            "name": name,
            "annotations": {"qontract.recycle": "true"},
        },
        "data": encoded,
    }
    return OR(body, self.integration, self.integration_version,
              error_details=name)
def construct_sa_oc_resource(role, namespace, sa_name):
    """RoleBinding of `role` to service account `sa_name`; returns (OR, name)."""
    name = f"{role}-{namespace}-{sa_name}"
    body = {
        "apiVersion": "authorization.openshift.io/v1",
        "kind": "RoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role},
        "subjects": [
            {"kind": "ServiceAccount", "name": sa_name, "namespace": namespace}
        ],
        "userNames": [f"system:serviceaccount:{namespace}:{sa_name}"],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def construct_sa_oc_resource(role, namespace, sa_name):
    """ClusterRoleBinding of `role` to service account `sa_name`; returns (OR, name)."""
    name = f"{role}-{namespace}-{sa_name}"
    # Note: In OpenShift 4.x this resource is in rbac.authorization.k8s.io/v1
    subject = {
        "kind": "ServiceAccount",
        "name": sa_name,
        "namespace": namespace,
    }
    body = {
        "apiVersion": "rbac.authorization.k8s.io/v1",
        "kind": "ClusterRoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role, "kind": "ClusterRole"},
        "subjects": [subject],
        "userNames": [f"system:serviceaccount:{namespace}:{sa_name}"],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def test_get_owned_pods(self, oc_get_obj_root_owner, oc_get):
    """Only pods whose controller owner matches both kind and name survive."""
    owner_body = {"kind": "ownerkind", "metadata": {"name": "ownername"}}
    owner_resource = OR(owner_body, "", "")

    def pod(name, owner_kind, owner_name):
        # minimal pod body with a single controller owner reference
        return {
            "metadata": {
                "name": name,
                "ownerReferences": [{
                    "controller": True,
                    "kind": owner_kind,
                    "name": owner_name,
                }],
            }
        }

    oc_get.return_value = {
        "items": [
            pod("pod1", "ownerkind", "ownername"),
            pod("pod2", "notownerkind", "notownername"),
            pod("pod3", "ownerkind", "notownername"),
        ]
    }
    # root-owner lookups are consumed in pod order:
    # pod1 resolves to the real owner, pod2/pod3 to mismatching owners
    oc_get_obj_root_owner.side_effect = [
        owner_resource.body,
        {"kind": "notownerkind", "metadata": {"name": "notownername"}},
        {"kind": "ownerkind", "metadata": {"name": "notownername"}},
    ]
    oc = OC("server", "token", local=True)
    pods = oc.get_owned_pods("namespace", owner_resource)
    self.assertEqual(len(pods), 1)
    self.assertEqual(pods[0]["metadata"]["name"], "pod1")
def test_annotates_resource():
    """annotate() adds the qontract annotations to an unannotated resource."""
    resource = fxt.get_anymarkup("annotates_resource.yml")
    plain = OR(resource, TEST_INT, TEST_INT_VER)
    assert plain.has_qontract_annotations() is False
    annotated = plain.annotate()
    assert annotated.has_qontract_annotations() is True
def process_template(template, values):
    """Render `template` (%-style) with `values` and parse it into a resource.

    Raises:
        ConstructResourceError: when `values` lacks a key the template needs
            (chained from the underlying KeyError).
    """
    try:
        manifest = template % values
        return OR(anymarkup.parse(manifest, force_types=None),
                  QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
    except KeyError as e:
        # chain the KeyError so the offending key's traceback is preserved;
        # message text is unchanged
        raise ConstructResourceError(
            f'could not process template: missing key {e}') from e
def populate_desired_state_saas_file(self, spec, ri):
    """Process one saas-file spec and add its desired resources to `ri`.

    Returns the promotion data produced by template processing, or None
    when an error was registered (template processing or image check
    failed).
    """
    saas_file_name = spec['saas_file_name']
    cluster = spec['cluster']
    namespace = spec['namespace']
    managed_resource_types = spec['managed_resource_types']
    process_template_options = spec['process_template_options']
    check_images_options_base = spec['check_images_options_base']
    instance_name = spec['instance_name']
    upstream = spec['upstream']
    resources, html_url, promotion = \
        self._process_template(process_template_options)
    if resources is None:
        ri.register_error()
        return
    # filter resources: keep only dicts whose kind is a managed type
    resources = [resource for resource in resources
                 if isinstance(resource, dict)
                 and resource['kind'] in managed_resource_types]
    # additional processing of resources
    self._additional_resource_process(resources, html_url)
    # check images; skipped while the upstream job is still running
    skip_check_images = upstream and self.jenkins_map and \
        self.jenkins_map[instance_name].is_job_running(upstream)
    if skip_check_images:
        logging.warning(
            "skipping check_image since " +
            f"upstream job {upstream} is running"
        )
    else:
        check_images_options = {
            'html_url': html_url,
            'resources': resources
        }
        check_images_options.update(check_images_options_base)
        image_error = self._check_images(check_images_options)
        if image_error:
            ri.register_error()
            return
    # add desired resources
    for resource in resources:
        resource_kind = resource['kind']
        resource_name = resource['metadata']['name']
        oc_resource = OR(
            resource,
            self.integration,
            self.integration_version,
            caller_name=saas_file_name,
            error_details=html_url)
        ri.add_desired(
            cluster,
            namespace,
            resource_kind,
            resource_name,
            oc_resource
        )
    return promotion
def test_managed_cluster_label_ignore():
    """Server-added labels on a ManagedCluster do not affect equality or hash."""
    base_labels = {
        "cloud": "Amazon",
        "vendor": "OpenShift",
        "cluster.open-cluster-management.io/clusterset": "default",
        "name": "xxx",
    }
    # labels the hub adds at runtime; these must be ignored by the compare
    server_added = {
        "clusterID": "yyy",
        "feature.open-cluster-management.io/addon-work-manager": "available",
        "managed-by": "platform",
        "openshiftVersion": "x.y.z",
    }
    desired = {
        "apiVersion": "cluster.open-cluster-management.io/v1",
        "kind": "ManagedCluster",
        "metadata": {"labels": dict(base_labels), "name": "xxx"},
        "spec": {"hubAcceptsClient": True},
    }
    current = {
        "apiVersion": "cluster.open-cluster-management.io/v1",
        "kind": "ManagedCluster",
        "metadata": {"labels": {**base_labels, **server_added}, "name": "xxx"},
        "spec": {"hubAcceptsClient": True},
    }
    d_r = OR(desired, TEST_INT, TEST_INT_VER)
    c_r = OR(current, TEST_INT, TEST_INT_VER)
    assert d_r == c_r
    assert d_r.sha256sum() == c_r.sha256sum()
def _construct_tekton_trigger_resource(saas_file_name, env_name,
                                       tkn_pipeline_name,
                                       tkn_cluster_console_url,
                                       tkn_namespace_name,
                                       integration, integration_version):
    """Construct a resource (PipelineRun) to trigger a deployment via Tekton.

    Args:
        saas_file_name (string): SaaS file name
        env_name (string): Environment name
        tkn_pipeline_name (string): name of the Tekton Pipeline to run
        tkn_cluster_console_url (string): Cluster console URL of the cluster
                                          where the pipeline runs
        tkn_namespace_name (string): namespace where the pipeline runs
        integration (string): Name of calling integration
        integration_version (string): Version of calling integration

    Returns:
        (OpenshiftResource, string): resource to be applied and the long
            name (saas file + env combo) it was derived from
    """
    long_name = f"{saas_file_name}-{env_name}".lower()
    # using a timestamp to make the resource name unique.
    # we may want to revisit traceability, but this is compatible
    # with what we currently have in Jenkins.
    # datetime.utcnow() is deprecated (naive datetime); an aware UTC
    # datetime yields the identical strftime output.
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    ts = now_utc.strftime('%Y%m%d%H%M')  # len 12
    # max name length can be 63. leaving 12 for the timestamp - 51
    name = f"{long_name[:UNIQUE_SAAS_FILE_ENV_COMBO_LEN]}-{ts}"
    body = {
        "apiVersion": "tekton.dev/v1beta1",
        "kind": "PipelineRun",
        "metadata": {"name": name},
        "spec": {
            "pipelineRef": {"name": tkn_pipeline_name},
            "params": [
                {"name": "saas_file_name", "value": saas_file_name},
                {"name": "env_name", "value": env_name},
                {"name": "tkn_cluster_console_url",
                 "value": tkn_cluster_console_url},
                {"name": "tkn_namespace_name", "value": tkn_namespace_name},
            ],
        },
    }
    return OR(body, integration, integration_version,
              error_details=name), long_name
def test_has_owner_reference_true():
    """A resource carrying an ownerReferences entry reports an owner."""
    resource = {
        "kind": "kind",
        "metadata": {
            "name": "resource",
            "ownerReferences": [{"name": "owner"}],
        },
    }
    assert OR(resource, TEST_INT, TEST_INT_VER).has_owner_reference()
def test_has_owner_reference_true():
    """has_owner_reference() is truthy when ownerReferences is present."""
    metadata = {'name': 'resource', 'ownerReferences': [{'name': 'owner'}]}
    resource = {'kind': 'kind', 'metadata': metadata}
    openshift_resource = OR(resource, TEST_INT, TEST_INT_VER)
    assert openshift_resource.has_owner_reference()
def test_gabi_authorized_users_apply(self, mock_apply, mock_request,
                                     get_gabi_instances, oc_version,
                                     secret_read, get_settings):
    """A gabi instance expiring today leads to applying the desired resource."""
    expiration_date = date.today()
    get_gabi_instances.return_value = mock_get_gabi_instances(expiration_date)
    mock_request.side_effect = apply_request
    gabi_u.run(dry_run=False)
    expected = OR(apply['desired'], gabi_u.QONTRACT_INTEGRATION,
                  gabi_u.QONTRACT_INTEGRATION_VERSION)
    args, _ = mock_apply.call_args
    # the 6th positional argument to apply() is the desired resource
    self.assertEqual(args[5], expected)
def construct_resource(quota):
    """ResourceQuota from a quota spec; missing scopes become an empty list."""
    spec = {
        "hard": flatten(quota['resources']),
        "scopes": quota['scopes'] or [],
    }
    body = {
        "apiVersion": "v1",
        "kind": "ResourceQuota",
        "metadata": {"name": quota['name']},
        "spec": spec,
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=quota['name'])
def test_gabi_authorized_users_expire(self, mock_apply, mock_request,
                                      get_gabi_instances, oc_version,
                                      secret_read, get_settings):
    """A gabi instance that expired yesterday leads to deleting the resource."""
    # pylint: disable=no-self-use
    expiration_date = date.today() - timedelta(days=1)
    get_gabi_instances.return_value = mock_get_gabi_instances(expiration_date)
    mock_request.side_effect = delete_request
    gabi_u.run(dry_run=False)
    expected = OR(delete['desired'], gabi_u.QONTRACT_INTEGRATION,
                  gabi_u.QONTRACT_INTEGRATION_VERSION)
    args, _ = mock_apply.call_args
    # the 6th positional argument to apply() is the desired resource
    self.assertEqual(args[5], expected)
def construct_sa_token_oc_resource(name, sa_name, sa_token):
    """Opaque Secret `name` holding the service-account token (base64).

    `sa_name` is currently unused but kept for interface compatibility.
    """
    token_b64 = base64.b64encode(sa_token).decode('utf-8')
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {"name": name},
        "data": {"token": token_b64},
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=name)
def fetch_provider_vault_secret(
    path, version, name,
    labels, annotations, type,
    integration,
    integration_version,
    validate_alertmanager_config=False,
    alertmanager_config_key="alertmanager.yaml",
):
    """Build a Secret resource from the vault secret at `path`/`version`.

    Keys ending in the qontract base64 suffix are treated as already
    base64-encoded (the suffix is stripped and newlines removed); other
    non-empty values are encoded here. Empty-string values are skipped.

    Raises:
        FetchResourceError: when the resulting body is not a valid
            resource (chained from the ConstructResourceError).
    """
    # get the fields from vault
    vault_client = VaultClient()
    raw_data = vault_client.read_all({"path": path, "version": version})
    if validate_alertmanager_config:
        check_alertmanager_config(raw_data, path, alertmanager_config_key)

    # construct oc resource
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": type,
        "metadata": {
            "name": name,
            "annotations": annotations
        },
    }
    if labels:
        body["metadata"]["labels"] = labels
    # plain dict truthiness; the previous `raw_data.items()` was redundant
    if raw_data:
        body["data"] = {}

    # populate data
    for k, v in raw_data.items():
        if v == "":
            continue
        if k.lower().endswith(QONTRACT_BASE64_SUFFIX):
            # value is already base64: drop the marker suffix and newlines
            k = k[:-len(QONTRACT_BASE64_SUFFIX)]
            v = v.replace("\n", "")
        elif v is not None:
            v = base64.b64encode(v.encode()).decode("utf-8")
        body["data"][k] = v

    try:
        return OR(body, integration, integration_version, error_details=path)
    except ConstructResourceError as e:
        # chain so the original construction error stays in the traceback
        raise FetchResourceError(str(e)) from e
def build_desired_resource(tkn_object: dict[str, Any], path: str, cluster: str,
                           namespace: str) -> dict[str, Union[str, OR]]:
    '''Returns a dict with ResourceInventory.add_desired args'''
    resource = OR(tkn_object, QONTRACT_INTEGRATION,
                  QONTRACT_INTEGRATION_VERSION, error_details=path)
    return {
        'cluster': cluster,
        'namespace': namespace,
        'resource_type': resource.kind,
        'name': resource.name,
        'value': resource,
    }
def build_desired_resource(tkn_object: dict[str, Any], path: str, cluster: str,
                           namespace: str) -> dict[str, Union[str, OR]]:
    """Returns a dict with ResourceInventory.add_desired args"""
    desired = OR(
        tkn_object,
        QONTRACT_INTEGRATION,
        QONTRACT_INTEGRATION_VERSION,
        error_details=path,
    )
    args: dict[str, Union[str, OR]] = {"cluster": cluster,
                                       "namespace": namespace}
    args["resource_type"] = desired.kind
    args["name"] = desired.name
    args["value"] = desired
    return args
def construct_oc_resource(data):
    """Opaque, recyclable 'kafka' Secret with all values base64-encoded."""
    encoded = {key: base64.b64encode(value.encode()).decode('utf-8')
               for key, value in data.items()}
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {
            "name": "kafka",
            "annotations": {"qontract.recycle": "true"},
        },
        "data": encoded,
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
def construct_user_oc_resource(role, user):
    """Legacy-API RoleBinding of `role` to `user`; returns (OR, name)."""
    name = f"{role}-{user}"
    body = {
        "apiVersion": "authorization.openshift.io/v1",
        "kind": "RoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role},
        "subjects": [{"kind": "User", "name": user}],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def construct_resource(quota):
    """ResourceQuota from a quota spec; the scopes key is only set if present."""
    name = quota["name"]
    body = {
        "apiVersion": "v1",
        "kind": "ResourceQuota",
        "metadata": {"name": name},
        "spec": {"hard": flatten(quota["resources"])},
    }
    scopes = quota["scopes"]
    if scopes:
        body["spec"]["scopes"] = scopes
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=name)
def construct_resources(namespaces):
    """Attach a LimitRange resource to each namespace defining limitRanges."""
    for namespace in namespaces:
        if 'limitRanges' not in namespace:
            logging.warning(
                "limitRanges key not found on namespace %s. Skipping."
                % (namespace['name'])
            )
            continue
        # Get the linked limitRanges schema settings
        limitranges = namespace.get("limitRanges", {})
        # Build each limit item, keeping only supported non-null types
        limits = []
        for lr in limitranges['limits']:
            speclimit = {ltype: lr[ltype]
                         for ltype in SUPPORTED_LIMITRANGE_TYPES
                         if ltype in lr and lr[ltype] is not None}
            limits.append(speclimit)
        body = {
            'apiVersion': 'v1',
            'kind': 'LimitRange',
            'metadata': {'name': limitranges['name']},
            'spec': {'limits': limits},
        }
        resource = OR(body, QONTRACT_INTEGRATION,
                      QONTRACT_INTEGRATION_VERSION)
        # k8s changes an empty array to null/None. we do this here
        # to be consistent
        if len(body['spec']['limits']) == 0:
            body['spec']['limits'] = None
        # Create the resources and append them to the namespace
        namespace["resources"] = [resource]
    return namespaces
def construct_resources(namespaces):
    """Build a LimitRange resource for every namespace carrying limitRanges."""
    for namespace in namespaces:
        if "limitRanges" not in namespace:
            logging.warning(
                "limitRanges key not found on namespace %s. Skipping."
                % (namespace["name"]))
            continue
        # Get the linked limitRanges schema settings
        limitranges = namespace.get("limitRanges", {})
        spec_limits = []
        body = {
            "apiVersion": "v1",
            "kind": "LimitRange",
            "metadata": {"name": limitranges["name"]},
            "spec": {"limits": spec_limits},
        }
        # Build each limit item ignoring null ones
        for lr in limitranges["limits"]:
            entry = {}
            for ltype in SUPPORTED_LIMITRANGE_TYPES:
                value = lr.get(ltype)
                if value is not None:
                    entry[ltype] = value
            spec_limits.append(entry)
        resource = OR(body, QONTRACT_INTEGRATION,
                      QONTRACT_INTEGRATION_VERSION)
        # k8s turns an empty array into null/None; mirror that here
        # (mutating body after OR construction, exactly as before)
        if not body["spec"]["limits"]:
            body["spec"]["limits"] = None
        namespace["resources"] = [resource]
    return namespaces