def test_sha256sum_properly_ignores_some_params(self):
    """Two fixture resources differing only in ignored params hash equally."""
    resources = fxt.get_anymarkup('ignores_params.yml')
    first = OR(resources[0], TEST_INT, TEST_INT_VER).annotate()
    second = OR(resources[1], TEST_INT, TEST_INT_VER).annotate()
    assert first.sha256sum() == second.sha256sum()
def construct_sa_oc_resource(role, namespace, sa_name):
    """Build a ClusterRoleBinding granting `role` to a ServiceAccount.

    Returns a tuple of (OR-wrapped resource, binding name).
    """
    name = f"{role}-{namespace}-{sa_name}"
    # Note: In OpenShift 4.x this resource is in rbac.authorization.k8s.io/v1
    subject = {
        "kind": "ServiceAccount",
        "name": sa_name,
        "namespace": namespace,
    }
    body = {
        "apiVersion": "rbac.authorization.k8s.io/v1",
        "kind": "ClusterRoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role, "kind": "ClusterRole"},
        "subjects": [subject],
        "userNames": [f"system:serviceaccount:{namespace}:{sa_name}"],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def fetch_current_state(oc, ri, cluster, namespace, resource_type,
                        resource_type_override=None, resource_names=None):
    """Fetch current cluster resources of one type into the resource inventory.

    Args:
        oc: OpenShift client for the cluster (may be None; then a no-op).
        ri: resource inventory to populate via add_current.
        cluster: cluster name (used for logging and inventory keys).
        namespace: namespace to list resources from.
        resource_type: inventory key for the resource type.
        resource_type_override: optional type to query instead (inventory
            entries are still keyed by resource_type).
        resource_names: optional list of names to restrict the query to.
    """
    global _log_lock
    resource_type_to_use = resource_type_override or resource_type
    msg = "Fetching {}s from {}/{}".format(
        resource_type_to_use, cluster, namespace)
    # context manager guarantees the lock is released even if logging raises
    with _log_lock:
        logging.debug(msg)
    if oc is None:
        return
    # some resource types may be used explicitly (<kind>.<api_group>).
    # we only take the first token as oc.api_resources contains only the kind.
    # this is the case created by using `managedResourceTypeOverrides`.
    if oc.api_resources and \
            resource_type_to_use.split('.')[0].lower() \
            not in [a.lower() for a in oc.api_resources]:
        msg = \
            f"[{cluster}] cluster has no API resource {resource_type_to_use}."
        logging.warning(msg)
        return
    for item in oc.get_items(resource_type_to_use, namespace=namespace,
                             resource_names=resource_names):
        openshift_resource = OR(item, QONTRACT_INTEGRATION,
                                QONTRACT_INTEGRATION_VERSION)
        ri.add_current(cluster, namespace, resource_type,
                       openshift_resource.name, openshift_resource)
def construct_oc_resource(self, name, data):
    """Build an Opaque Secret OR named `name` from `data`.

    Keys containing the integration prefix are skipped.  Empty-string
    values are stored as None; all other values are base64-encoded.
    """
    encoded = {}
    for key, value in data.items():
        if self.integration_prefix in key:
            continue
        if value == "":
            encoded[key] = None
        else:
            encoded[key] = base64.b64encode(value.encode()).decode('utf-8')
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {
            "name": name,
            "annotations": {"qontract.recycle": "true"},
        },
        "data": encoded,
    }
    return OR(body, self.integration, self.integration_version,
              error_details=name)
def fetch_current_state(oc, ri, cluster, namespace, resource_type,
                        resource_type_override=None, resource_names=None):
    """Fetch current cluster resources of one type into the resource inventory.

    Args:
        oc: OpenShift client for the cluster (may be None; then a no-op).
        ri: resource inventory to populate via add_current.
        cluster: cluster name (used for logging and inventory keys).
        namespace: namespace to list resources from.
        resource_type: inventory key for the resource type.
        resource_type_override: optional type to query instead (inventory
            entries are still keyed by resource_type).
        resource_names: optional list of names to restrict the query to.
    """
    global _log_lock
    resource_type_to_use = resource_type_override or resource_type
    msg = "Fetching {}s from {}/{}".format(
        resource_type_to_use, cluster, namespace)
    # context manager guarantees the lock is released even if logging raises
    with _log_lock:
        logging.debug(msg)
    if oc is None:
        return
    for item in oc.get_items(resource_type_to_use, namespace=namespace,
                             resource_names=resource_names):
        openshift_resource = OR(item, QONTRACT_INTEGRATION,
                                QONTRACT_INTEGRATION_VERSION)
        ri.add_current(cluster, namespace, resource_type,
                       openshift_resource.name, openshift_resource)
def construct_oc_resource(name, source_ns):
    """Build a NetworkPolicy OR allowing ingress from namespace `source_ns`."""
    # NOTE(review): extensions/v1beta1 is a deprecated API group for
    # NetworkPolicy on newer clusters — confirm target cluster versions.
    ingress_rule = {
        "from": [{
            "namespaceSelector": {
                "matchLabels": {"name": source_ns}
            }
        }]
    }
    body = {
        "apiVersion": "extensions/v1beta1",
        "kind": "NetworkPolicy",
        "metadata": {"name": name},
        "spec": {
            "ingress": [ingress_rule],
            "podSelector": {},
            "policyTypes": ["Ingress"],
        },
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=name)
def fetch_provider_vault_secret(path, version, name,
                                labels, annotations, type,
                                integration, integration_version):
    """Build a Secret OR populated from a Vault secret.

    Args:
        path: vault path of the secret.
        version: vault secret version to read.
        name: name of the resulting k8s Secret.
        labels: optional metadata labels dict.
        annotations: metadata annotations dict.
        type: k8s Secret type (parameter shadows the builtin; kept for
            backward compatibility with callers using keyword arguments).
        integration: calling integration name.
        integration_version: calling integration version.

    Raises:
        FetchResourceError: if the OR resource cannot be constructed.
    """
    # get the fields from vault
    vault_client = VaultClient()
    raw_data = vault_client.read_all({'path': path, 'version': version})

    # construct oc resource
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": type,
        "metadata": {
            "name": name,
            "annotations": annotations
        }
    }
    if labels:
        body['metadata']['labels'] = labels
    if raw_data:  # idiom fix: dict truthiness instead of .items() truthiness
        body['data'] = {}

    # populate data
    for k, v in raw_data.items():
        if v == "":
            continue
        if k.lower().endswith(QONTRACT_BASE64_SUFFIX):
            # value is already base64 encoded; strip the marker suffix
            k = k[:-len(QONTRACT_BASE64_SUFFIX)]
        elif v is not None:
            v = base64.b64encode(v.encode()).decode('utf-8')
        body['data'][k] = v

    try:
        return OR(body, integration, integration_version,
                  error_details=path)
    except ConstructResourceError as e:
        # chain the cause so the original traceback is preserved
        raise FetchResourceError(str(e)) from e
def test_annotates_resource(self):
    """annotate() adds qontract annotations to a bare resource."""
    body = fxt.get_anymarkup('annotates_resource.yml')
    plain = OR(body, TEST_INT, TEST_INT_VER)
    # fresh resource carries no qontract annotations
    assert plain.has_qontract_annotations() is False
    annotated = plain.annotate()
    assert annotated.has_qontract_annotations() is True
def process_template(template, values):
    """Render `template` with %-style `values` and wrap the result as an OR.

    Raises:
        ConstructResourceError: when the template references a key that
            is missing from `values`.
    """
    try:
        manifest = template % values
        return OR(anymarkup.parse(manifest, force_types=None),
                  QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
    except KeyError as e:
        # chain the cause so the failing key's traceback is preserved
        raise ConstructResourceError(
            'could not process template: missing key {}'.format(e)) from e
def populate_current_state(spec, ri, integration, integration_version):
    """Load current items for the spec's resource type into the inventory."""
    if spec.oc is None:
        return
    items = spec.oc.get_items(spec.resource,
                              namespace=spec.namespace,
                              resource_names=spec.resource_names)
    for item in items:
        resource = OR(item, integration, integration_version)
        ri.add_current(spec.cluster, spec.namespace, spec.resource,
                       resource.name, resource)
def populate_desired_state_saas_file(self, spec, ri):
    """Process one saas-file spec and add its desired resources to ri.

    Registers an error on ri (and returns early) when template
    processing or image validation fails.

    Args:
        spec: dict with the saas file name, target cluster/namespace,
            managed resource types and pre-built option dicts.
        ri: resource inventory to populate via add_desired.
    """
    saas_file_name = spec['saas_file_name']
    cluster = spec['cluster']
    namespace = spec['namespace']
    managed_resource_types = spec['managed_resource_types']
    process_template_options = spec['process_template_options']
    check_images_options_base = spec['check_images_options_base']
    instance_name = spec['instance_name']
    upstream = spec['upstream']
    resources, html_url = \
        self._process_template(process_template_options)
    if resources is None:
        ri.register_error()
        return
    # filter resources to the kinds this saas file manages
    resources = [resource for resource in resources
                 if isinstance(resource, dict)
                 and resource['kind'] in managed_resource_types]
    # skip image checks while the upstream job is running:
    # the image may not have been pushed yet
    skip_check_images = upstream and self.jenkins_map and \
        self.jenkins_map[instance_name].is_job_running(upstream)
    if skip_check_images:
        # fixed: adjacent f-strings were joined with a redundant `+`;
        # implicit concatenation produces the same message
        logging.warning(
            f"skipping check_image since "
            f"upstream job {upstream} is running"
        )
    else:
        check_images_options = {
            'html_url': html_url,
            'resources': resources
        }
        check_images_options.update(check_images_options_base)
        image_error = self._check_images(check_images_options)
        if image_error:
            ri.register_error()
            return
    # add desired resources
    for resource in resources:
        resource_kind = resource['kind']
        resource_name = resource['metadata']['name']
        oc_resource = OR(
            resource,
            self.integration,
            self.integration_version,
            caller_name=saas_file_name,
            error_details=html_url)
        ri.add_desired(
            cluster,
            namespace,
            resource_kind,
            resource_name,
            oc_resource
        )
def generate_resource(template_file, values):
    """Render a packaged template, build a jsonnet object, and wrap as OR."""
    env = template.get_package_environment()
    tpl = env.get_template(template_file)
    # expose helper callables to the template
    tpl.globals['labels_to_selectors'] = labels_to_selectors
    tpl.globals['build_rules_aoa'] = build_rules_aoa
    tpl.globals['load_json'] = json.loads
    rendered = tpl.render(values)
    obj = jsonnet.generate_object(rendered)
    return OR(obj, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
def construct_sa_token_oc_resource(name, sa_name, sa_token):
    """Build an Opaque Secret OR holding a service account token.

    `sa_token` is passed directly to base64.b64encode, so it is expected
    to be bytes.  `sa_name` is unused but kept for interface stability.
    """
    token_b64 = base64.b64encode(sa_token).decode('utf-8')
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {"name": name},
        "data": {"token": token_b64},
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=name)
def construct_resource(quota):
    """Build a ResourceQuota OR from a quota definition dict."""
    # a null scopes entry becomes an empty list
    scopes = quota['scopes'] or []
    body = {
        "apiVersion": "v1",
        "kind": "ResourceQuota",
        "metadata": {"name": quota['name']},
        "spec": {
            "hard": flatten(quota['resources']),
            "scopes": scopes,
        },
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
              error_details=quota['name'])
def populate_current_state(spec, ri, integration, integration_version):
    """Load current items for the spec into ri, skipping unknown API types."""
    oc = spec.oc
    if oc is None:
        return
    # exact-match membership test against the cluster's known API resources
    api_resources = oc.api_resources
    if api_resources and spec.resource not in api_resources:
        msg = f"[{spec.cluster}] cluster has no API resource {spec.resource}."
        logging.warning(msg)
        return
    items = oc.get_items(spec.resource,
                         namespace=spec.namespace,
                         resource_names=spec.resource_names)
    for item in items:
        resource = OR(item, integration, integration_version)
        ri.add_current(spec.cluster, spec.namespace, spec.resource,
                       resource.name, resource)
def construct_oc_resource(data):
    """Build the 'kafka' Opaque Secret OR with base64-encoded values."""
    encoded = {
        key: base64.b64encode(value.encode()).decode('utf-8')
        for key, value in data.items()
    }
    body = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {
            "name": "kafka",
            "annotations": {"qontract.recycle": "true"},
        },
        "data": encoded,
    }
    return OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
def construct_resources(namespaces):
    """Attach a LimitRange resource to each namespace that defines one.

    Mutates each namespace dict in place (sets "resources") and returns
    the same list.
    """
    for namespace in namespaces:
        if 'limitRanges' not in namespace:
            logging.warning(
                "limitRanges key not found on namespace %s. Skipping."
                % (namespace['name'])
            )
            continue
        # Get the linked limitRanges schema settings
        limitranges = namespace.get("limitRanges", {})
        body = {
            'apiVersion': 'v1',
            'kind': 'LimitRange',
            'metadata': {'name': limitranges['name']},
            'spec': {'limits': []},
        }
        # Build each limit item ignoring null ones
        for limit in limitranges['limits']:
            spec_limit = {
                ltype: limit[ltype]
                for ltype in SUPPORTED_LIMITRANGE_TYPES
                if ltype in limit and limit[ltype] is not None
            }
            body['spec']['limits'].append(spec_limit)
        resource = OR(body, QONTRACT_INTEGRATION,
                      QONTRACT_INTEGRATION_VERSION)
        # k8s changes an empty array to null/None. we do this here
        # to be consistent. mutating `body` after constructing `resource`
        # is deliberate: the OR wraps this same dict.
        if len(body['spec']['limits']) == 0:
            body['spec']['limits'] = None
        # Create the resources and append them to the namespace
        namespace["resources"] = [resource]
    return namespaces
def construct_user_oc_resource(role, user):
    """Build a RoleBinding OR binding `user` to `role` (OpenShift 3.x API).

    Returns a tuple of (OR-wrapped resource, binding name).
    """
    name = f"{role}-{user}"
    body = {
        "apiVersion": "authorization.openshift.io/v1",
        "kind": "RoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role},
        "subjects": [{"kind": "User", "name": user}],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def populate_desired_state(self, ri):
    """Populate ri with desired resources from all saas files.

    Iterates saas files -> resource templates -> targets, processes each
    template and adds the resulting managed resources to the inventory.
    Registers an error on ri when an image check fails.
    """
    for saas_file in self.saas_files:
        # per-saas-file clients (github token / image auth may differ)
        self.github = self._initiate_github(saas_file)
        self.image_auth = self._initiate_image_auth(saas_file)
        managed_resource_types = saas_file['managedResourceTypes']
        resource_templates = saas_file['resourceTemplates']
        # iterate over resource templates (multiple per saas_file)
        for rt in resource_templates:
            url = rt['url']
            path = rt['path']
            hash_length = rt['hash_length']
            parameters = self._collect_parameters(rt)
            # iterate over targets (each target is a namespace)
            for target in rt['targets']:
                cluster, namespace = \
                    self._get_cluster_and_namespace(target)
                resources = self._process_template(url, path, hash_length,
                                                   target, parameters)
                # add desired resources
                for resource in resources:
                    resource_kind = resource['kind']
                    # only kinds declared as managed by this saas file
                    if resource_kind not in managed_resource_types:
                        continue
                    # check images; skip (and flag) resources whose
                    # images fail validation
                    image_error = self._check_images(resource)
                    if image_error:
                        ri.register_error()
                        continue
                    resource_name = resource['metadata']['name']
                    oc_resource = OR(
                        resource,
                        self.integration,
                        self.integration_version,
                        error_details=resource_name)
                    ri.add_desired(
                        cluster,
                        namespace,
                        resource_kind,
                        resource_name,
                        oc_resource
                    )
def fetch_provider_resource(path, tfunc=None, tvars=None,
                            validate_json=False):
    """Fetch a resource file from qontract-server and wrap it as an OR.

    Args:
        path: app-interface path of the resource file.
        tfunc: optional template function applied to the raw content.
        tvars: variables passed to tfunc.
        validate_json: when True, require every entry under body['data']
            to parse as JSON.

    Raises:
        FetchResourceError: on fetch, parse, or construction failure
            (original exceptions are chained as __cause__).
    """
    gqlapi = gql.get_api()

    # get resource data
    try:
        resource = gqlapi.get_resource(path)
    except gql.GqlGetResourceError as e:
        raise FetchResourceError(str(e)) from e

    content = resource['content']
    if tfunc:
        content = tfunc(content, tvars)

    try:
        resource['body'] = anymarkup.parse(content, force_types=None)
        # a TypeError from .pop means the parsed body is not a mapping
        resource['body'].pop('$schema', None)
    except TypeError as e:
        body_type = type(resource['body']).__name__
        e_msg = f"invalid resource type {body_type} found in path: {path}"
        raise FetchResourceError(e_msg) from e
    except anymarkup.AnyMarkupError as e:
        e_msg = f"Could not parse data. Skipping resource: {path}"
        raise FetchResourceError(e_msg) from e

    if validate_json:
        files = resource['body']['data']
        for file_name, file_content in files.items():
            try:
                json.loads(file_content)
            except ValueError as e:
                e_msg = f"invalid json in {path} under {file_name}"
                raise FetchResourceError(e_msg) from e

    try:
        return OR(resource['body'],
                  QONTRACT_INTEGRATION,
                  QONTRACT_INTEGRATION_VERSION,
                  error_details=path)
    except ConstructResourceError as e:
        raise FetchResourceError(str(e)) from e
def test_sha256sum(self):
    """sha256sum is stable under annotation and tampering is detected."""
    expected = \
        '1366d8ef31f0d83419d25b446e61008b16348b9efee2216873856c49cede6965'
    resource = fxt.get_anymarkup('sha256sum.yml')
    openshift_resource = OR(resource, TEST_INT, TEST_INT_VER)
    assert openshift_resource.sha256sum() == expected
    # annotating does not change the computed sum
    annotated = openshift_resource.annotate()
    assert annotated.sha256sum() == expected
    assert annotated.has_valid_sha256sum()
    # tamper with the stored annotation: the computed sum is unchanged,
    # but validation against the stored value must now fail
    annotated.body['metadata']['annotations']['qontract.sha256sum'] = \
        'test'
    assert annotated.sha256sum() == expected
    assert not annotated.has_valid_sha256sum()
def construct_user_oc_resource(role, user):
    """Build a ClusterRoleBinding OR binding `user` to `role` (4.x RBAC).

    Returns a tuple of (OR-wrapped resource, binding name).
    """
    name = f"{role}-{user}"
    # Note: In OpenShift 4.x this resource is in rbac.authorization.k8s.io/v1
    body = {
        "apiVersion": "rbac.authorization.k8s.io/v1",
        "kind": "ClusterRoleBinding",
        "metadata": {"name": name},
        "roleRef": {"name": role, "kind": "ClusterRole"},
        "subjects": [{"kind": "User", "name": user}],
    }
    resource = OR(body, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                  error_details=name)
    return resource, name
def test_verify_valid_k8s_object(self):
    """A valid fixture passes k8s object verification (returns None)."""
    body = fxt.get_anymarkup('valid_resource.yml')
    resource = OR(body, TEST_INT, TEST_INT_VER)
    assert resource.verify_valid_k8s_object() is None
def fetch_provider_resource(path, tfunc=None, tvars=None,
                            validate_json=False,
                            add_path_to_prom_rules=True):
    """Fetch a resource file from qontract-server and wrap it as an OR.

    Args:
        path: app-interface path of the resource file.
        tfunc: optional template function applied to the raw content.
        tvars: variables passed to tfunc.
        validate_json: when True, require every entry under body['data']
            to parse as JSON.
        add_path_to_prom_rules: when True, annotate each PrometheusRule
            alert rule with an html_url pointing at the source file.

    Raises:
        FetchResourceError: on fetch, parse, or construction failure
            (original exceptions are chained as __cause__).
    """
    gqlapi = gql.get_api()

    # get resource data
    try:
        resource = gqlapi.get_resource(path)
    except gql.GqlGetResourceError as e:
        raise FetchResourceError(str(e)) from e

    content = resource['content']
    if tfunc:
        content = tfunc(content, tvars)

    try:
        resource['body'] = anymarkup.parse(content, force_types=None)
        # a TypeError from .pop means the parsed body is not a mapping
        resource['body'].pop('$schema', None)
    except TypeError as e:
        body_type = type(resource['body']).__name__
        e_msg = f"invalid resource type {body_type} found in path: {path}"
        raise FetchResourceError(e_msg) from e
    except anymarkup.AnyMarkupError as e:
        e_msg = f"Could not parse data. Skipping resource: {path}"
        raise FetchResourceError(e_msg) from e

    if validate_json:
        files = resource['body']['data']
        for file_name, file_content in files.items():
            try:
                json.loads(file_content)
            except ValueError as e:
                e_msg = f"invalid json in {path} under {file_name}"
                raise FetchResourceError(e_msg) from e

    if add_path_to_prom_rules:
        body = resource['body']
        if body['kind'] == 'PrometheusRule':
            try:
                groups = body['spec']['groups']
                for group in groups:
                    rules = group['rules']
                    for rule in rules:
                        annotations = rule.get('annotations')
                        if not annotations:
                            continue
                        # TODO(mafriedm): make this better
                        rule['annotations']['html_url'] = \
                            f"{APP_INT_BASE_URL}/blob/master/resources{path}"
            except Exception:
                # best-effort: a malformed rule must not fail the fetch.
                # fixed: previous code concatenated body['name'], which does
                # not exist on k8s objects and itself raised KeyError inside
                # this handler (and lacked a separating space).
                logging.warning(
                    f"could not add html_url annotation to "
                    f"{body.get('metadata', {}).get('name')}")

    try:
        return OR(resource['body'],
                  QONTRACT_INTEGRATION,
                  QONTRACT_INTEGRATION_VERSION,
                  error_details=path)
    except ConstructResourceError as e:
        raise FetchResourceError(str(e)) from e
def test_verify_valid_k8s_object_false(self):
    """An invalid fixture raises ConstructResourceError.

    The error may surface at construction time or during verification;
    either satisfies the pytest.raises context. The previous
    `assert ... is None` inside the raises block was unreachable when
    OR raised and misleading otherwise, so it has been removed.
    """
    resource = fxt.get_anymarkup('invalid_resource.yml')
    with pytest.raises(ConstructResourceError):
        openshift_resource = OR(resource, TEST_INT, TEST_INT_VER)
        openshift_resource.verify_valid_k8s_object()
def generate_resource(template_file, values):
    """Render the template and wrap the generated jsonnet object as an OR."""
    rendered = render_template(template_file, values)
    obj = jsonnet.generate_object(rendered)
    return OR(obj, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
def populate_desired_state_saas_file(self, saas_file, ri):
    """Populate ri with desired resources from one saas file.

    Iterates resource templates -> targets, processes each template and
    adds managed resources to the inventory. Registers an error on ri
    when template processing or an image check fails, and continues with
    the remaining targets/resources.
    """
    saas_file_name = saas_file['name']
    logging.debug(f"populating desired state for {saas_file_name}")
    # per-saas-file clients (github token / image auth may differ)
    github = self._initiate_github(saas_file)
    image_auth = self._initiate_image_auth(saas_file)
    managed_resource_types = saas_file['managedResourceTypes']
    image_patterns = saas_file['imagePatterns']
    resource_templates = saas_file['resourceTemplates']
    saas_file_parameters = self._collect_parameters(saas_file)
    # iterate over resource templates (multiple per saas_file)
    for rt in resource_templates:
        rt_name = rt['name']
        url = rt['url']
        path = rt['path']
        # template-level hash length falls back to the global setting
        hash_length = rt.get('hash_length') or self.settings['hashLength']
        parameters = self._collect_parameters(rt)
        # template parameters override saas-file-level parameters
        consolidated_parameters = {}
        consolidated_parameters.update(saas_file_parameters)
        consolidated_parameters.update(parameters)
        # iterate over targets (each target is a namespace)
        for target in rt['targets']:
            cluster, namespace = \
                self._get_cluster_and_namespace(target)
            process_template_options = {
                'saas_file_name': saas_file_name,
                'resource_template_name': rt_name,
                'url': url,
                'path': path,
                'hash_length': hash_length,
                'target': target,
                'parameters': consolidated_parameters,
                'github': github
            }
            resources, html_url = \
                self._process_template(process_template_options)
            if resources is None:
                ri.register_error()
                continue
            # add desired resources
            for resource in resources:
                resource_kind = resource['kind']
                # only kinds declared as managed by this saas file
                if resource_kind not in managed_resource_types:
                    continue
                # check images; flag and skip resources whose images
                # fail validation
                check_images_options = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'html_url': html_url,
                    'resource': resource,
                    'image_auth': image_auth,
                    'image_patterns': image_patterns
                }
                image_error = self._check_images(check_images_options)
                if image_error:
                    ri.register_error()
                    continue
                resource_name = resource['metadata']['name']
                oc_resource = OR(resource,
                                 self.integration,
                                 self.integration_version,
                                 caller_name=saas_file_name,
                                 error_details=html_url)
                ri.add_desired(cluster, namespace,
                               resource_kind, resource_name, oc_resource)