def test_invalid_container_name_too_long():
    # Constructing a resource whose container name exceeds the k8s
    # length limit must raise ConstructResourceError.
    fixture = fxt.get_anymarkup("invalid_resource_container_name_too_long.yml")

    with pytest.raises(ConstructResourceError):
        res = OR(fixture, TEST_INT, TEST_INT_VER)
        assert res.verify_valid_k8s_object() is None
    def test_sha256sum():
        # The sha256sum of a resource must not change when the resource
        # is annotated, and must keep reporting the original hash even
        # after the stored annotation is tampered with.
        expected = ("1366d8ef31f0d83419d25b446e61008b"
                    "16348b9efee2216873856c49cede6965")
        res = OR(fxt.get_anymarkup("sha256sum.yml"), TEST_INT, TEST_INT_VER)
        assert res.sha256sum() == expected

        annotated = res.annotate()
        assert annotated.sha256sum() == expected
        assert annotated.has_valid_sha256sum()

        # Tamper with the stored annotation: the computed hash stays the
        # same but the stored one no longer matches it.
        annotated.body["metadata"]["annotations"]["qontract.sha256sum"] = "test"
        assert annotated.sha256sum() == expected
        assert not annotated.has_valid_sha256sum()
    def test_annotates_resource():
        # annotate() must return a resource carrying qontract annotations
        # while the source resource starts without them.
        res = OR(fxt.get_anymarkup("annotates_resource.yml"),
                 TEST_INT, TEST_INT_VER)
        assert res.has_qontract_annotations() is False

        annotated = res.annotate()
        assert annotated.has_qontract_annotations() is True
 def test_has_owner_reference_true():
     # A resource carrying a non-empty metadata.ownerReferences list is
     # reported as owned.
     body = {
         'kind': 'kind',
         'metadata': {'name': 'resource',
                      'ownerReferences': [{'name': 'owner'}]},
     }
     assert OR(body, TEST_INT, TEST_INT_VER).has_owner_reference()
 def test_has_owner_reference_true():
     # has_owner_reference() is true when metadata.ownerReferences holds
     # at least one entry.
     owner_refs = [{"name": "owner"}]
     body = {
         "kind": "kind",
         "metadata": {"name": "resource", "ownerReferences": owner_refs},
     }
     res = OR(body, TEST_INT, TEST_INT_VER)
     assert res.has_owner_reference()
def test_managed_cluster_label_ignore():
    # Extra labels present only on the current ManagedCluster (clusterID,
    # openshiftVersion, ...) must not make it differ from the desired one:
    # both the equality compare and the sha256sum must match.
    base_labels = {
        "cloud": "Amazon",
        "vendor": "OpenShift",
        "cluster.open-cluster-management.io/clusterset": "default",
        "name": "xxx",
    }
    extra_labels = {
        "clusterID": "yyy",
        "feature.open-cluster-management.io/addon-work-manager": "available",
        "managed-by": "platform",
        "openshiftVersion": "x.y.z",
    }

    def manifest(labels):
        # Build a ManagedCluster manifest with the given label set.
        return {
            "apiVersion": "cluster.open-cluster-management.io/v1",
            "kind": "ManagedCluster",
            "metadata": {"labels": labels, "name": "xxx"},
            "spec": {"hubAcceptsClient": True},
        }

    d_r = OR(manifest(dict(base_labels)), TEST_INT, TEST_INT_VER)
    c_r = OR(manifest({**base_labels, **extra_labels}), TEST_INT, TEST_INT_VER)
    assert d_r == c_r
    assert d_r.sha256sum() == c_r.sha256sum()
def construct_gabi_oc_resource(name: str, users: Iterable[str]) -> OpenshiftResource:
    """Build the gabi authorized-users ConfigMap as an OpenshiftResource.

    The ConfigMap stores the user list (one per line) under
    ``authorized-users.yaml`` and carries the ``qontract.recycle``
    annotation.
    """
    authorized_users = "\n".join(users)
    manifest = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": name,
            "annotations": {"qontract.recycle": "true"},
        },
        "data": {"authorized-users.yaml": authorized_users},
    }
    return OpenshiftResource(
        manifest,
        QONTRACT_INTEGRATION,
        QONTRACT_INTEGRATION_VERSION,
        error_details=name,
    )
    def test_sha256sum(self):
        # The resource hash is invariant under annotate() and under a
        # manual overwrite of the stored qontract.sha256sum annotation;
        # only has_valid_sha256sum() notices the tampering.
        digest = ('1366d8ef31f0d83419d25b446e61008b'
                  '16348b9efee2216873856c49cede6965')
        res = OR(fxt.get_anymarkup('sha256sum.yml'), TEST_INT, TEST_INT_VER)
        assert res.sha256sum() == digest

        annotated = res.annotate()
        assert annotated.sha256sum() == digest
        assert annotated.has_valid_sha256sum()

        annotated.body['metadata']['annotations']['qontract.sha256sum'] = 'test'
        assert annotated.sha256sum() == digest
        assert not annotated.has_valid_sha256sum()
def build_probe(provider: EndpointMonitoringProvider,
                endpoints: list[Endpoint]) -> Optional[OpenshiftResource]:
    """Render a prometheus-operator Probe resource for the provider.

    Returns None when the provider has no blackbox exporter configured.
    """
    exporter = provider.blackboxExporter
    if not exporter:
        return None

    static_config = {
        "relabelingConfigs": [
            {"action": "labeldrop", "regex": "namespace"}
        ],
        "labels": provider.metric_labels,
        "static": [ep.url for ep in endpoints],
    }
    spec: dict[str, Any] = {
        "jobName": provider.name,
        # fall back to a 10s scrape interval when none is configured
        "interval": provider.checkInterval or "10s",
        "module": exporter.module,
        "prober": parse_prober_url(exporter.exporterUrl),
        "targets": {"staticConfig": static_config},
    }
    if provider.timeout:
        spec["scrapeTimeout"] = provider.timeout

    body: dict[str, Any] = {
        "apiVersion": "monitoring.coreos.com/v1",
        "kind": "Probe",
        "metadata": {
            "name": provider.name,
            "namespace": exporter.namespace.get("name"),
            "labels": {"prometheus": "app-sre"},
        },
        "spec": spec,
    }
    return OpenshiftResource(body, QONTRACT_INTEGRATION,
                             QONTRACT_INTEGRATION_VERSION)
def test_secret_string_data():
    # canonicalize() must fold a Secret's stringData into base64-encoded
    # data and add an empty annotations map.
    secret = {
        "kind": "Secret",
        "metadata": {"name": "resource"},
        "stringData": {"k": "v"},
    }
    canonical = OR.canonicalize(secret)
    assert canonical == {
        "kind": "Secret",
        "metadata": {"annotations": {}, "name": "resource"},
        "data": {"k": "dg=="},
    }
def run(dry_run, enable_deletion=False):
    """Execute pending sql-queries and garbage-collect expired ones.

    :param dry_run: when True, only report what would be applied/deleted
    :param enable_deletion: allow removal of expired query resources
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries(settings=settings)
    remove_candidates = []
    for query in queries_list:
        query_name = query['name']

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            is_cronjob = query.get('schedule')
            if query_state != 'DONE' and not is_cronjob:
                remove_candidates.append({
                    'name': query_name,
                    'timestamp': query_state,
                    'output': query['output']
                })
            continue
        except KeyError:
            # no state recorded yet: fall through and execute the query
            pass

        image_repository = 'quay.io/app-sre'
        use_pull_secret = False
        sql_query_settings = settings.get('sqlQuery')
        if sql_query_settings:
            use_pull_secret = True
            image_repository = sql_query_settings['imageRepository']
            pull_secret = sql_query_settings['pullSecret']
            secret_resource = orb.fetch_provider_vault_secret(
                path=pull_secret['path'],
                version=pull_secret['version'],
                name=query_name,
                labels=pull_secret['labels'] or {},
                annotations=pull_secret['annotations'] or {},
                type=pull_secret['type'],
                integration=QONTRACT_INTEGRATION,
                integration_version=QONTRACT_INTEGRATION_VERSION)

        job_yaml = process_template(query,
                                    image_repository=image_repository,
                                    use_pull_secret=use_pull_secret)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)
        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=settings,
                        internal=None)

        if use_pull_secret:
            openshift_apply(dry_run, oc_map, query, secret_resource)

        if query['output'] == 'encrypted':
            # encrypted output requires a ConfigMap with the public GPG key
            render_kwargs = {
                'GPG_KEY': query['name'],
                'PUBLIC_GPG_KEY': query['public_gpg_key']
            }
            template = jinja2.Template(CONFIGMAP_TEMPLATE)
            configmap_yaml = template.render(**render_kwargs)
            configmap = yaml.safe_load(configmap_yaml)
            configmap_resource = OpenshiftResource(
                configmap, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
            openshift_apply(dry_run, oc_map, query, configmap_resource)

        openshift_apply(dry_run, oc_map, query, job_resource)

        if not dry_run:
            # record execution time so the Job is removed after JOB_TTL
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate['timestamp'] + JOB_TTL:
            continue

        try:
            query = collect_queries(query_name=candidate['name'],
                                    settings=settings)[0]
        except IndexError:
            # fix: the original message lacked a space between
            # 'present' and 'in', producing "not presentin the ..."
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               f'in the app-interface while its Job is still '
                               f'not removed from the cluster. Manual clean '
                               f'up is needed.')

        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=settings,
                        internal=None)

        resource_types = ['Job', 'Secret']
        if candidate['output'] == 'encrypted':
            resource_types.append('ConfigMap')
        for resource_type in resource_types:
            openshift_delete(dry_run, oc_map, query, resource_type,
                             enable_deletion)

        if not dry_run:
            state[candidate['name']] = 'DONE'
 def test_has_owner_reference_false():
     # Without metadata.ownerReferences the resource is not owned.
     body = {'kind': 'kind', 'metadata': {'name': 'resource'}}
     assert not OR(body, TEST_INT, TEST_INT_VER).has_owner_reference()
    def test_verify_valid_k8s_object_false():
        # An invalid manifest must already fail at construction time.
        fixture = fxt.get_anymarkup("invalid_resource.yml")

        with pytest.raises(ConstructResourceError):
            res = OR(fixture, TEST_INT, TEST_INT_VER)
            assert res.verify_valid_k8s_object() is None
    def test_verify_valid_k8s_object():
        # A well-formed manifest passes validation (returns None).
        res = OR(fxt.get_anymarkup("valid_resource.yml"),
                 TEST_INT, TEST_INT_VER)
        assert res.verify_valid_k8s_object() is None
 def test_has_owner_reference_false():
     # A resource with plain metadata reports no owner reference.
     res = OR({"kind": "kind", "metadata": {"name": "resource"}},
              TEST_INT, TEST_INT_VER)
     assert not res.has_owner_reference()
# Beispiel #16
# 0
def realize_data(dry_run, oc_map, ri,
                 take_over=False,
                 caller=None,
                 wait_for_namespace=False,
                 no_dry_run_skip_compare=False,
                 override_enable_deletion=None,
                 recycle_pods=True):
    """
    Realize the current state to the desired state.

    :param dry_run: run in dry-run mode
    :param oc_map: a dictionary containing oc client per cluster
    :param ri: a ResourceInventory containing current and desired states
    :param take_over: manage resource types in a namespace exclusively
    :param caller: name of the calling entity.
                   enables multiple running instances of the same integration
                   to deploy to the same namespace
    :param wait_for_namespace: wait for namespace to exist before applying
    :param no_dry_run_skip_compare: when running without dry-run, skip compare
    :param override_enable_deletion: override calculated enable_deletion value
    :param recycle_pods: should pods be recycled if a dependency changed
    :return: list of dicts, one per successful apply/delete action
    """
    actions = []
    # never delete anything when errors were registered: the desired
    # state may be incomplete and deletions would be destructive
    enable_deletion = False if ri.has_error_registered() else True
    # only allow to override enable_deletion if no errors were found
    if enable_deletion is True and override_enable_deletion is False:
        enable_deletion = False

    for cluster, namespace, resource_type, data in ri:
        if ri.has_error_registered(cluster=cluster):
            msg = (
                "[{}] skipping realize_data for "
                "cluster with errors"
            ).format(cluster)
            logging.error(msg)
            continue

        # desired items
        for name, d_item in data['desired'].items():
            c_item = data['current'].get(name)

            if c_item is not None:
                if not dry_run and no_dry_run_skip_compare:
                    msg = (
                        "[{}/{}] skipping compare of resource '{}/{}'."
                    ).format(cluster, namespace, resource_type, name)
                    logging.debug(msg)
                else:
                    # If resource doesn't have annotations, annotate and apply
                    if not c_item.has_qontract_annotations():
                        msg = (
                            "[{}/{}] resource '{}/{}' present "
                            "w/o annotations, annotating and applying"
                        ).format(cluster, namespace, resource_type, name)
                        logging.info(msg)

                    # don't apply if resources match
                    # if there is a caller (saas file) and this is a take over
                    # we skip the equal compare as it's not covering
                    # cases of a removed label (for example)
                    # d_item == c_item is uncommutative
                    elif not (caller and take_over) and d_item == c_item:
                        msg = (
                            "[{}/{}] resource '{}/{}' present "
                            "and matches desired, skipping."
                        ).format(cluster, namespace, resource_type, name)
                        logging.debug(msg)
                        continue

                    # don't apply if sha256sum hashes match
                    elif c_item.sha256sum() == d_item.sha256sum():
                        if c_item.has_valid_sha256sum():
                            msg = (
                                "[{}/{}] resource '{}/{}' present "
                                "and hashes match, skipping."
                            ).format(cluster, namespace, resource_type, name)
                            logging.debug(msg)
                            continue
                        else:
                            # hashes match but the stored annotation was
                            # modified manually: fall through and re-apply
                            msg = (
                                "[{}/{}] resource '{}/{}' present and "
                                "has stale sha256sum due to manual changes."
                            ).format(cluster, namespace, resource_type, name)
                            logging.info(msg)

                    logging.debug("CURRENT: " +
                                  OR.serialize(OR.canonicalize(c_item.body)))
            else:
                logging.debug("CURRENT: None")

            logging.debug("DESIRED: " +
                          OR.serialize(OR.canonicalize(d_item.body)))

            # reaching this point means the resource must be (re)applied
            try:
                apply(dry_run, oc_map, cluster, namespace,
                      resource_type, d_item, wait_for_namespace,
                      recycle_pods=recycle_pods)
                action = {
                    'action': ACTION_APPLIED,
                    'cluster': cluster,
                    'namespace': namespace,
                    'kind': resource_type,
                    'name': d_item.name
                }
                actions.append(action)
            except StatusCodeError as e:
                ri.register_error()
                msg = "[{}/{}] {} (error details: {})".format(
                    cluster, namespace, str(e), d_item.error_details)
                logging.error(msg)

        # current items
        for name, c_item in data['current'].items():
            d_item = data['desired'].get(name)
            if d_item is not None:
                # still desired: nothing to delete
                continue

            if c_item.has_qontract_annotations():
                # only delete resources deployed by this caller
                if caller and c_item.caller != caller:
                    continue
            elif not take_over:
                # unmanaged resource and we don't own the whole kind
                continue

            try:
                delete(dry_run, oc_map, cluster, namespace,
                       resource_type, name, enable_deletion)
                action = {
                    'action': ACTION_DELETED,
                    'cluster': cluster,
                    'namespace': namespace,
                    'kind': resource_type,
                    'name': name
                }
                actions.append(action)
            except StatusCodeError as e:
                ri.register_error()
                msg = "[{}/{}] {}".format(cluster, namespace, str(e))
                logging.error(msg)

    return actions
    def test_invalid_name_format(self):
        # A resource whose name violates the naming rules must raise
        # ConstructResourceError on construction.
        fixture = fxt.get_anymarkup('invalid_resource_name_format.yml')

        with pytest.raises(ConstructResourceError):
            res = OR(fixture, TEST_INT, TEST_INT_VER)
            assert res.verify_valid_k8s_object() is None
def run(dry_run, enable_deletion=False):
    """Execute pending sql-queries and garbage-collect expired ones.

    :param dry_run: when True, only report what would be applied/deleted
    :param enable_deletion: allow removal of expired query resources
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries(settings=settings)
    remove_candidates = []
    for query in queries_list:
        query_name = query["name"]

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            is_cronjob = query.get("schedule")
            if query_state != "DONE" and not is_cronjob:
                remove_candidates.append({
                    "name": query_name,
                    "timestamp": query_state,
                    "output": query["output"],
                })
            continue
        except KeyError:
            # no state recorded yet: fall through and execute the query
            pass

        image_repository = "quay.io/app-sre"
        use_pull_secret = False
        sql_query_settings = settings.get("sqlQuery")
        if sql_query_settings:
            use_pull_secret = True
            image_repository = sql_query_settings["imageRepository"]
            pull_secret = sql_query_settings["pullSecret"]
            secret_resource = orb.fetch_provider_vault_secret(
                path=pull_secret["path"],
                version=pull_secret["version"],
                name=query_name,
                labels=pull_secret["labels"] or {},
                annotations=pull_secret["annotations"] or {},
                type=pull_secret["type"],
                integration=QONTRACT_INTEGRATION,
                integration_version=QONTRACT_INTEGRATION_VERSION,
            )

        job_yaml = process_template(query,
                                    image_repository=image_repository,
                                    use_pull_secret=use_pull_secret)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)
        oc_map = OC_Map(
            namespaces=[query["namespace"]],
            integration=QONTRACT_INTEGRATION,
            settings=settings,
            internal=None,
        )

        if use_pull_secret:
            openshift_apply(dry_run, oc_map, query, secret_resource)

        if query["output"] == "encrypted":
            # encrypted output requires a ConfigMap with the public GPG key
            render_kwargs = {
                "GPG_KEY": query["name"],
                "PUBLIC_GPG_KEY": query["public_gpg_key"],
            }
            template = jinja2.Template(CONFIGMAP_TEMPLATE)
            configmap_yaml = template.render(**render_kwargs)
            configmap = yaml.safe_load(configmap_yaml)
            configmap_resource = OpenshiftResource(
                configmap, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
            openshift_apply(dry_run, oc_map, query, configmap_resource)

        openshift_apply(dry_run, oc_map, query, job_resource)

        if not dry_run:
            # record execution time so the Job is removed after JOB_TTL
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate["timestamp"] + JOB_TTL:
            continue

        try:
            query = collect_queries(query_name=candidate["name"],
                                    settings=settings)[0]
        except IndexError:
            # fix: the original message lacked a space between
            # 'present' and 'in', producing "not presentin the ..."
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               f"in the app-interface while its Job is still "
                               f"not removed from the cluster. Manual clean "
                               f"up is needed.")

        oc_map = OC_Map(
            namespaces=[query["namespace"]],
            integration=QONTRACT_INTEGRATION,
            settings=settings,
            internal=None,
        )

        resource_types = ["Job", "Secret"]
        if candidate["output"] == "encrypted":
            resource_types.append("ConfigMap")
        for resource_type in resource_types:
            openshift_delete(dry_run, oc_map, query, resource_type,
                             enable_deletion)

        if not dry_run:
            state[candidate["name"]] = "DONE"
def _realize_resource_data(unpacked_ri_item, dry_run, oc_map: OC_Map,
                           ri: ResourceInventory, take_over, caller,
                           wait_for_namespace, no_dry_run_skip_compare,
                           override_enable_deletion, recycle_pods):
    """
    Realize desired state for a single (cluster, namespace, kind) item.

    :param unpacked_ri_item: (cluster, namespace, resource_type, data)
        tuple as yielded by iterating a ResourceInventory; data holds
        'desired', 'current' and 'use_admin_token' mappings keyed by name
    :param dry_run: run in dry-run mode
    :param oc_map: OC_Map with an oc client per cluster
    :param ri: the ResourceInventory (used for error bookkeeping)
    :param take_over: manage resource types in a namespace exclusively
    :param caller: name of the calling entity; scopes deletions to
        resources deployed by the same caller
    :param wait_for_namespace: wait for namespace to exist before applying
    :param no_dry_run_skip_compare: when running without dry-run, skip compare
    :param override_enable_deletion: override calculated enable_deletion value
    :param recycle_pods: should pods be recycled if a dependency changed
    :return: list of dicts, one per successful apply/delete action
    """
    cluster, namespace, resource_type, data = unpacked_ri_item
    actions: list[dict] = []
    if ri.has_error_registered(cluster=cluster):
        msg = ("[{}] skipping realize_data for "
               "cluster with errors").format(cluster)
        logging.error(msg)
        return actions

    # never delete anything when errors were registered: the desired
    # state may be incomplete and deletions would be destructive
    enable_deletion = False if ri.has_error_registered() else True
    # only allow to override enable_deletion if no errors were found
    if enable_deletion is True and override_enable_deletion is False:
        enable_deletion = False

    # desired items
    for name, d_item in data['desired'].items():
        c_item = data['current'].get(name)

        if c_item is not None:
            if not dry_run and no_dry_run_skip_compare:
                msg = ("[{}/{}] skipping compare of resource '{}/{}'.").format(
                    cluster, namespace, resource_type, name)
                logging.debug(msg)
            else:
                # If resource doesn't have annotations, annotate and apply
                if not c_item.has_qontract_annotations():
                    msg = ("[{}/{}] resource '{}/{}' present "
                           "w/o annotations, annotating and applying").format(
                               cluster, namespace, resource_type, name)
                    logging.info(msg)

                # don't apply if resources match
                # if there is a caller (saas file) and this is a take over
                # we skip the equal compare as it's not covering
                # cases of a removed label (for example)
                # d_item == c_item is uncommutative
                elif not (caller and take_over) and d_item == c_item:
                    msg = ("[{}/{}] resource '{}/{}' present "
                           "and matches desired, skipping.").format(
                               cluster, namespace, resource_type, name)
                    logging.debug(msg)
                    continue

                # don't apply if sha256sum hashes match
                elif c_item.sha256sum() == d_item.sha256sum():
                    if c_item.has_valid_sha256sum():
                        msg = ("[{}/{}] resource '{}/{}' present "
                               "and hashes match, skipping.").format(
                                   cluster, namespace, resource_type, name)
                        logging.debug(msg)
                        continue
                    else:
                        # hashes match but the stored annotation was
                        # modified manually: fall through and re-apply
                        msg = ("[{}/{}] resource '{}/{}' present and "
                               "has stale sha256sum due to manual changes."
                               ).format(cluster, namespace, resource_type,
                                        name)
                        logging.info(msg)

                logging.debug("CURRENT: " +
                              OR.serialize(OR.canonicalize(c_item.body)))
        else:
            logging.debug("CURRENT: None")

        logging.debug("DESIRED: " + OR.serialize(OR.canonicalize(d_item.body)))

        # reaching this point means the resource must be (re)applied
        try:
            privileged = data['use_admin_token'].get(name, False)
            apply(dry_run, oc_map, cluster, namespace, resource_type, d_item,
                  wait_for_namespace, recycle_pods, privileged)
            action = {
                'action': ACTION_APPLIED,
                'cluster': cluster,
                'namespace': namespace,
                'kind': resource_type,
                'name': d_item.name,
                'privileged': privileged
            }
            actions.append(action)
        except StatusCodeError as e:
            ri.register_error()
            # redact Secret apply errors so values never reach the logs
            err = str(e) if resource_type != 'Secret' \
                else f'error applying Secret {d_item.name}: REDACTED'
            msg = f"[{cluster}/{namespace}] {err} " + \
                f"(error details: {d_item.error_details})"
            logging.error(msg)

    # current items
    for name, c_item in data['current'].items():
        d_item = data['desired'].get(name)
        if d_item is not None:
            # still desired: nothing to delete
            continue

        if c_item.has_qontract_annotations():
            # only delete resources deployed by this caller
            if caller and c_item.caller != caller:
                continue
        elif not take_over:
            # this is reached when the current resources:
            # - does not have qontract annotations (not managed)
            # - not taking over all resources of the current kind
            msg = f"[{cluster}/{namespace}] skipping " +\
                f"{resource_type}/{c_item.name}"
            logging.debug(msg)
            continue

        if c_item.has_owner_reference():
            # owned resources are cleaned up by their owner, not by us
            continue

        try:
            privileged = data['use_admin_token'].get(name, False)
            delete(dry_run, oc_map, cluster, namespace, resource_type, name,
                   enable_deletion, privileged)
            action = {
                'action': ACTION_DELETED,
                'cluster': cluster,
                'namespace': namespace,
                'kind': resource_type,
                'name': name,
                'privileged': privileged
            }
            actions.append(action)
        except StatusCodeError as e:
            ri.register_error()
            msg = "[{}/{}] {}".format(cluster, namespace, str(e))
            logging.error(msg)

    return actions