コード例 #1
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_partition_manifests_add_delete(self):
        """Local and server manifests are orthogonal sets.

        This must produce a plan where all local resources will be created, all
        cluster resources deleted and none patched.

        """
        # Disjoint local and cluster manifests.
        loc = {
            MetaManifest('v1', 'Deployment', 'ns2', 'bar'): "0",
            MetaManifest('v1', 'Namespace', None, 'ns2'): "1",
        }
        srv = {
            MetaManifest('v1', 'Deployment', 'ns1', 'foo'): "2",
            MetaManifest('v1', 'Namespace', None, 'ns1'): "3",
            MetaManifest('v1', 'Namespace', None, 'ns3'): "4",
        }

        # Expected plan: create everything local, delete everything on the
        # cluster, patch nothing.
        expected = DeploymentPlan(
            create=[
                MetaManifest('v1', 'Deployment', 'ns2', 'bar'),
                MetaManifest('v1', 'Namespace', None, 'ns2'),
            ],
            patch=[],
            delete=[
                MetaManifest('v1', 'Deployment', 'ns1', 'foo'),
                MetaManifest('v1', 'Namespace', None, 'ns1'),
                MetaManifest('v1', 'Namespace', None, 'ns3'),
            ],
        )
        assert sq.partition_manifests(loc, srv) == (expected, False)
コード例 #2
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_match_api_version_nothing_to_do(self, m_fetch, k8sconfig):
        """Test various cases where the function must not do anything.

        There are two cases where it must not download a resource from K8s again:
          1) Local/Server use identical API endpoints for the resource.
          2) Resource exists either on server or locally but not both.

        """
        fun = square.square.match_api_version

        # Empty inputs: nothing to compare, nothing to download.
        # Must not have downloaded anything.
        srv, err = fun(k8sconfig, {}, {})
        assert not err and srv == {}
        assert not m_fetch.called

        # Local and server manifests are identical - must not synchronise anything.
        local_in = {
            MetaManifest("v1", "Namespace", None, "ns1"): {"ns-loc"},
            MetaManifest("apps/v1", "Deployment", "ns", "name"): {"dply-loc"},
        }
        srv, err = fun(k8sconfig, local_in, local_in)
        assert not err and srv == local_in
        assert not m_fetch.called

        # Local- and server manifests have identical Service resource but two
        # completely different deployments. Must not sync anything because the
        # deployments are actually different resources.
        # NOTE(review): the namespace/name arguments of the Service manifests
        # look swapped ("svc-name" occupies the namespace slot) - harmless for
        # this test's purpose but worth confirming.
        local_in = {
            MetaManifest("v1", "Service", "svc-name", "ns1"): {"ns-loc"},
            MetaManifest("apps/v1", "Deployment", "ns", "foo"): {"dply-loc"},
        }
        server_in = {
            MetaManifest("v1", "Service", "svc-name", "ns1"): {"ns-srv"},
            MetaManifest("extensions/v1beta1", "Deployment", "ns", "bar"):
            {"orig-srv"},
        }
        srv, err = fun(k8sconfig, local_in, server_in)
        assert not err and srv == server_in
        assert not m_fetch.called

        # Local- and server manifests have matching Deployments in two
        # different namespaces. The API versions already agree pairwise, so
        # there is nothing to re-download.
        local_in = {
            MetaManifest("apps/v1beta1", "Deployment", "name", "ns1"):
            {"deploy-1"},
            MetaManifest("apps/v1beta2", "Deployment", "name", "ns2"):
            {"deploy-2"},
        }
        server_in = {
            MetaManifest("apps/v1beta1", "Deployment", "name", "ns1"):
            {"deploy-1"},
            MetaManifest("apps/v1beta2", "Deployment", "name", "ns2"):
            {"deploy-2"},
        }
        srv, err = fun(k8sconfig, local_in, server_in)
        assert not err and srv == server_in
        assert not m_fetch.called
コード例 #3
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_match_api_version_multi(self, m_fetch, k8sconfig):
        """Mix multiple deployments, some of which need re-downloading."""
        # Local manifests: the first two Deployments use the same API version
        # as their server counterparts, the third one does not.
        local_man = {
            MetaManifest("apps/v1", "Deployment", "ns", "name-1"): {"loc-deploy-1"},
            MetaManifest("apps/v1beta1", "Deployment", "ns", "name-2"): {"loc-deploy-2"},
            MetaManifest("apps/v1beta1", "Deployment", "ns", "name-3"): {"loc-deploy-3"},
        }

        # Server manifests: same resources, but `name-3` lives at a different
        # API version (`apps/v1beta2`).
        server_man = {
            MetaManifest("apps/v1", "Deployment", "ns", "name-1"): {"srv-deploy-1"},
            MetaManifest("apps/v1beta1", "Deployment", "ns", "name-2"): {"srv-deploy-2"},
            MetaManifest("apps/v1beta2", "Deployment", "ns", "name-3"): {"loc-deploy-3"},
        }

        # Pretend the re-download of `name-3` from the `apps/v1beta1` endpoint
        # (the version the local manifest wants) succeeds.
        meta = MetaManifest("apps/v1beta1", "Deployment", "ns", "name-3")
        res, err = square.k8s.resource(k8sconfig, meta)
        assert not err
        m_fetch.return_value = (meta, {"new-deploy-3"}, False)

        # The function must replace the server's `name-3` manifest with the
        # freshly downloaded one and leave the other two untouched.
        srv, err = square.square.match_api_version(k8sconfig, local_man, server_man)
        assert not err and srv == {
            MetaManifest("apps/v1", "Deployment", "ns", "name-1"): {"srv-deploy-1"},
            MetaManifest("apps/v1beta1", "Deployment", "ns", "name-2"): {"srv-deploy-2"},

            # This one must have been re-downloaded.
            MetaManifest("apps/v1beta1", "Deployment", "ns", "name-3"): {"new-deploy-3"},
        }

        # Must have downloaded exactly one deployment, namely `name-3`.
        m_fetch.assert_called_once_with(k8sconfig, res)
コード例 #4
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_find_namespace_orphans(self):
        """Return all resource manifests that belong to non-existing
        namespaces.

        This function will be useful to sanity check the local deployments
        manifest to avoid cases where users define resources in a namespace but
        forget to define that namespace (or mis-spell it).

        """
        # Both Deployments live in `ns1`, which is not defined anywhere, so
        # both must be reported as orphans.
        manifests = {
            MetaManifest('v1', 'Deployment', 'ns1', 'foo'),
            MetaManifest('v1', 'Deployment', 'ns1', 'bar'),
        }
        assert sq.find_namespace_orphans(manifests) == (manifests, True)

        # Namespaces can never be orphans by definition.
        manifests = {
            MetaManifest('v1', 'Namespace', None, 'ns1'),
            MetaManifest('v1', 'Namespace', None, 'ns2'),
        }
        assert sq.find_namespace_orphans(manifests) == (set(), True)

        # Only the Deployment in the undefined `ns2` is an orphan.
        manifests = {
            MetaManifest('v1', 'Deployment', 'ns1', 'foo'),
            MetaManifest('v1', 'Deployment', 'ns2', 'bar'),
            MetaManifest('v1', 'Namespace', None, 'ns1'),
        }
        expected = {MetaManifest('v1', 'Deployment', 'ns2', 'bar')}
        assert sq.find_namespace_orphans(manifests) == (expected, True)
コード例 #5
0
    def test_resource_service(self, integrationtest, k8sconfig):
        """Verify with a Service resource.

        NOTE: this test is tailored to Kubernetes v1.16.

        """
        # Fixtures.
        k8sconfig = self.k8sconfig(integrationtest, k8sconfig)
        err_resp = (K8sResource("", "", "", False, ""), True)

        # Each tuple: (apiVersion we request, apiVersion the K8sResource must
        # report). An empty request version means "pick the latest".
        for src, expected in [("v1", "v1"), ("", "v1")]:
            base = f"{k8sconfig.url}/api/v1"

            # A particular Service in a particular namespace.
            res, err = k8s.resource(k8sconfig, MetaManifest(src, "Service", "ns", "name"))
            assert not err
            assert res == K8sResource(
                apiVersion=expected, kind="Service", name="services", namespaced=True,
                url=f"{base}/namespaces/ns/services/name",
            )

            # All Services in all namespaces.
            res, err = k8s.resource(k8sconfig, MetaManifest(src, "Service", None, None))
            assert not err
            assert res == K8sResource(
                apiVersion=expected, kind="Service", name="services", namespaced=True,
                url=f"{base}/services",
            )

            # All Services in a particular namespace.
            res, err = k8s.resource(k8sconfig, MetaManifest(src, "Service", "ns", ""))
            assert not err
            assert res == K8sResource(
                apiVersion=expected, kind="Service", name="services", namespaced=True,
                url=f"{base}/namespaces/ns/services",
            )

            # A particular Service in all namespaces -> Invalid.
            assert k8s.resource(k8sconfig, MetaManifest(src, "Service", None, "name")) == err_resp
コード例 #6
0
def download_single(k8sconfig: K8sConfig,
                    resource: K8sResource) -> Tuple[MetaManifest, dict, bool]:
    """Similar to `download(...)` but only for a single Kubernetes `resource`.

    Inputs:
        k8sconfig: K8sConfig
        resource: K8sResource

    Returns:
        MetaManifest, manifest, err: the K8s (meta)manifest plus error flag.

    """
    # Fetch the manifest from K8s, then strip it (no custom filters here,
    # hence the empty dict).
    manifest, err = square.k8s.get(k8sconfig.client, resource.url)
    if not err:
        manifest, _, err = strip(k8sconfig, manifest, {})

    # Return a blank (meta)manifest plus an error flag if either step failed.
    if err:
        logit.error(
            f"Could not query {k8sconfig.name} ({k8sconfig.url}/{resource.url})"
        )
        return (MetaManifest("", "", "", ""), {}, True)

    return (make_meta(manifest), manifest, False)
コード例 #7
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_compile_plan_patch_with_diff(self, config, k8sconfig):
        """Test a plan that patches all resources.

        Local and server declare the same resource but with different
        definitions (labels). The returned plan must therefore nominate the
        manifest for patching, and none to create and delete.

        """
        # Define a single resource.
        meta = MetaManifest('v1', 'Namespace', None, 'ns1')

        # Local and server manifests have the same resources but their
        # definition differs. This will ensure a non-empty patch in the plan.
        loc_man = {meta: make_manifest("Namespace", None, "ns1")}
        srv_man = {meta: make_manifest("Namespace", None, "ns1")}
        loc_man[meta]["metadata"]["labels"] = {"foo": "foo"}
        srv_man[meta]["metadata"]["labels"] = {"bar": "bar"}

        # Compute the JSON patch and textual diff to populate the expected
        # output structure below.
        patch, err = sq.make_patch(config, k8sconfig, loc_man[meta],
                                   srv_man[meta])
        assert not err
        diff_str, err = manio.diff(config, k8sconfig, loc_man[meta],
                                   srv_man[meta])
        assert not err

        # Verify the test function returns the correct Patch and diff.
        expected = DeploymentPlan(create=[],
                                  patch=[DeltaPatch(meta, diff_str, patch)],
                                  delete=[])
        ret = sq.compile_plan(config, k8sconfig, loc_man, srv_man)
        assert ret == (expected, False)
コード例 #8
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_compile_plan_err(self, m_apply, m_plan, m_part, config,
                              k8sconfig):
        """Use mocks for the internal function calls to simulate errors."""
        err_resp = (DeploymentPlan(tuple(), tuple(), tuple()), True)

        # One resource plus a valid dummy return value for
        # `sq.partition_manifests`. Local and server manifests are identical.
        meta = MetaManifest('v1', 'Namespace', None, 'ns1')
        plan = DeploymentPlan(create=[], patch=[meta], delete=[])
        loc_man = srv_man = {meta: make_manifest("Namespace", None, "ns1")}

        # Simulate an error in `partition_manifests`.
        m_part.return_value = (None, True)
        assert sq.compile_plan(config, k8sconfig, loc_man, srv_man) == err_resp

        # Simulate an error in `diff` (partitioning now succeeds).
        m_part.return_value = (plan, False)
        m_plan.return_value = (None, True)
        assert sq.compile_plan(config, k8sconfig, loc_man, srv_man) == err_resp

        # Simulate an error in `make_patch` (diff now succeeds).
        m_plan.return_value = ("some string", False)
        m_apply.return_value = (None, True)
        assert sq.compile_plan(config, k8sconfig, loc_man, srv_man) == err_resp
コード例 #9
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_make_patch_ok(self, config, k8sconfig):
        """Compute patch between two manifests.

        The first check verifies that the patch between two identical
        manifests is empty. The second uses two manifests with different
        labels, which must produce two patch operations: one to remove the
        old label and one to add the new one.

        """
        kind, namespace, name = "Deployment", "namespace", "name"

        # Two valid manifests that differ only in their labels.
        srv = make_manifest(kind, namespace, name)
        loc = make_manifest(kind, namespace, name)
        srv["metadata"]["labels"] = {"old": "old"}
        loc["metadata"]["labels"] = {"new": "new"}

        # Look up the resource to learn the patch URL.
        res, err = resource(k8sconfig,
                            MetaManifest("apps/v1", kind, namespace, name))
        assert not err

        # Patching a manifest onto itself must be a no-op.
        empty = JsonPatch(url=res.url, ops=[])
        assert sq.make_patch(config, k8sconfig, loc, loc) == (empty, False)

        # Patching `srv` with `loc` must swap the labels.
        swap = JsonPatch(url=res.url, ops=[
            {'op': 'remove', 'path': '/metadata/labels/old'},
            {'op': 'add', 'path': '/metadata/labels/new', 'value': 'new'},
        ])
        assert sq.make_patch(config, k8sconfig, loc, srv) == (swap, False)
コード例 #10
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_compile_plan_patch_no_diff(self, config, k8sconfig):
        """The plan must be empty if the local and server manifests are too."""
        # Two namespaces with one Deployment in each.
        meta = [
            MetaManifest('v1', 'Namespace', None, 'ns1'),
            MetaManifest('apps/v1', 'Deployment', 'ns1', 'res_0'),
            MetaManifest('v1', 'Namespace', None, 'ns2'),
            MetaManifest('apps/v1', 'Deployment', 'ns2', 'res_1'),
        ]

        # Local and server manifests are identical, so there must be nothing
        # to create, patch or delete.
        src = {m: make_manifest(m.kind, m.namespace, m.name) for m in meta}

        expected = DeploymentPlan(create=[], patch=[], delete=[])
        ret = sq.compile_plan(config, k8sconfig, src, src)
        assert ret == (expected, False)
コード例 #11
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_partition_manifests_patch(self):
        """Local and server manifests match.

        If all resources exist both locally and remotely then nothing needs to
        be created or deleted. However, the resources may need patching but
        that is not something `partition_manifests` concerns itself with.

        """
        # Identical local and cluster manifests: the plan must create/delete
        # nothing and nominate every resource for (possible) patching.
        manifests = {
            MetaManifest('v1', 'Namespace', None, 'ns3'): "0",
            MetaManifest('v1', 'Namespace', None, 'ns1'): "1",
            MetaManifest('v1', 'Deployment', 'ns2', 'bar'): "2",
            MetaManifest('v1', 'Namespace', None, 'ns2'): "3",
            MetaManifest('v1', 'Deployment', 'ns1', 'foo'): "4",
        }
        expected = DeploymentPlan(
            create=[],
            patch=list(manifests.keys()),
            delete=[],
        )
        assert sq.partition_manifests(manifests, manifests) == (expected, False)
コード例 #12
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_make_patch_empty(self, config, k8sconfig):
        """Basic test: compute patch between two identical resources."""
        kind, ns, name = 'Deployment', 'ns', 'foo'

        # PATCH URLs require the resource name at the end of the request path.
        res, _ = resource(k8sconfig, MetaManifest("apps/v1", kind, ns, name))

        # Identical manifests must produce an empty JsonPatch.
        loc = srv = make_manifest(kind, ns, name)
        data, err = sq.make_patch(config, k8sconfig, loc, srv)
        assert (data, err) == (JsonPatch(res.url, []), False)
        assert isinstance(data, JsonPatch)
コード例 #13
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_compile_plan_invalid_api_version(self, config, k8sconfig):
        """Plan compilation must fail for an unknown `apiVersion`.

        The local and server manifests are identical, so the only problem is
        the bogus API version of the Deployment. `compile_plan` must report
        an error and return an empty plan.

        """
        # A single Deployment with a bogus API version.
        meta = [
            MetaManifest("invalid", "Deployment", "ns", "name"),
        ]

        # Local and server manifests are identical.
        src = {m: make_manifest(m.kind, m.namespace, m.name) for m in meta}

        # Must fail because the API group is invalid.
        ret = sq.compile_plan(config, k8sconfig, src, src)
        assert ret == (DeploymentPlan(tuple(), tuple(), tuple()), True)
コード例 #14
0
def make_meta(manifest: dict) -> MetaManifest:
    """Compile `MetaManifest` information from `manifest` and return it.

    Throw `KeyError` if manifest lacks essential fields like `apiVersion`,
    `kind`, etc because it cannot possibly be a valid K8s manifest then.

    """
    kind = manifest["kind"]

    # Namespace resources are not namespaced themselves; for every other kind
    # the namespace may genuinely be absent if the resource applies globally
    # (eg ClusterRole).
    if kind == "Namespace":
        namespace = None
    else:
        namespace = manifest["metadata"].get("namespace", None)

    return MetaManifest(
        apiVersion=manifest["apiVersion"],
        kind=kind,
        namespace=namespace,
        name=manifest["metadata"]["name"],
    )
コード例 #15
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_match_api_version_namespace(self, m_fetch, k8sconfig):
        """Define a set of resources and verify the function downloads the ones
        where the `apiVersion` fields do not match.

        """
        # Two local Deployments with different API versions.
        # NOTE(review): the namespace/name arguments look swapped throughout
        # this test ("name" occupies the namespace slot) - harmless for the
        # logic under test but worth confirming.
        loc = {
            MetaManifest("apps/v1beta1", "Deployment", "name", "ns1"): {"deploy-1"},
            MetaManifest("apps/v1beta2", "Deployment", "name", "ns2"): {"deploy-2"},
        }
        srv_in = {
            # Same API version as in `loc`.
            MetaManifest("apps/v1beta1", "Deployment", "name", "ns1"): {"deploy-1"},

            # Different API version than in `loc`.
            MetaManifest("apps/v1beta1", "Deployment", "name", "ns2"): {"deploy-2"},
        }

        # Pretend the download from the endpoint the local manifest wants
        # (`apps/v1beta2`) succeeds.
        meta = MetaManifest("apps/v1beta2", "Deployment", "name", "ns2")
        assert meta in loc
        res, err = square.k8s.resource(k8sconfig, meta)
        assert not err
        m_fetch.return_value = (meta, {"new-deploy-2"}, False)

        # The test function must have re-downloaded the second Deployment from
        # the `apps/v1beta2` endpoint.
        srv, err = square.square.match_api_version(k8sconfig, loc, srv_in)
        assert not err and srv == {
            MetaManifest("apps/v1beta1", "Deployment", "name", "ns1"): {"deploy-1"},
            MetaManifest("apps/v1beta2", "Deployment", "name", "ns2"): {"new-deploy-2"},
        }

        # Must have downloaded exactly one Deployment.
        m_fetch.assert_called_once_with(k8sconfig, res)
コード例 #16
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_match_api_version_basic(self, m_fetch, k8sconfig):
        """Define two resources and verify the test function downloads the one
        where the `apiVersion` does not match.

        """
        # Same Deployment, but the local copy uses the legacy API group while
        # the server copy uses `apps/v1`. The Namespace matches on both sides.
        meta_loc = MetaManifest("extensions/v1beta1", "Deployment", "ns", "name")
        meta_srv = MetaManifest("apps/v1", "Deployment", "ns", "name")
        loc = {
            MetaManifest("v1", "Namespace", None, "ns1"): {"ns-loc"},
            meta_loc: {"dply-loc"},
        }
        srv_in = {
            MetaManifest("v1", "Namespace", None, "ns1"): {"ns-srv"},
            meta_srv: {"orig-srv"},
        }

        # Pretend the download from the `extensions/v1beta1` endpoint succeeds.
        res, err = square.k8s.resource(k8sconfig, meta_loc)
        assert not err
        m_fetch.return_value = (meta_loc, {"new-srv"}, False)

        # The function must have re-downloaded the Deployment from the
        # `extensions/v1beta1` endpoint because that is what the local
        # manifest specifies.
        srv, err = square.square.match_api_version(k8sconfig, loc, srv_in)
        assert not err and srv == {
            MetaManifest("v1", "Namespace", None, "ns1"): {"ns-srv"},
            MetaManifest("extensions/v1beta1", "Deployment", "ns", "name"): {"new-srv"},
        }

        # Must have downloaded exactly one Deployment.
        m_fetch.assert_called_once_with(k8sconfig, res)
コード例 #17
0
def download(config: Config, k8sconfig: K8sConfig) -> Tuple[ServerManifests, bool]:
    """Download and return the resources that match `config.selectors`.

    Leave `config.selectors.namespaces` empty to download the resources from
    all Kubernetes namespaces.

    Either returns all the data or an error; never returns partial results.

    Inputs:
        config: Square configuration.
        k8sconfig: K8sConfig

    Returns:
        Dict[MetaManifest, dict]: the K8s manifests from K8s.

    """
    # Output.
    server_manifests = {}

    # Ensure `namespaces` is always a list to avoid special casing below.
    # An empty selector means "all namespaces", encoded here as [None].
    all_namespaces: Iterable[Optional[str]]
    if not config.selectors.namespaces:
        all_namespaces = [None]
    else:
        all_namespaces = config.selectors.namespaces

    # Download each resource type. Abort at the first error and return nothing.
    for namespace in all_namespaces:
        for kind in sorted(config.selectors.kinds):
            # Get the K8s URL for the current resource kind. Ignore this
            # resource if K8s does not know about it. The reason for that could
            # be a typo or that it is a Custom Resource that does not (yet) exist.
            resource, err = square.k8s.resource(k8sconfig, MetaManifest("", kind, namespace, ""))  # noqa
            if err:
                logit.warning(f"Skipping unknown resource <{kind}>")
                continue

            try:
                # Download the resource manifests for the current `kind` from K8s.
                manifest_list, err = square.k8s.get(k8sconfig.client, resource.url)
                assert not err and manifest_list is not None

                # Parse the K8s List (eg DeploymentList, NamespaceList, ...) into a
                # Dict[MetaManifest, dict] dictionary.
                manifests, err = unpack_list(manifest_list, config.selectors)
                assert not err and manifests is not None

                # Strip off the fields defined in `config.filters`.
                # Each value of `ret` is a (manifest, _, err) triple.
                ret = {k: strip(k8sconfig, man, config.filters)
                       for k, man in manifests.items()}

                # Ensure `strip` worked for every manifest.
                err = any((v[2] for v in ret.values()))
                assert not err

                # Unpack the stripped manifests from the `strip` response. The
                # "if v[0] is not None" statement exists to satisfy MyPy - we
                # already know they are not None or otherwise the previous
                # assert would have failed.
                manifests = {k: v[0] for k, v in ret.items() if v[0] is not None}
            except AssertionError:
                # Return nothing, even if we had downloaded other kinds already.
                return ({}, True)
            else:
                # Copy the manifests into the output dictionary.
                server_manifests.update(manifests)
    return (server_manifests, False)
コード例 #18
0
def resource(k8sconfig: K8sConfig,
             meta: MetaManifest) -> Tuple[K8sResource, bool]:
    """Return the `K8sResource` that describes `meta`.

    The returned object carries the fully qualified URL of the resource,
    eg. https://1.2.3.4/api/v1/namespace/foo/services.

    Inputs:
        k8sconfig: K8sConfig
        meta: MetaManifest

    Returns:
        K8sResource

    """
    # Canonical error response for every failure path below.
    err_resp = (K8sResource("", "", "", False, ""), True)

    # Determine the `(kind, apiVersion)` lookup key. If the manifest did not
    # specify an apiVersion, fall back to the lexicographically first version
    # among all endpoints K8s advertises for this kind.
    if meta.apiVersion:
        key = (meta.kind, meta.apiVersion)
    else:
        versions = sorted(ver for kind, ver in k8sconfig.apis
                          if kind == meta.kind)
        if not versions:
            logit.warning(f"Cannot determine API version for <{meta.kind}>")
            return err_resp
        key = (meta.kind, versions[0])

    # Look up the resource record; abort if K8s knows nothing about it.
    resource = k8sconfig.apis.get(key)
    if resource is None:
        logit.error(f"Unsupported resource <{meta.kind}> {key}.")
        return err_resp

    # Non-namespaced resources must not carry a namespace.
    if not resource.namespaced:
        meta = meta._replace(namespace=None)

    # Namespaces themselves are special because their URL lacks the usual
    # `namespaces/<name>` prefix.
    if meta.kind == "Namespace":
        url = f"{resource.url}/namespaces"
        # Address one particular namespace if a name was supplied.
        if meta.name:
            url = f"{url}/{meta.name}"
        return resource._replace(url=url), False

    # Compute the path prefix for namespaced resources.
    if meta.namespace is None:
        namespace = ""
    else:
        # The namespace name must conform to K8s conventions.
        match = re.match(r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", meta.namespace)
        if match is None or match.group() != meta.namespace:
            logit.error(f"Invalid namespace name <{meta.namespace}>.")
            return err_resp
        namespace = f"namespaces/{meta.namespace}"

    # Sanity check: searching for a *named* namespaced resource across all
    # namespaces is unsupported. Example: we cannot look for Service `foo`
    # everywhere - only for Service `foo` in namespace `bar`, or for all
    # services in all namespaces.
    if resource.namespaced and meta.name and not meta.namespace:
        logit.error(
            f"Cannot search for {meta.kind} {meta.name} in {meta.namespace}")
        return err_resp

    # Assemble the path from its optional components. The three possible
    # shapes are:
    #  - /api/v1/namespaces/services
    #  - /api/v1/namespaces/my-namespace/services
    #  - /api/v1/namespaces/my-namespace/services/my-service
    parts = [namespace] if namespace else []
    parts.append(resource.name)
    if meta.name:
        parts.append(meta.name)
    path = "/".join(parts)

    # Return the K8sResource with the fully assembled URL.
    return resource._replace(url=f"{resource.url}/{path}"), False
コード例 #19
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_compile_plan_create_delete_ok(self, config, k8sconfig):
        """Test a plan that creates and deletes resource, but not patch any.

        Local and server resources are entirely disjoint. The resulting plan
        must therefore create every local resource, delete every server
        resource and patch nothing.

        """
        # Local side: Namespace "ns1" with one Deployment.
        # Server side: Namespace "ns2" with two Deployments.
        all_meta = [
            MetaManifest('v1', 'Namespace', None, 'ns1'),
            MetaManifest('apps/v1', 'Deployment', 'ns1', 'res_0'),
            MetaManifest('v1', 'Namespace', None, 'ns2'),
            MetaManifest('apps/v1', 'Deployment', 'ns2', 'res_1'),
            MetaManifest('apps/v1', 'Deployment', 'ns2', 'res_2'),
        ]

        # Resolve the K8sResource for every manifest and verify that all of
        # them specified a valid API group.
        resolved = [resource(k8sconfig, meta._replace(name=""))
                    for meta in all_meta]
        assert not any(err for _, err in resolved)
        urls = [res.url for res, _ in resolved]

        # Compile disjoint local and server manifests. This guarantees that
        # the plan must create all local resources, delete all server
        # resources and patch none.
        loc_man = {meta: make_manifest(meta.kind, meta.namespace, meta.name)
                   for meta in all_meta[:2]}
        srv_man = {meta: make_manifest(meta.kind, meta.namespace, meta.name)
                   for meta in all_meta[2:]}

        # Deletions require a manifest with the terms of deletion. These are
        # currently hard coded into the function under test.
        del_opts = {
            "apiVersion": "v1",
            "kind": "DeleteOptions",
            "gracePeriodSeconds": 0,
            "orphanDependents": False,
        }

        # Locally declared resources must be created, server ones deleted.
        expected = DeploymentPlan(
            create=[
                DeltaCreate(meta, url, loc_man[meta])
                for meta, url in zip(all_meta[:2], urls[:2])
            ],
            patch=[],
            delete=[
                DeltaDelete(meta, f"{url}/{meta.name}", del_opts)
                for meta, url in zip(all_meta[2:], urls[2:])
            ],
        )
        ret, err = sq.compile_plan(config, k8sconfig, loc_man, srv_man)
        assert ret.create == expected.create
        assert (ret, err) == (expected, False)
コード例 #20
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_sort_plan(self, config):
        """`sort_plan` must order create/delete by priority and keep patches as-is."""
        # Dummy MetaManifests for the test plans.
        ns0 = MetaManifest('v1', 'Namespace', None, 'ns0')
        ns1 = MetaManifest('v1', 'Namespace', None, 'ns1')
        svc0 = MetaManifest('v1', 'Service', "ns0", 'svc0')
        svc1 = MetaManifest('v1', 'Service', "ns1", 'svc1')
        dpl0 = MetaManifest('apps/v1', 'Deployment', 'ns0', 'deploy_0')
        dpl1 = MetaManifest('apps/v1', 'Deployment', 'ns1', 'deploy_1')

        def verify(expected_plan):
            # Shuffle the plan repeatedly; `sort_plan` must always restore the
            # expected order for `create` and `delete` but leave `patch`
            # untouched (ie in its shuffled input order).
            plan = copy.deepcopy(expected_plan)
            for _ in range(10):
                random.shuffle(plan.create)
                random.shuffle(plan.patch)
                random.shuffle(plan.delete)
                ret, err = sq.sort_plan(config, plan)
                assert not err
                assert ret.create == expected_plan.create
                assert ret.delete == expected_plan.delete
                assert ret.patch == plan.patch

        # First scenario: every kind appears in the priority list. Creation
        # follows the priorities, deletion uses the reverse order.
        expected = DeploymentPlan(
            create=[
                DeltaCreate(ns0, None, None),
                DeltaCreate(ns1, None, None),
                DeltaCreate(svc0, None, None),
                DeltaCreate(svc1, None, None),
                DeltaCreate(dpl0, None, None),
                DeltaCreate(dpl1, None, None),
            ],
            patch=[
                DeltaCreate(ns0, None, None),
                DeltaCreate(ns1, None, None),
                DeltaCreate(svc0, None, None),
                DeltaCreate(svc1, None, None),
                DeltaCreate(dpl0, None, None),
                DeltaCreate(dpl1, None, None),
            ],
            delete=[
                DeltaCreate(dpl1, None, None),
                DeltaCreate(dpl0, None, None),
                DeltaCreate(svc1, None, None),
                DeltaCreate(svc0, None, None),
                DeltaCreate(ns1, None, None),
                DeltaCreate(ns0, None, None),
            ],
        )
        config.priorities = ["Namespace", "Service", "Deployment"]
        verify(expected)

        # Second scenario: Service is absent from the priority list and must
        # therefore come last.
        expected = DeploymentPlan(
            create=[
                DeltaCreate(ns0, None, None),
                DeltaCreate(ns1, None, None),
                DeltaCreate(dpl0, None, None),
                DeltaCreate(svc1, None, None),
            ],
            patch=[
                DeltaCreate(ns0, None, None),
                DeltaCreate(ns1, None, None),
                DeltaCreate(svc0, None, None),
                DeltaCreate(svc1, None, None),
                DeltaCreate(dpl0, None, None),
                DeltaCreate(dpl1, None, None),
            ],
            delete=[
                DeltaCreate(svc0, None, None),
                DeltaCreate(dpl1, None, None),
                DeltaCreate(ns1, None, None),
                DeltaCreate(ns0, None, None),
            ],
        )
        config.priorities = ["Namespace", "Deployment"]
        verify(expected)
コード例 #21
0
ファイル: test_square.py プロジェクト: mmingorance-dh/square
    def test_partition_manifests_patch_delete(self):
        """Create plan with resources to delete and patch.

        The local manifests are a strict subset of the cluster ones. The
        resulting plan must therefore create nothing, patch every resource
        that exists on both sides and delete whatever exists only on the
        server.

        """
        fun = sq.partition_manifests

        # Local manifests: a strict subset of the server's.
        local_man = {
            MetaManifest('v1', 'Deployment', 'ns2', 'bar1'): "0",
            MetaManifest('v1', 'Namespace', None, 'ns2'): "1",
        }
        # Server manifests: superset of the local ones.
        cluster_man = {
            MetaManifest('v1', 'Deployment', 'ns1', 'foo'): "2",
            MetaManifest('v1', 'Deployment', 'ns2', 'bar1'): "3",
            MetaManifest('v1', 'Deployment', 'ns2', 'bar2'): "4",
            MetaManifest('v1', 'Namespace', None, 'ns1'): "5",
            MetaManifest('v1', 'Namespace', None, 'ns2'): "6",
            MetaManifest('v1', 'Namespace', None, 'ns3'): "7",
        }

        # Shared resources are patched, server-only ones deleted, none created.
        expected = DeploymentPlan(
            create=[],
            patch=[
                MetaManifest('v1', 'Deployment', 'ns2', 'bar1'),
                MetaManifest('v1', 'Namespace', None, 'ns2'),
            ],
            delete=[
                MetaManifest('v1', 'Deployment', 'ns1', 'foo'),
                MetaManifest('v1', 'Deployment', 'ns2', 'bar2'),
                MetaManifest('v1', 'Namespace', None, 'ns1'),
                MetaManifest('v1', 'Namespace', None, 'ns3'),
            ],
        )
        assert fun(local_man, cluster_man) == (expected, False)