Code example #1
    def _copy_path_from_pod(self, local_dir: Path, pod_dir: Path):
        """
        copy content of a dir from pod to local dir

        :param local_dir: path to the local dir
        :param pod_dir: path within the pod
        """
        try:
            run_command([
                "oc",
                "rsync",
                "--delete",  # delete files in local_dir which are not in pod_dir
                "--quiet=true",  # avoid huge logs
                f"--namespace={self.k8s_namespace_name}",
                f"{self.pod_name}:{pod_dir}/",  # trailing / to copy only content of dir
                f"{local_dir}",
            ])
        except Exception as ex:
            # There is a race condition in k8s: it reports the pod as running even
            # though it has already killed the exec session, hence we could not copy
            # anything out of the pod.
            if not self.is_pod_running() or "not available in container" in str(ex):
                logger.warning(
                    "The pod is not running while we tried to copy data out of it."
                )
                raise SandcastleException(
                    "Cannot copy data from the sandbox - the pod is not running."
                )
            raise
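
Every example on this page calls a run_command helper that is not itself listed here. A minimal sketch of what such a helper could look like, assuming it simply wraps subprocess.run, raises on a non-zero exit code, and accepts the optional cwd used in code example #4 (the real helper in sandcastle may differ in details):

import logging
import subprocess
from pathlib import Path
from typing import List, Optional, Union

logger = logging.getLogger(__name__)


def run_command(cmd: List[Union[str, Path]], cwd: Optional[Path] = None) -> str:
    """Run cmd, log it, and raise if it exits with a non-zero return code."""
    logger.debug("running command: %s", " ".join(str(c) for c in cmd))
    result = subprocess.run(
        [str(c) for c in cmd],
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        check=True,  # raises CalledProcessError when the command fails
    )
    return result.stdout.decode()
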
Code example #2
File: test_ironman.py Project: marusak/sandcastle
def test_md_e2e(tmp_path, git_url, branch):
    # running in k8s
    if "KUBERNETES_SERVICE_HOST" in os.environ:
        t = Path(SANDCASTLE_MOUNTPOINT, f"clone-{get_timestamp_now()}")
    else:
        t = tmp_path
    m_dir = MappedDir(t, SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)

    run_command(["git", "clone", "-b", branch, git_url, t])

    o = Sandcastle(image_reference=SANDBOX_IMAGE,
                   k8s_namespace_name=NAMESPACE,
                   mapped_dir=m_dir)
    o.run()
    try:
        o.exec(command=["packit", "--debug", "srpm"])
        assert list(t.glob("*.src.rpm"))
        o.exec(command=["packit", "--help"])

        with pytest.raises(SandcastleCommandFailed) as ex:
            o.exec(command=["bash", "-c", "echo 'I quit!'; exit 120"])
        e = ex.value
        assert "I quit!" in e.output
        assert 120 == e.rc
        assert "command terminated with non-zero exit code" in e.reason
    finally:
        o.delete_pod()
Code example #3
def build_now():
    """ build a container image with sandcastle """
    project_root = Path(__file__).parent.parent
    run_command([
        "docker",
        "build",
        "-t",
        TEST_IMAGE_NAME,
        "-f",
        "Dockerfile.tests",
        str(project_root),
    ])
Code example #4
    def _copy_path_to_pod(self, local_path: Path, pod_dir: Path):
        """
        copy local_path (dir or file) inside pod

        :param local_path: path to a local file or a dir
        :param pod_dir: Directory within the pod where the content of local_path is extracted
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            tmp_tarball_dir = Path(tmpdir)
            tmp_tarball_path = tmp_tarball_dir.joinpath("t.tar.gz")
            cmd = ["tar", "--preserve-permissions", "-czf", tmp_tarball_path]

            working_dir = local_path
            if local_path.is_file():
                items = [local_path]
                working_dir = local_path.parent
            else:
                # this has to be list, because mypy:
                #   Incompatible types in assignment (expression has type
                #     "Generator[Path, None, None]", variable has type "List[Path]")
                items = list(local_path.iterdir())
            # tar: lost+found: Cannot utime: Operation not permitted
            # a plain loop instead of a list comprehension to keep mypy happy
            for x in items:
                if x.name == "lost+found":
                    continue
                cmd.append(str(x.relative_to(working_dir)))
            run_command(cmd, cwd=working_dir)
            remote_tmp_dir = Path(self._do_exec(["mktemp", "-d"]).strip())
            try:
                remote_tar_path = remote_tmp_dir.joinpath("t.tar.gz")
                # Copy /tmp/foo local file to /tmp/bar
                # in a remote pod in namespace <some-namespace>:
                #   oc cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar
                target = f"{self.k8s_namespace_name}/{self.pod_name}:{remote_tmp_dir}"
                # if you're interested: the way openshift does this is that
                # it creates a tarball locally
                # and streams it via exec into the container to a pod process
                run_command(["oc", "cp", tmp_tarball_path, target])
                unpack_cmd = [
                    "tar",
                    "--preserve-permissions",
                    "-xzf",
                    str(remote_tar_path),
                    "-C",
                    str(pod_dir),
                ]
                self._do_exec(unpack_cmd)
            finally:
                self._do_exec(["rm", "-rf", str(remote_tmp_dir)])
Code example #5
    def _copy_path_from_pod(self, local_dir: Path, pod_dir: Path):
        """
        copy content of a dir from pod to local dir

        :param local_dir: path to the local dir
        :param pod_dir: path within the pod
        """
        run_command([
            "oc",
            "rsync",
            "--delete",  # delete files in local_dir which are not in pod_dir
            "--quiet=true",  # avoid huge logs
            f"--namespace={self.k8s_namespace_name}",
            f"{self.pod_name}:{pod_dir}/",  # trailing / to copy only content of dir
            f"{local_dir}",
        ])
Code example #6
    def _copy_path_from_pod(self, local_dir: Path, pod_dir: Path):
        """
        copy content of a dir from pod locally

        :param local_dir: path to the local dir
        :param pod_dir: path within the pod
        """
        remote_tmp_dir = Path(self._do_exec(["mktemp", "-d"]).strip())
        try:
            remote_tar_path = remote_tmp_dir.joinpath("t.tar.gz")
            grc = (
                rf"cd {pod_dir} && ls -d -1 .* * | egrep -v '^\.$' | egrep -v '^\.\.$'"
            )
            tar_cmd = f"tar -czf {remote_tar_path} -C {pod_dir} $({grc})"
            pack_cmd = ["bash", "-c", tar_cmd]
            self._do_exec(pack_cmd)

            # Copy /tmp/foo from a remote pod to /tmp/bar locally
            #   oc cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar
            target = f"{self.k8s_namespace_name}/{self.pod_name}:{remote_tar_path}"
            fd, tmp_tarball_path = tempfile.mkstemp()
            try:
                os.close(fd)

                logger.info(f"copy {target} -> {tmp_tarball_path}")
                run_command(["oc", "cp", target, tmp_tarball_path])

                purge_dir_content(local_dir)

                unpack_cmd = [
                    "tar",
                    "--preserve-permissions",
                    "-xzf",
                    tmp_tarball_path,
                    "-C",
                    str(local_dir),
                ]
                run_command(unpack_cmd)
            finally:
                os.unlink(tmp_tarball_path)
        finally:
            self._do_exec(["rm", "-rf", str(remote_tmp_dir)])
Code example #7
def test_md_e2e(tmpdir, git_url, branch):
    # running in k8s
    if "KUBERNETES_SERVICE_HOST" in os.environ:
        t = Path(SANDCASTLE_MOUNTPOINT).joinpath(f"clone-{get_timestamp_now()}")
    else:
        t = Path(tmpdir)
    m_dir = MappedDir(str(t), SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)

    run_command(["git", "clone", "-b", branch, git_url, t])

    o = Sandcastle(
        image_reference=SANDBOX_IMAGE, k8s_namespace_name=NAMESPACE, mapped_dir=m_dir
    )
    o.run()
    try:
        o.exec(command=["packit", "--debug", "srpm"])
        assert list(t.glob("*.src.rpm"))
        o.exec(command=["packit", "--help"])
    finally:
        o.delete_pod()
Code example #8
    def _copy_path_to_pod(self,
                          local_path: Path,
                          pod_dir: Path,
                          no_perms: bool = False):
        """
        copy local_path (dir or file) inside pod

        :param local_path: path to a local file or a dir
        :param pod_dir: Directory within the pod where the content of local_path is extracted
        :param no_perms: If true, do not transfer permissions

        https://www.openshift.com/blog/transferring-files-in-and-out-of-containers-in-openshift-part-1-manually-copying-files
        """
        if local_path.is_dir():
            exclude = "--exclude=lost+found"  # can't touch that
            include = "--include=[]"  # default
        elif local_path.is_file():
            exclude = "--exclude=*"  # everything
            include = f"--include={local_path.name}"  # only the file
            local_path = local_path.parent
        else:
            raise SandcastleException(
                f"{local_path} is neither a dir nor a file")

        cmd = [
            "oc",
            "rsync",
            exclude,
            include,
            "--quiet=true",  # avoid huge logs
            f"--namespace={self.k8s_namespace_name}",
            f"{local_path}/",  # ??? rsync doesn't work without the trailing /
            f"{self.pod_name}:{pod_dir}",
        ]
        if no_perms:
            cmd += ["--no-perms"]
        run_command(cmd)
Code example #9
def test_md_new_namespace(tmpdir):
    t = Path(tmpdir)
    m_dir = MappedDir(str(t), SANDCASTLE_MOUNTPOINT, with_interim_pvc=True)

    d = t.joinpath("dir")
    d.mkdir()
    d.joinpath("file").write_text("asd")

    # running within openshift
    namespace = os.getenv("SANDCASTLE_TESTS_NAMESPACE")
    if not namespace:
        # running on a host - you can't create new projects from inside a pod
        namespace = f"sandcastle-tests-{get_timestamp_now()}"
        c = ["oc", "new-project", namespace]
        run_command(c)

    try:
        o = Sandcastle(
            image_reference=SANDBOX_IMAGE,
            k8s_namespace_name=namespace,
            mapped_dir=m_dir,
        )
        o.run()
        try:
            o.exec(command=["ls", "-lha", f"./dir/file"])
            assert d.joinpath("file").read_text() == "asd"
            cmd = [
                "bash",
                "-c",
                "curl -skL https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/metrics",
            ]
            out = o.exec(command=cmd)
            j = json.loads(out)
            # a small proof we are safe
            assert j["reason"] == "Forbidden"
        finally:
            o.delete_pod()
    finally:
        if not os.getenv("SANDCASTLE_TESTS_NAMESPACE"):
            run_command(["oc", "delete", "project", namespace])
            run_command(["oc", "project", NAMESPACE])
Code example #10
File: conftest.py Project: pombredanne/sandcastle
def run_test_within_pod(test_path: str,
                        with_pv_at: Optional[str] = None,
                        new_namespace: bool = False):
    """
    run selected test from within an openshift pod

    :param test_path: relative path to the test
    :param with_pv_at: path to PV within the pod
    :param new_namespace: create new namespace and pass it via env var
    """
    config.load_kube_config()
    configuration = client.Configuration()
    assert configuration.api_key
    api = client.CoreV1Api(client.ApiClient(configuration))

    pod_name = f"test-orchestrator-{get_timestamp_now()}"

    cont_cmd = [
        "bash",
        "-c",
        "ls -lha "
        f"&& pytest-3 -vv -l -p no:cacheprovider {test_path}",
    ]

    container: Dict[str, Any] = {
        "image": TEST_IMAGE_NAME,
        "name": pod_name,
        "tty": True,  # corols
        "command": cont_cmd,
        "imagePullPolicy": "Never",
        "env": [],
    }

    test_namespace = None
    if new_namespace:
        test_namespace = f"sandcastle-tests-{get_timestamp_now()}"
        c = ["oc", "new-project", test_namespace]
        run_command(c)
        c = [
            "oc",
            "adm",
            "-n",
            test_namespace,
            "policy",
            "add-role-to-user",
            "edit",
            f"system:serviceaccount:{NAMESPACE}:default",
        ]
        run_command(c)
        container["env"] += [{
            "name": "SANDCASTLE_TESTS_NAMESPACE",
            "value": test_namespace
        }]

    spec = {"containers": [container], "restartPolicy": "Never"}

    pod_manifest = {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": pod_name
        },
        "spec": spec,
    }
    if with_pv_at:
        cleaned_test_name = clean_string(test_path)
        ts = get_timestamp_now()
        volume_name = f"{cleaned_test_name}-{ts}-vol"[-63:]
        claim_name = f"{cleaned_test_name}-{ts}-pvc"[-63:]
        container["env"] = [{"name": "SANDCASTLE_PVC", "value": claim_name}]
        pvc_dict = {
            "kind": "PersistentVolumeClaim",
            "spec": {
                "accessModes": ["ReadWriteMany"],
                "resources": {
                    "requests": {
                        "storage": "1Gi"
                    }
                },
            },
            "apiVersion": "v1",
            "metadata": {
                "name": claim_name
            },
        }
        api.create_namespaced_persistent_volume_claim(NAMESPACE, pvc_dict)
        container["volumeMounts"] = [{
            "mountPath": with_pv_at,
            "name": volume_name
        }]
        spec["volumes"] = [{
            "name": volume_name,
            "persistentVolumeClaim": {
                "claimName": claim_name
            }
        }]
    try:
        api.delete_namespaced_pod(pod_name, NAMESPACE, body=V1DeleteOptions())
    except ApiException as ex:
        if ex.status != 404:
            raise

    try:
        api.create_namespaced_pod(body=pod_manifest, namespace=NAMESPACE)
        counter = 15
        while True:
            if counter < 0:
                raise RuntimeError("Pod did not start on time.")
            info = api.read_namespaced_pod(pod_name, NAMESPACE)
            if info.status.phase == "Running":
                break
            time.sleep(2.0)
            counter -= 1
        print(
            api.read_namespaced_pod_log(name=pod_name,
                                        namespace=NAMESPACE,
                                        follow=True))
        counter = 15
        while True:
            if counter < 0:
                raise RuntimeError("Pod did not finish on time.")
            info = api.read_namespaced_pod(pod_name, NAMESPACE)
            if info.status.phase == "Succeeded":
                break
            if info.status.phase == "Failed":
                raise RuntimeError("Test failed")
            time.sleep(2.0)
            counter -= 1
    finally:
        print(
            api.read_namespaced_pod_log(name=pod_name,
                                        namespace=NAMESPACE,
                                        follow=True))
        api.delete_namespaced_pod(pod_name, NAMESPACE, body=V1DeleteOptions())
        if new_namespace:
            run_command(["oc", "delete", "project", test_namespace])
        if with_pv_at:
            api.delete_namespaced_persistent_volume_claim(
                name=claim_name, namespace=NAMESPACE, body=V1DeleteOptions())
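
Code examples #10 and #11 derive volume and claim names from the test path with clean_string and then truncate them to 63 characters, the Kubernetes object-name limit. The helper is not shown on this page; a hypothetical implementation only needs to map the path to lowercase alphanumerics and dashes, e.g.:

import re


def clean_string(s: str) -> str:
    """Turn an arbitrary string (such as a test path) into a k8s-friendly name fragment."""
    return re.sub(r"[^a-z0-9-]+", "-", s.lower()).strip("-")
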
Code example #11
File: conftest.py Project: marusak/sandcastle
def run_test_within_pod(test_path: str,
                        with_pv_at: Optional[str] = None,
                        new_namespace: bool = False):
    """
    run selected test from within an openshift pod

    :param test_path: relative path to the test
    :param with_pv_at: path to PV within the pod
    :param new_namespace: create new namespace and pass it via env var
    """
    # this will connect to the cluster you have active right now - see `oc status`
    current_context = check_output(["oc", "config",
                                    "current-context"]).strip().decode()
    api_client = new_client_from_config(context=current_context)
    api = client.CoreV1Api(api_client)

    # you need this for minishift; or do `eval $(minishift docker-env) && make build`
    # # FIXME: get this from env or cli (`minishift openshift registry`)
    # also it doesn't work for me right now, unable to reach minishift's docker registry
    # registry = "172.30.1.1:5000"
    # openshift_image_name = f"{registry}/myproject/{TEST_IMAGE_BASENAME}"
    # # {'authorization': 'Bearer pRW5rGmqgBREnCeVweLcHbEhXvluvG1cImWfIrxWJ2A'}
    # api_token = list(api_client.configuration.api_key.values())[0].split(" ")[1]
    # check_call(["docker", "login", "-u", "developer", "-p", api_token, registry])
    # check_call(["docker", "tag", TEST_IMAGE_NAME, openshift_image_name])
    # check_call(["docker", "push", openshift_image_name])

    pod_name = f"test-orchestrator-{get_timestamp_now()}"

    cont_cmd = [
        "bash",
        "-c",
        "~/setup_env_in_openshift.sh "
        "&& ls -lha && pwd && id "
        f"&& pytest-3 -vv -l -p no:cacheprovider {test_path}",
    ]

    container: Dict[str, Any] = {
        "image": TEST_IMAGE_NAME,  # make sure this is correct
        "name": pod_name,
        "tty": True,  # corols
        "command": cont_cmd,
        "imagePullPolicy": "Never",
        "env": [],
    }

    user = f"system:serviceaccount:{NAMESPACE}:default"
    enable_user_access_namespace(user, NAMESPACE)
    test_namespace = None
    if new_namespace:
        test_namespace = f"sandcastle-tests-{get_timestamp_now()}"
        c = ["oc", "new-project", test_namespace]
        run_command(c)
        enable_user_access_namespace(user, test_namespace)
        container["env"] += [{
            "name": "SANDCASTLE_TESTS_NAMESPACE",
            "value": test_namespace
        }]

    spec = {"containers": [container], "restartPolicy": "Never"}

    pod_manifest = {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": pod_name
        },
        "spec": spec,
    }
    if with_pv_at:
        cleaned_test_name = clean_string(test_path)
        ts = get_timestamp_now()
        volume_name = f"{cleaned_test_name}-{ts}-vol"[-63:]
        claim_name = f"{cleaned_test_name}-{ts}-pvc"[-63:]
        container["env"] = [{"name": "SANDCASTLE_PVC", "value": claim_name}]
        pvc_dict = {
            "kind": "PersistentVolumeClaim",
            "spec": {
                "accessModes": ["ReadWriteMany"],
                "resources": {
                    "requests": {
                        "storage": "1Gi"
                    }
                },
            },
            "apiVersion": "v1",
            "metadata": {
                "name": claim_name
            },
        }
        api.create_namespaced_persistent_volume_claim(NAMESPACE, pvc_dict)
        container["volumeMounts"] = [{
            "mountPath": with_pv_at,
            "name": volume_name
        }]
        spec["volumes"] = [{
            "name": volume_name,
            "persistentVolumeClaim": {
                "claimName": claim_name
            }
        }]
    try:
        api.delete_namespaced_pod(pod_name, NAMESPACE, body=V1DeleteOptions())
    except ApiException as ex:
        if ex.status != 404:
            raise

    try:
        api.create_namespaced_pod(body=pod_manifest, namespace=NAMESPACE)
        counter = 15
        while True:
            if counter < 0:
                raise RuntimeError("Pod did not start on time.")
            info = api.read_namespaced_pod(pod_name, NAMESPACE)
            if info.status.phase == "Running":
                break
            elif info.status.phase == "Failed":
                print_pod_logs(api, pod_name, NAMESPACE)
                raise RuntimeError("The pod failed to start.")
            time.sleep(2.0)
            counter -= 1
        print_pod_logs(api, pod_name, NAMESPACE)
        counter = 15
        while True:
            if counter < 0:
                raise RuntimeError("Pod did not finish on time.")
            info = api.read_namespaced_pod(pod_name, NAMESPACE)
            if info.status.phase == "Succeeded":
                break
            if info.status.phase == "Failed":
                raise RuntimeError("Test failed")
            time.sleep(2.0)
            counter -= 1
    finally:
        print_pod_logs(api, pod_name, NAMESPACE)
        api.delete_namespaced_pod(pod_name, NAMESPACE, body=V1DeleteOptions())
        if new_namespace:
            run_command(["oc", "delete", "project", test_namespace])
        if with_pv_at:
            api.delete_namespaced_persistent_volume_claim(
                name=claim_name, namespace=NAMESPACE, body=V1DeleteOptions())
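
Code example #11 uses a print_pod_logs helper where code example #10 still inlines the call; a minimal sketch that matches the inlined version:

from kubernetes import client


def print_pod_logs(api: client.CoreV1Api, pod_name: str, namespace: str) -> None:
    """Print the logs of the given pod, following them until the pod finishes."""
    print(api.read_namespaced_pod_log(name=pod_name, namespace=namespace, follow=True))
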
Code example #12
File: conftest.py Project: marusak/sandcastle
def enable_user_access_namespace(user: str, namespace: str):
    c = [
        "oc", "adm", "-n", namespace, "policy", "add-role-to-user", "edit",
        user
    ]
    run_command(c)