Example 1
    @contextmanager
    def run_agent(self, agent_yaml, wait_for_ready=True):
        with start_fake_backend() as backend:
            with self.run_tunnels(backend) as pod_ip:
                agent = Agent(
                    agent_image_name=self.agent_image_name,
                    cluster=self,
                    namespace=self.test_namespace,
                    fake_services=backend,
                    fake_services_pod_ip=pod_ip,
                )
                with agent.deploy(agent_yaml, wait_for_ready=wait_for_ready):
                    try:
                        yield agent
                    finally:
                        print("\nDatapoints received:")
                        for dp in backend.datapoints or []:
                            print_dp_or_event(dp)

                        print("\nEvents received:")
                        for event in backend.events or []:
                            print_dp_or_event(event)
                        print(f"\nDimensions set: {backend.dims}")
                        print("\nTrace spans received:")
                        for span in backend.spans or []:
                            print(span)
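
A minimal usage sketch for the context manager above. The pytest fixture name k8s_cluster, the AGENT_YAML config, and the agent.fake_services attribute used in the assertion are illustrative assumptions, not part of the snippet:

# Hypothetical test built on the run_agent() context manager above.
# "k8s_cluster" is an assumed fixture supplying the cluster object, and
# wait_for() is the polling helper used elsewhere in these examples.
AGENT_YAML = """
observers:
  - type: k8s-api
monitors:
  - type: kubelet-stats
"""

def test_agent_sends_datapoints(k8s_cluster):
    with k8s_cluster.run_agent(AGENT_YAML) as agent:
        # The fake backend attached to the agent records everything it sends;
        # exposing it as agent.fake_services is an assumption here.
        assert wait_for(lambda: agent.fake_services.datapoints), \
            "timed out waiting for the agent to send datapoints"
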
Example 2
 def __init__(self):
     self.agent = Agent()
     self.client = None
     self.cluster_name = None
     self.container = None
     self.container_ip = None
     self.container_name = None
     self.host_client = get_docker_client()
     self.image_tag = None
     self.k8s_version = None
     self.kubeconfig = None
     self.registry_port = 5000
     self.resources = []
     self.version = None
Example 3
 def __init__(self):
     self.bootstrapper = None
     self.container = None
     self.client = None
     self.version = None
     self.k8s_version = None
     self.name = None
     self.host_client = get_docker_client()
     self.yamls = []
     self.agent = Agent()
     self.cluster_name = "minikube"
     self.kubeconfig = None
     self.namespace = "default"
     self.worker_id = "master"
     self.registry_port = None
Example 4
 @contextmanager
 def deploy_agent(
     self,
     configmap_path,
     daemonset_path,
     serviceaccount_path,
     clusterrole_path,
     clusterrolebinding_path,
     observer=None,
     monitors=None,
     cluster_name="minikube",
     backend=None,
     image_name=None,
     image_tag=None,
     namespace="default",
 ):  # pylint: disable=too-many-arguments
     if monitors is None:
         monitors = []
     self.agent.deploy(
         self.client,
         configmap_path,
         daemonset_path,
         serviceaccount_path,
         clusterrole_path,
         clusterrolebinding_path,
         observer,
         monitors,
         cluster_name=cluster_name,
         backend=backend,
         image_name=image_name,
         image_tag=image_tag,
         namespace=namespace,
     )
     try:
         yield self.agent
         print("\nAgent status:\n%s\n" % self.agent.get_status())
         print("\nAgent container logs:\n%s\n" %
               self.agent.get_container_logs())
     except Exception:
         print("\n%s\n" % get_all_logs(self))
         raise
     finally:
         self.agent.delete()
         self.agent = Agent()
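
A usage sketch for deploy_agent above, which the full class in Example 5 shows decorated as a context manager. The fixture names ("minikube", "backend") and the deployments/*.yaml paths are illustrative assumptions:

# Hypothetical test: deploy the agent daemonset into an existing minikube
# cluster and wait for datapoints to arrive on the fake backend.
def test_agent_daemonset(minikube, backend):
    with minikube.deploy_agent(
        "deployments/configmap.yaml",
        "deployments/daemonset.yaml",
        "deployments/serviceaccount.yaml",
        "deployments/clusterrole.yaml",
        "deployments/clusterrolebinding.yaml",
        observer="k8s-api",
        monitors=[{"type": "kubelet-stats"}],
        backend=backend,
        namespace="default",
    ) as agent:
        assert wait_for(lambda: backend.datapoints), \
            "timed out waiting for datapoints"
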
Example 5
class Minikube:  # pylint: disable=too-many-instance-attributes
    def __init__(self):
        self.bootstrapper = None
        self.container = None
        self.client = None
        self.version = None
        self.k8s_version = None
        self.name = None
        self.host_client = get_docker_client()
        self.yamls = []
        self.agent = Agent()
        self.cluster_name = "minikube"
        self.kubeconfig = None
        self.namespace = "default"
        self.worker_id = "master"
        self.registry_port = None

    def get_client(self):
        if self.container:
            self.container.reload()
            self.client = docker.DockerClient(base_url="tcp://%s:2375" %
                                              container_ip(self.container),
                                              version="auto")

        return self.client

    def load_kubeconfig(self, kubeconfig_path="/kubeconfig", timeout=300):
        with tempfile.NamedTemporaryFile(dir="/tmp/scratch") as fd:
            kubeconfig = fd.name
            assert wait_for(
                p(container_cmd_exit_0, self.container,
                  "test -f %s" % kubeconfig_path),
                timeout_seconds=timeout,
                interval_seconds=2,
            ), ("timed out waiting for the minikube cluster to be ready!\n\n%s\n\n"
                % self.get_logs())
            time.sleep(2)
            exit_code, output = self.container.exec_run(
                "cp -f %s %s" % (kubeconfig_path, kubeconfig))
            assert exit_code == 0, "failed to get %s from minikube!\n%s" % (
                kubeconfig_path, output.decode("utf-8"))
            self.kubeconfig = kubeconfig
            kube_config.load_kube_config(config_file=self.kubeconfig)

    def get_bootstrapper(self):
        code, _ = self.container.exec_run("which localkube")
        if code == 0:
            self.bootstrapper = "localkube"
        else:
            code, _ = self.container.exec_run("which kubeadm")
            if code == 0:
                self.bootstrapper = "kubeadm"
        return self.bootstrapper

    def connect(self, name, timeout, version=None):
        print("\nConnecting to %s container ..." % name)
        assert wait_for(
            p(container_is_running, self.host_client, name),
            timeout_seconds=timeout,
            interval_seconds=2), ("timed out waiting for container %s!" % name)
        self.container = self.host_client.containers.get(name)
        self.load_kubeconfig(timeout=timeout)
        self.client = self.get_client()
        self.get_bootstrapper()
        self.name = name
        self.k8s_version = version

    def deploy(self, version, timeout, options=None):
        if options is None:
            options = {}
        self.registry_port = get_free_port()
        if container_is_running(self.host_client, "minikube"):
            self.host_client.containers.get("minikube").remove(force=True,
                                                               v=True)
        self.k8s_version = version
        if self.k8s_version[0] != "v":
            self.k8s_version = "v" + self.k8s_version
        if not options:
            options = {
                "name": "minikube",
                "privileged": True,
                "environment": {
                    "K8S_VERSION": self.k8s_version,
                    "TIMEOUT": str(timeout)
                },
                "ports": {
                    "8080/tcp": None,
                    "8443/tcp": None,
                    "2375/tcp": None,
                    "%d/tcp" % self.registry_port: self.registry_port,
                },
                "volumes": {
                    "/tmp/scratch": {
                        "bind": "/tmp/scratch",
                        "mode": "rw"
                    }
                },
            }
        if MINIKUBE_VERSION:
            self.version = MINIKUBE_VERSION
        elif semver.match(self.k8s_version.lstrip("v"), ">=1.11.0"):
            self.version = MINIKUBE_KUBEADM_VERSION
        else:
            self.version = MINIKUBE_LOCALKUBE_VERSION
        if self.version == "latest" or semver.match(
                self.version.lstrip("v"),
                ">" + MINIKUBE_LOCALKUBE_VERSION.lstrip("v")):
            options["command"] = "/lib/systemd/systemd"
        else:
            options["command"] = "sleep inf"
        print("\nDeploying minikube %s cluster ..." % self.k8s_version)
        image, _ = self.host_client.images.build(
            path=os.path.join(TEST_SERVICES_DIR, "minikube"),
            buildargs={"MINIKUBE_VERSION": self.version},
            tag="minikube:%s" % self.version,
            rm=True,
            forcerm=True,
        )
        self.container = self.host_client.containers.run(image.id,
                                                         detach=True,
                                                         **options)
        self.name = self.container.name
        self.container.exec_run("start-minikube.sh", detach=True)
        self.load_kubeconfig(timeout=timeout)
        self.client = self.get_client()
        self.get_bootstrapper()

    def start_registry(self):
        if not self.client:
            self.client = self.get_client()
        print("\nStarting registry container localhost:%d in minikube ..." %
              self.registry_port)
        self.client.containers.run(
            image="registry:latest",
            name="registry",
            detach=True,
            environment={
                "REGISTRY_HTTP_ADDR": "0.0.0.0:%d" % self.registry_port
            },
            ports={"%d/tcp" % self.registry_port: self.registry_port},
        )

    def build_image(self, dockerfile_dir, build_opts=None):
        if build_opts is None:
            build_opts = {}
        if not self.client:
            self.get_client()
        print("\nBuilding docker image from %s ..." %
              os.path.realpath(dockerfile_dir))
        self.client.images.build(path=dockerfile_dir,
                                 rm=True,
                                 forcerm=True,
                                 **build_opts)

    @contextmanager
    def deploy_k8s_yamls(self, yamls=None, namespace=None, timeout=180):
        if yamls is None:
            yamls = []
        self.yamls = []
        for yaml_file in yamls:
            assert os.path.isfile(yaml_file), '"%s" not found!' % yaml_file
            docs = []
            with open(yaml_file, "r") as fd:
                docs = yaml.safe_load_all(fd.read())

            for doc in docs:
                kind = doc["kind"]
                name = doc["metadata"]["name"]
                api_version = doc["apiVersion"]
                api_client = api_client_from_version(api_version)

                if not doc.get("metadata", {}).get("namespace"):
                    if "metadata" not in doc:
                        doc["metadata"] = {}
                    doc["metadata"]["namespace"] = namespace

                if has_resource(name, kind, api_client, namespace):
                    print('Deleting %s "%s" ...' % (kind, name))
                    delete_resource(name,
                                    kind,
                                    api_client,
                                    namespace=namespace)

                print("Creating %s from %s ..." % (kind, yaml_file))
                create_resource(doc,
                                api_client,
                                namespace=namespace,
                                timeout=timeout)
                self.yamls.append(doc)

        for doc in filter(lambda d: d["kind"] == "Deployment", self.yamls):
            print("Waiting for deployment %s to be ready ..." %
                  doc["metadata"]["name"])
            wait_for_deployment(doc, timeout)

        try:
            yield
        finally:
            for res in self.yamls:
                kind = res["kind"]
                name = res["metadata"]["name"]
                api_version = res["apiVersion"]
                api_client = api_client_from_version(api_version)
                print('Deleting %s "%s" ...' % (kind, name))
                delete_resource(name,
                                kind,
                                api_client,
                                namespace=res["metadata"].get(
                                    "namespace", namespace))
            self.yamls = []

    def pull_agent_image(self, name, tag, image_id=None):
        if image_id and has_docker_image(self.client, image_id):
            return self.client.images.get(image_id)

        if has_docker_image(self.client, name, tag):
            return self.client.images.get(name + ":" + tag)

        return self.client.images.pull(name, tag=tag)

    @contextmanager
    def deploy_agent(
        self,
        configmap_path,
        daemonset_path,
        serviceaccount_path,
        clusterrole_path,
        clusterrolebinding_path,
        observer=None,
        monitors=None,
        cluster_name="minikube",
        backend=None,
        image_name=None,
        image_tag=None,
        namespace="default",
    ):  # pylint: disable=too-many-arguments
        if monitors is None:
            monitors = []
        self.agent.deploy(
            self.client,
            configmap_path,
            daemonset_path,
            serviceaccount_path,
            clusterrole_path,
            clusterrolebinding_path,
            observer,
            monitors,
            cluster_name=cluster_name,
            backend=backend,
            image_name=image_name,
            image_tag=image_tag,
            namespace=namespace,
        )
        try:
            yield self.agent
            print("\nAgent status:\n%s\n" % self.agent.get_status())
            print("\nAgent container logs:\n%s\n" %
                  self.agent.get_container_logs())
        except Exception:
            print("\n%s\n" % get_all_logs(self))
            raise
        finally:
            self.agent.delete()
            self.agent = Agent()

    def get_container_logs(self):
        try:
            return self.container.logs().decode("utf-8").strip()
        except Exception as e:  # pylint: disable=broad-except
            return "Failed to get minikube container logs!\n%s" % str(e)

    def get_localkube_logs(self):
        try:
            exit_code, _ = self.container.exec_run(
                "test -f /var/lib/localkube/localkube.err")
            if exit_code == 0:
                _, output = self.container.exec_run(
                    "cat /var/lib/localkube/localkube.err")
                return output.decode("utf-8").strip()
        except Exception as e:  # pylint: disable=broad-except
            return "Failed to get localkube logs from minikube!\n%s" % str(e)
        return None

    def get_logs(self):
        if self.container and self.bootstrapper:
            _, start_minikube_output = self.container.exec_run(
                "cat /var/log/start-minikube.log")
            if self.bootstrapper == "localkube":
                return "/var/log/start-minikube.log:\n%s\n\n/var/lib/localkube/localkube.err:\n%s" % (
                    start_minikube_output.decode("utf-8").strip(),
                    self.get_localkube_logs(),
                )
            if self.bootstrapper == "kubeadm":
                _, minikube_logs = self.container.exec_run("minikube logs")
                return "/var/log/start-minikube.log:\n%s\n\nminikube logs:\n%s" % (
                    start_minikube_output.decode("utf-8").strip(),
                    minikube_logs.decode("utf-8").strip(),
                )
        return ""
Example 6
class Minikube:  # pylint: disable=too-many-instance-attributes
    def __init__(self):
        self.agent = Agent()
        self.client = None
        self.cluster_name = None
        self.container = None
        self.container_ip = None
        self.container_name = None
        self.host_client = get_docker_client()
        self.image_tag = None
        self.k8s_version = None
        self.kubeconfig = None
        self.registry_port = 5000
        self.resources = []
        self.version = None

    def get_version(self, k8s_version):
        if MINIKUBE_VERSION:
            self.version = MINIKUBE_VERSION
        elif k8s_version.lower() == "latest" or semver.match(
                k8s_version.lstrip("v"), ">=" + K8S_MIN_KUBEADM_VERSION):
            self.version = MINIKUBE_KUBEADM_VERSION
        else:
            self.version = MINIKUBE_LOCALKUBE_VERSION
        if self.version:
            self.version = "v" + self.version.lstrip("v")
            self.image_tag = MINIKUBE_IMAGE_NAME + ":" + self.version
        return self.version

    def is_running(self):
        if not self.container:
            filters = {"name": self.container_name, "status": "running"}
            if self.host_client and self.container_name and self.host_client.containers.list(
                    filters=filters):
                self.container = self.host_client.containers.get(
                    self.container_name)
        if self.container:
            self.container.reload()
            self.container_ip = container_ip(self.container)
            return self.container.status == "running" and self.container_ip
        return False

    def is_ready(self):
        def kubeconfig_exists():
            try:
                return container_cmd_exit_0(
                    self.container, "test -f %s" % MINIKUBE_KUBECONFIG_PATH)
            except requests.exceptions.RequestException as e:
                print("requests.exceptions.RequestException:\n%s" % str(e))
                return False

        return self.is_running() and tcp_socket_open(
            self.container_ip, K8S_API_PORT) and kubeconfig_exists()

    def exec_cmd(self, command):
        if self.container:
            print("Executing '%s' ..." % command)
            code, output = self.container.exec_run(command)
            output = output.decode("utf-8")
            assert code == 0, output
            print(output)
            return output
        return ""

    def exec_kubectl(self, command, namespace=None):
        command = "kubectl %s" % command
        if namespace:
            command += " -n %s" % namespace
        return self.exec_cmd(command)

    def get_cluster_version(self):
        version_yaml = self.exec_kubectl("version --output=yaml")
        assert version_yaml, "failed to get kubectl version"
        cluster_version = yaml.safe_load(version_yaml).get(
            "serverVersion").get("gitVersion")
        return check_k8s_version(cluster_version)

    def get_client(self):
        if not self.client:
            assert wait_for(
                self.is_running, timeout_seconds=30,
                interval_seconds=2), ("timed out waiting for %s container" %
                                      self.container_name)
            assert wait_for(
                p(tcp_socket_open, self.container_ip, 2375),
                timeout_seconds=30,
                interval_seconds=2), (
                    "timed out waiting for docker engine in %s container!" %
                    self.container_name)
            self.client = docker.DockerClient(base_url="tcp://%s:2375" %
                                              self.container_ip,
                                              version="auto")
        return self.client

    def get_logs(self):
        if self.is_running():
            return "/var/log/start-minikube.log:\n%s" % get_container_file_content(
                self.container, "/var/log/start-minikube.log")
        return "%s container is not running" % self.container_name

    def connect_to_cluster(self, timeout=300):
        print("Waiting for minikube cluster to be ready ...")
        start_time = time.time()
        assert wait_for(
            self.is_ready, timeout_seconds=timeout, interval_seconds=2), (
                "timed out waiting for minikube cluster to be ready!\n%s" %
                self.get_logs())
        print("Waited %d seconds" % (time.time() - start_time))
        time.sleep(2)
        if self.k8s_version:
            cluster_version = self.get_cluster_version()
            assert self.k8s_version.lstrip("v") == cluster_version.lstrip(
                "v"
            ), ("desired K8S version (%s) does not match actual cluster version (%s):\n%s"
                % (self.k8s_version, cluster_version, self.get_logs()))
        else:
            self.k8s_version = self.get_cluster_version()
        content = get_container_file_content(self.container,
                                             MINIKUBE_KUBECONFIG_PATH)
        self.kubeconfig = yaml.safe_load(content)
        current_context = self.kubeconfig.get("current-context")
        for context in self.kubeconfig.get("contexts"):
            if context.get("name") == current_context:
                self.cluster_name = context.get("context").get("cluster")
                break
        assert self.cluster_name, "cluster not found in %s:\n%s" % (
            MINIKUBE_KUBECONFIG_PATH, content)
        with tempfile.NamedTemporaryFile(mode="w") as fd:
            fd.write(content)
            fd.flush()
            kube_config.load_kube_config(config_file=fd.name)
        self.get_client()

    def connect(self,
                name=MINIKUBE_CONTAINER_NAME,
                k8s_version=None,
                timeout=300):
        self.container_name = name
        if k8s_version:
            assert self.get_version(
                k8s_version), "failed to get minikube version"
        if self.image_tag:
            start_time = time.time()
            print("Waiting for %s image to be built ..." % self.image_tag)
            assert wait_for(
                p(has_docker_image, self.host_client, self.image_tag),
                timeout_seconds=MINIKUBE_IMAGE_TIMEOUT,
                interval_seconds=2,
            ), ("timed out waiting for %s image to be built" % self.image_tag)
            print("Waited %d seconds" % (time.time() - start_time))
        print("\nConnecting to cluster in %s container ..." %
              self.container_name)
        self.connect_to_cluster(timeout)

    def deploy(self, k8s_version, timeout=300, options=None):
        self.k8s_version = check_k8s_version(k8s_version)
        assert self.get_version(k8s_version), "failed to get minikube version"
        if options is None:
            options = {}
        options.setdefault("name", MINIKUBE_CONTAINER_NAME)
        try:
            self.host_client.containers.get(options["name"]).remove(force=True,
                                                                    v=True)
        except docker.errors.NotFound:
            pass
        options.setdefault("privileged", True)
        options.setdefault(
            "environment",
            {
                "K8S_VERSION": self.k8s_version,
                "TIMEOUT": str(timeout),
                "KUBECONFIG_PATH": MINIKUBE_KUBECONFIG_PATH
            },
        )
        if tcp_socket_open("127.0.0.1", self.registry_port):
            self.registry_port = get_free_port()
        options.setdefault("ports",
                           {"%d/tcp" % self.registry_port: self.registry_port})
        options.setdefault("detach", True)
        print("\nBuilding %s image ..." % self.image_tag)
        build_opts = dict(buildargs={"MINIKUBE_VERSION": self.version},
                          tag=self.image_tag,
                          dockerfile=MINIKUBE_DOCKERFILE_PATH)
        image_id = self.build_image(REPO_ROOT_DIR, build_opts,
                                    "unix://var/run/docker.sock")
        print("\nDeploying minikube %s cluster ..." % self.k8s_version)
        self.container = self.host_client.containers.run(image_id, **options)
        self.container_name = self.container.name
        assert wait_for(
            self.is_running, timeout_seconds=30,
            interval_seconds=2), ("timed out waiting for %s container" %
                                  self.container_name)
        self.container.exec_run("start-minikube.sh", detach=True)
        self.connect_to_cluster(timeout)
        self.start_registry()

    def start_registry(self):
        self.get_client()
        print("\nStarting registry container localhost:%d in minikube ..." %
              self.registry_port)
        retry(
            p(
                self.client.containers.run,
                image="registry:2.7",
                name="registry",
                detach=True,
                environment={
                    "REGISTRY_HTTP_ADDR": "0.0.0.0:%d" % self.registry_port
                },
                ports={"%d/tcp" % self.registry_port: self.registry_port},
            ),
            docker.errors.DockerException,
        )
        assert wait_for(
            p(tcp_socket_open, self.container_ip, self.registry_port),
            timeout_seconds=30,
            interval_seconds=2), "timed out waiting for registry to start!"

    def pull_agent_image(self, name, tag, image_id=None):
        if image_id and has_docker_image(self.client, image_id):
            return self.client.images.get(image_id)

        if has_docker_image(self.client, name, tag):
            return self.client.images.get(name + ":" + tag)

        return self.client.images.pull(name, tag=tag)

    def build_image(self, dockerfile_dir, build_opts=None, docker_url=None):
        """
        Use low-level api client to build images in order to get build logs.
        Returns the image id.
        """
        def _build():
            client = docker.APIClient(base_url=docker_url, version="auto")
            build_log = []
            has_error = False
            image_id = None
            for line in client.build(path=dockerfile_dir,
                                     rm=True,
                                     forcerm=True,
                                     **build_opts):
                json_line = json.loads(line)
                keys = json_line.keys()
                if "stream" in keys:
                    build_log.append(json_line.get("stream").strip())
                else:
                    build_log.append(str(json_line))
                    if "error" in keys:
                        has_error = True
                    elif "aux" in keys:
                        image_id = json_line.get("aux").get("ID")
            assert not has_error, "build failed for %s:\n%s" % (
                dockerfile_dir, "\n".join(build_log))
            assert image_id, "failed to get id from output for built image:\n%s" % "\n".join(
                build_log)
            return image_id

        if os.path.isdir(os.path.join(TEST_SERVICES_DIR, dockerfile_dir)):
            dockerfile_dir = os.path.join(TEST_SERVICES_DIR, dockerfile_dir)
        else:
            assert os.path.isdir(
                dockerfile_dir
            ), "Dockerfile directory %s not found!" % dockerfile_dir
        if build_opts is None:
            build_opts = {}
        if not docker_url:
            docker_url = "tcp://%s:2375" % self.container_ip
        print("\nBuilding image from %s ..." % dockerfile_dir)
        return retry(_build, AssertionError)

    def delete_resources(self):
        for doc in self.resources:
            kind = doc["kind"]
            name = doc["metadata"]["name"]
            namespace = doc["metadata"]["namespace"]
            api_client = k8s.api_client_from_version(doc["apiVersion"])
            if k8s.has_resource(name, kind, api_client, namespace):
                print('Deleting %s "%s" ...' % (kind, name))
                k8s.delete_resource(name,
                                    kind,
                                    api_client,
                                    namespace=namespace)

    @contextmanager
    def create_resources(self,
                         yamls=None,
                         namespace="default",
                         timeout=k8s.K8S_CREATE_TIMEOUT):
        def wait_for_deployments():
            for doc in filter(lambda d: d["kind"] == "Deployment",
                              self.resources):
                name = doc["metadata"]["name"]
                nspace = doc["metadata"]["namespace"]
                print("Waiting for deployment %s to be ready ..." % name)
                try:
                    start_time = time.time()
                    assert wait_for(
                        p(k8s.deployment_is_ready, name, nspace),
                        timeout_seconds=timeout,
                        interval_seconds=2
                    ), 'timed out waiting for deployment "%s" to be ready!\n%s' % (
                        name, k8s.get_pod_logs(name, nspace))
                    print("Waited %d seconds" % (time.time() - start_time))
                finally:
                    print(
                        self.exec_kubectl("describe deployment %s" % name,
                                          namespace=nspace))
                    for pod in k8s.get_all_pods(nspace):
                        print(
                            self.exec_kubectl("describe pod %s" %
                                              pod.metadata.name,
                                              namespace=nspace))

        if yamls is None:
            yamls = []
        for yaml_file in yamls:
            assert os.path.isfile(yaml_file), '"%s" not found!' % yaml_file
            with open(yaml_file, "r") as fd:
                for doc in yaml.safe_load_all(fd.read()):
                    kind = doc["kind"]
                    name = doc["metadata"]["name"]
                    nspace = doc["metadata"].setdefault("namespace", namespace)
                    api_client = k8s.api_client_from_version(doc["apiVersion"])
                    if k8s.has_resource(name,
                                        kind,
                                        api_client,
                                        namespace=nspace):
                        print('Deleting %s "%s" ...' % (kind, name))
                        k8s.delete_resource(name,
                                            kind,
                                            api_client,
                                            namespace=nspace)
                    print("Creating %s from %s ..." % (kind, yaml_file))
                    k8s.create_resource(doc,
                                        api_client,
                                        namespace=nspace,
                                        timeout=timeout)
                    self.resources.append(doc)

        wait_for_deployments()

        try:
            yield
        finally:
            self.delete_resources()
            self.resources = []

    @contextmanager
    def run_agent(self,
                  agent_image,
                  config=None,
                  observer=None,
                  monitors=None,
                  namespace="default"):
        """
        Start the fake backend services and configure/create the k8s agent resources within the minikube container.

        Required Argument:
        agent_image:    Object returned from the agent_image fixture containing the agent image's name, tag, and id.

        Optional Arguments:
        config:         Configuration YAML for the agent (overwrites the configmap agent.yaml).
                        If not None, takes precedence over `observer` and `monitors` arguments (default: None).
        observer:       Name of the observer to set in the configmap agent.yaml (default: None).
        monitors:       List of monitors to set in the configmap agent.yaml (default: []).
        namespace:      Namespace for the agent (default: "default").
        """

        if not monitors:
            monitors = []
        with start_fake_backend(ip_addr=get_host_ip()) as backend:
            options = dict(
                image_name=agent_image["name"],
                image_tag=agent_image["tag"],
                observer=observer,
                monitors=monitors,
                config=config,
                cluster_name=self.cluster_name,
                namespace=namespace,
                backend=backend,
            )
            with self.agent.deploy(**options):
                try:
                    yield self.agent, backend
                finally:
                    if backend.datapoints:
                        print("\nDatapoints received:")
                        for dp in backend.datapoints:
                            print_dp_or_event(dp)
                    if backend.events:
                        print("\nEvents received:")
                        for event in backend.events:
                            print_dp_or_event(event)
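
A closing usage sketch for run_agent() above, assuming "minikube" and "agent_image" are pytest fixtures that supply a connected Minikube instance and the built agent image dict described in the docstring; the monitor configuration is an illustrative assumption:

# Hypothetical test built on the run_agent() context manager from Example 6.
def test_kubelet_stats(minikube, agent_image):
    monitors = [{"type": "kubelet-stats", "kubeletAPI": {"skipVerify": True}}]
    with minikube.run_agent(agent_image, monitors=monitors) as (agent, backend):
        assert wait_for(lambda: backend.datapoints), \
            "timed out waiting for datapoints from the agent"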