def create_agent_daemonset(self, daemonset_path):
    """Create (or recreate) the agent daemonset from a YAML manifest.

    Loads the daemonset spec from *daemonset_path*, deletes any existing
    daemonset of the same name, pins a small CPU request on the agent
    container, optionally overrides the container image, then creates the
    daemonset and asserts that it and all of its pods become ready.

    Args:
        daemonset_path: Path to the daemonset YAML manifest on disk.

    Side effects (read from self): image_name, image_tag, namespace.
    Sets self.daemonset_name and self.pods.
    """
    # safe_load avoids PyYAML's deprecated default Loader and arbitrary
    # object construction; the manifest only needs plain YAML types.
    # The with-block also closes the file handle the original leaked.
    with open(daemonset_path) as manifest:
        daemonset_yaml = yaml.safe_load(manifest)

    self.daemonset_name = daemonset_yaml["metadata"]["name"]
    daemonset_labels = daemonset_yaml["spec"]["selector"]["matchLabels"]

    # Remove any leftover daemonset so the create below doesn't conflict.
    self.delete_agent_daemonset()

    # Keep the agent container's scheduling footprint small and predictable.
    daemonset_yaml["spec"]["template"]["spec"]["containers"][0]["resources"] = {
        "requests": {"cpu": "100m"}
    }

    if self.image_name and self.image_tag:
        print(
            'Creating daemonset "%s" for %s:%s from %s ...'
            % (self.daemonset_name, self.image_name, self.image_tag, daemonset_path)
        )
        daemonset_yaml["spec"]["template"]["spec"]["containers"][0]["image"] = (
            self.image_name + ":" + self.image_tag
        )
    else:
        print('Creating daemonset "%s" from %s ...' % (self.daemonset_name, daemonset_path))

    k8s.create_daemonset(body=daemonset_yaml, namespace=self.namespace)

    # Require the daemonset to stay ready for 5 seconds, not just flicker ready.
    assert ensure_always(
        lambda: k8s.daemonset_is_ready(self.daemonset_name, namespace=self.namespace), 5
    )

    # Find the agent pods by the daemonset's own selector labels.
    labels = ",".join(["%s=%s" % keyval for keyval in daemonset_labels.items()])
    self.pods = k8s.get_pods_by_labels(labels, namespace=self.namespace)
    assert self.pods, "no agent pods found"
    assert all(
        [k8s.pod_is_ready(pod.metadata.name, namespace=self.namespace) for pod in self.pods]
    )
def test_helm(k8s_cluster):
    """Install the agent via its helm chart and check that both the nginx
    and signalfx-metadata monitors report datapoints to the fake backend.

    On any failure (or success), dump each agent pod's status and logs to
    aid debugging.
    """
    with k8s_cluster.create_resources(
        [NGINX_YAML_PATH]
    ), tiller_rbac_resources(k8s_cluster), fake_backend.start() as backend:
        init_helm(k8s_cluster)
        # Tunnel the backend into the cluster, render the chart values
        # against the tunnel address, then install the chart.
        with k8s_cluster.run_tunnels(backend) as tunnel_ip, release_values_yaml(
            k8s_cluster, tunnel_ip, backend
        ) as values_file:
            install_helm_chart(k8s_cluster, values_file)
            try:
                # Both plugins must eventually emit at least one datapoint.
                for plugin_name in ("nginx", "signalfx-metadata"):
                    assert wait_for(
                        p(has_datapoint, backend, dimensions={"plugin": plugin_name})
                    )
            finally:
                # Always print agent diagnostics, pass or fail.
                agent_pods = get_pods_by_labels(
                    "app=signalfx-agent", namespace=k8s_cluster.test_namespace
                )
                for pod in agent_pods:
                    pod_name = pod.metadata.name
                    print("pod/%s:" % pod_name)
                    status = exec_pod_command(
                        pod_name, AGENT_STATUS_COMMAND, namespace=k8s_cluster.test_namespace
                    )
                    print("Agent Status:\n%s" % status)
                    logs = get_pod_logs(pod_name, namespace=k8s_cluster.test_namespace)
                    print("Agent Logs:\n%s" % logs)
def test_helm(k8s_cluster, helm_version):
    """Install the agent chart with a specific helm version and verify that
    datapoints from the test app's prometheus container and from the
    signalfx-metadata plugin reach the fake backend.

    Helm 2 needs a tiller init step; helm 3 does not. On exit, dump agent
    pod status/logs plus everything the backend received.
    """
    major = int(helm_version.split(".")[0])
    with run_helm_image(k8s_cluster, helm_version) as cont, k8s_cluster.create_resources(
        [APP_YAML_PATH]
    ), tiller_rbac_resources(k8s_cluster, major), fake_backend.start() as backend:
        # Tiller only exists in helm < 3.
        if major < 3:
            init_helm(k8s_cluster, cont, major)
        with k8s_cluster.run_tunnels(backend) as tunnel_ip, release_values_yaml(
            k8s_cluster, tunnel_ip, backend
        ) as values_path:
            # The helm container needs its own copy of the rendered values.
            copy_file_into_container(values_path, cont, values_path)
            install_helm_chart(k8s_cluster, values_path, cont, major)
            try:
                assert wait_for(
                    p(
                        has_datapoint,
                        backend,
                        dimensions={
                            "container_name": "prometheus",
                            "application": "helm-test",
                        },
                    ),
                    timeout_seconds=60,
                )
                assert wait_for(
                    p(has_datapoint, backend, dimensions={"plugin": "signalfx-metadata"}),
                    timeout_seconds=60,
                )
            finally:
                # Diagnostics: per-pod agent status/logs, then the backend's
                # full view of what arrived.
                for pod in get_pods_by_labels(
                    "app=signalfx-agent", namespace=k8s_cluster.test_namespace
                ):
                    pod_name = pod.metadata.name
                    print("pod/%s:" % pod_name)
                    status = exec_pod_command(
                        pod_name, AGENT_STATUS_COMMAND, namespace=k8s_cluster.test_namespace
                    )
                    print("Agent Status:\n%s" % status)
                    logs = get_pod_logs(pod_name, namespace=k8s_cluster.test_namespace)
                    print("Agent Logs:\n%s" % logs)
                print("\nDatapoints received:")
                for dp in backend.datapoints:
                    print_dp_or_event(dp)
                print("\nEvents received:")
                for event in backend.events:
                    print_dp_or_event(event)
                print(f"\nDimensions set: {backend.dims}")
def get_agent_pods(self):
    """Return the signalfx-agent pods in this object's namespace."""
    label_selector = "app=signalfx-agent"
    return utils.get_pods_by_labels(label_selector, namespace=self.namespace)