def run_agent(self, agent_yaml):
    with start_fake_backend() as backend:
        with self.run_tunnels(backend) as pod_ip:
            agent = Agent(
                agent_image_name=self.agent_image_name,
                cluster=self,
                namespace=self.test_namespace,
                fake_services=backend,
                fake_services_pod_ip=pod_ip,
            )
            with agent.deploy(agent_yaml):
                try:
                    yield agent
                finally:
                    print("\nDatapoints received:")
                    for dp in backend.datapoints or []:
                        print_dp_or_event(dp)
                    print("\nEvents received:")
                    for event in backend.events or []:
                        print_dp_or_event(event)
                    print(f"\nDimensions set: {backend.dims}")
                    print("\nTrace spans received:")
                    for span in backend.spans or []:
                        print(span)

def test_chef_on_windows(chef_version):
    run_win_chef_setup(chef_version)
    with ensure_fake_backend() as backend:
        try:
            run_win_chef_client(backend, INITIAL_VERSION, STAGE)
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            # upgrade agent
            run_win_chef_client(backend, UPGRADE_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            # downgrade agent
            run_win_chef_client(backend, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")

def test_installer_on_all_distros(base_image, init_system, agent_version):
    if agent_version.endswith("-post") and (base_image, init_system) in RPM_DISTROS:
        agent_version = agent_version.replace("-post", "~post")
    elif agent_version.endswith("~post") and (base_image, init_system) in DEB_DISTROS:
        agent_version = agent_version.replace("~post", "-post")

    args = "MYTOKEN" if agent_version == "latest" else f"--package-version {agent_version}-1 MYTOKEN"
    args = args if STAGE == "release" else f"--{STAGE} {args}"

    if agent_version == "latest" or tuple(agent_version.split(".")) >= ("5", "1", "0"):
        user = "******"
    else:
        user = None

    with _run_tests(base_image, init_system, args, user=user) as [backend, cont]:
        if agent_version != "latest":
            installed_version = get_agent_version(cont)
            agent_version = agent_version.replace("~", "-")
            assert (
                installed_version == agent_version
            ), f"Installed agent version is {installed_version} but should be {agent_version}"
        try:
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")

def test_win_installer(agent_version):
    uninstall_win_agent()
    with ensure_fake_backend() as backend:
        try:
            args = "-access_token MYTOKEN -stage final"
            if agent_version == "latest":
                agent_version = get_latest_win_agent_version(stage="final")
            else:
                args += f" -agent_version {agent_version}"
            installed_version = run_win_installer(backend, args)
            assert installed_version == agent_version, "installed agent version is '%s', expected '%s'" % (
                installed_version,
                agent_version,
            )
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "signalfx-metadata")
            ), "Datapoints didn't come through"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")

def test_win_local_msi_installer(request):
    # Get msi path from command line flag to pytest
    msi_path = request.config.getoption("--test-msi-path")
    if not msi_path:
        raise ValueError("You must specify the --test-msi-path flag to run msi tests")
    msi_path = os.path.abspath(msi_path)
    assert os.path.isfile(msi_path), f"{msi_path} not found!"

    uninstall_win_agent()
    with ensure_fake_backend() as backend:
        try:
            args = f"-access_token MYTOKEN -msi_path {msi_path}"
            run_win_installer(backend, args)
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")
            uninstall_win_agent(msi_path=msi_path)

def test_win_local_nupkg(request):
    assert has_choco(), "choco not installed"
    # Get nupkg path from command line flag to pytest
    nupkg_path = request.config.getoption("--test-nupkg-path")
    if not nupkg_path:
        raise ValueError("You must specify the --test-nupkg-path flag to run choco tests")
    nupkg_path = os.path.abspath(nupkg_path)
    assert os.path.isfile(nupkg_path), f"{nupkg_path} not found!"
    nupkg_dir = os.path.dirname(nupkg_path)

    uninstall_win_agent()
    with ensure_fake_backend() as backend:
        try:
            params = f"/access_token:MYTOKEN /ingest_url:{backend.ingest_url} /api_url:{backend.api_url}"
            run_win_command(f"choco install signalfx-agent -y -s {nupkg_dir} --params=\"'{params}'\"")
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")
            run_win_command("choco uninstall -y signalfx-agent")
            uninstall_win_agent()

def run_as_subproc(self):
    self.write_config()
    with run_subprocess(
        [AGENT_BIN, "-config", self.config_path] + (["-debug"] if self.debug else []),
        env=self.env,
        close_fds=False,
    ) as [get_output, pid]:
        self.pid = pid
        self.get_output = get_output
        try:
            yield
        finally:
            print("\nAgent output:")
            print_lines(self.get_output())
            if self.debug:
                print("\nDatapoints received:")
                for dp in self.fake_services.datapoints:
                    print_dp_or_event(dp)
                print("\nEvents received:")
                for event in self.fake_services.events:
                    print_dp_or_event(event)
                print("\nTrace spans received:")
                for span in self.fake_services.spans:
                    print(span)
                print(f"\nDimensions set: {self.fake_services.dims}")
                print("\nSplunk log entries received:")
                for entry in self.fake_services.splunk_entries:
                    print(entry)

def test_helm(k8s_cluster, helm_version):
    helm_major_version = int(helm_version.split(".")[0])
    with run_helm_image(k8s_cluster, helm_version) as cont:
        with k8s_cluster.create_resources([APP_YAML_PATH]), tiller_rbac_resources(
            k8s_cluster, helm_major_version
        ), fake_backend.start() as backend:
            if helm_major_version < 3:
                init_helm(k8s_cluster, cont, helm_major_version)
            with k8s_cluster.run_tunnels(backend) as proxy_pod_ip:
                with release_values_yaml(k8s_cluster, proxy_pod_ip, backend) as values_path:
                    copy_file_into_container(values_path, cont, values_path)
                    install_helm_chart(k8s_cluster, values_path, cont, helm_major_version)
                try:
                    assert wait_for(
                        p(
                            has_datapoint,
                            backend,
                            dimensions={"container_name": "prometheus", "application": "helm-test"},
                        ),
                        timeout_seconds=60,
                    )
                    assert wait_for(
                        p(has_datapoint, backend, dimensions={"plugin": "signalfx-metadata"}),
                        timeout_seconds=60,
                    )
                finally:
                    for pod in get_pods_by_labels("app=signalfx-agent", namespace=k8s_cluster.test_namespace):
                        print("pod/%s:" % pod.metadata.name)
                        status = exec_pod_command(
                            pod.metadata.name, AGENT_STATUS_COMMAND, namespace=k8s_cluster.test_namespace
                        )
                        print("Agent Status:\n%s" % status)
                        logs = get_pod_logs(pod.metadata.name, namespace=k8s_cluster.test_namespace)
                        print("Agent Logs:\n%s" % logs)
                    print("\nDatapoints received:")
                    for dp in backend.datapoints:
                        print_dp_or_event(dp)
                    print("\nEvents received:")
                    for event in backend.events:
                        print_dp_or_event(event)
                    print(f"\nDimensions set: {backend.dims}")

def test_puppet_on_windows(puppet_version):
    run_win_command(f"powershell.exe '{WIN_UNINSTALL_SCRIPT_PATH}'")
    if os.environ.get("USERNAME") == "VssAdministrator":
        if puppet_version == "latest":
            run_win_command("choco upgrade -y -f puppet-agent")
        else:
            run_win_command(f"choco upgrade -y -f puppet-agent --version {puppet_version}")
    if WIN_PUPPET_BIN_DIR not in os.environ.get("PATH"):
        os.environ["PATH"] = WIN_PUPPET_BIN_DIR + ";" + os.environ.get("PATH")
    if os.path.isdir(WIN_PUPPET_MODULE_DEST_DIR):
        shutil.rmtree(WIN_PUPPET_MODULE_DEST_DIR)
    shutil.copytree(WIN_PUPPET_MODULE_SRC_DIR, WIN_PUPPET_MODULE_DEST_DIR)
    run_win_command("puppet module install puppet-archive")
    run_win_command("puppet module install puppetlabs-powershell")
    with ensure_fake_backend() as backend:
        try:
            monitors = '{ type => "host-metadata" },'
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            # upgrade agent
            run_win_puppet_agent(backend, monitors, UPGRADE_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            # downgrade agent
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            # change agent config
            monitors = '{ type => "internal-metrics" },'
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
            ), "Didn't get internal metric datapoints"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")

def test_puppet_on_windows(puppet_version):
    run_win_puppet_setup(puppet_version)
    with ensure_fake_backend() as backend:
        try:
            monitors = [{"type": "host-metadata"}]
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            if UPGRADE_VERSION:
                # upgrade agent
                run_win_puppet_agent(backend, monitors, UPGRADE_VERSION, STAGE)
                backend.reset_datapoints()
                assert wait_for(
                    p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
                ), "Datapoints didn't come through"

                # downgrade agent
                run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
                backend.reset_datapoints()
                assert wait_for(
                    p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
                ), "Datapoints didn't come through"

            # change agent config
            monitors = [{"type": "internal-metrics"}]
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
            ), "Didn't get internal metric datapoints"

            # re-apply without any changes
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
            ), "Didn't get internal metric datapoints"
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")

def run_agent(self, agent_image, config=None, observer=None, monitors=None, namespace="default"):
    """
    Start the fake backend services and configure/create the k8s agent resources
    within the minikube container.

    Required Argument:
    agent_image:    Object returned from the agent_image fixture containing the agent
                    image's name, tag, and id.

    Optional Arguments:
    config:         Configuration YAML for the agent (overwrites the configmap agent.yaml).
                    If not None, takes precedence over `observer` and `monitors` arguments
                    (default: None).
    observer:       Name of the observer to set in the configmap agent.yaml (default: None).
    monitors:       List of monitors to set in the configmap agent.yaml (default: []).
    namespace:      Namespace for the agent (default: "default").
    """
    if not monitors:
        monitors = []
    with start_fake_backend(ip_addr=get_host_ip()) as backend:
        options = dict(
            image_name=agent_image["name"],
            image_tag=agent_image["tag"],
            observer=observer,
            monitors=monitors,
            config=config,
            cluster_name=self.cluster_name,
            namespace=namespace,
            backend=backend,
        )
        with self.agent.deploy(**options):
            try:
                yield self.agent, backend
            finally:
                if backend.datapoints:
                    print("\nDatapoints received:")
                    for dp in backend.datapoints:
                        print_dp_or_event(dp)
                if backend.events:
                    print("\nEvents received:")
                    for event in backend.events:
                        print_dp_or_event(event)

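# Hypothetical usage sketch (not part of the original source): the `yield` above suggests
# run_agent is consumed as a context manager (e.g. via contextlib.contextmanager or a
# pytest fixture). A test might drive it roughly like this; the `cluster` fixture name and
# the monitor definition are illustrative assumptions only.
#
#     with cluster.run_agent(agent_image, monitors=[{"type": "internal-metrics"}]) as [agent, backend]:
#         assert wait_for(p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent"))
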
def run_k8s_with_agent(
    agent_image, minikube, monitors, observer=None, namespace="default", yamls=None, yamls_timeout=K8S_CREATE_TIMEOUT
):
    """
    Runs a minikube environment with the agent and a set of specified resources.

    Required Args:
    agent_image (dict):                    Dict object from the agent_image fixture
    minikube (Minikube):                   Minikube object from the minikube fixture
    monitors (str, dict, or list of dict): YAML-based definition of monitor(s) for the smart agent agent.yaml

    Optional Args:
    observer (str):                        Observer for the smart agent agent.yaml (if None, the
                                           agent.yaml will not be configured for an observer)
    namespace (str):                       K8S namespace for the smart agent and deployments
    yamls (list of str):                   Path(s) to K8S deployment yamls to create
    yamls_timeout (int):                   Timeout in seconds to wait for the K8S deployments to be ready
    """
    if yamls is None:
        yamls = []
    try:
        monitors = yaml.load(monitors)
    except AttributeError:
        pass
    if isinstance(monitors, dict):
        monitors = [monitors]
    assert isinstance(monitors, list), "unknown type/definition for monitors:\n%s\n" % monitors
    with fake_backend.start(ip_addr=get_host_ip()) as backend:
        with minikube.deploy_k8s_yamls(yamls, namespace=namespace, timeout=yamls_timeout):
            with minikube.deploy_agent(
                AGENT_CONFIGMAP_PATH,
                AGENT_DAEMONSET_PATH,
                AGENT_SERVICEACCOUNT_PATH,
                AGENT_CLUSTERROLE_PATH,
                AGENT_CLUSTERROLEBINDING_PATH,
                observer=observer,
                monitors=monitors,
                cluster_name="minikube",
                backend=backend,
                image_name=agent_image["name"],
                image_tag=agent_image["tag"],
                namespace=namespace,
            ) as agent:
                try:
                    yield [backend, agent]
                finally:
                    print("\nDatapoints received:")
                    for dp in backend.datapoints:
                        print_dp_or_event(dp)
                    print("\nEvents received:")
                    for event in backend.events:
                        print_dp_or_event(event)

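# Hypothetical usage sketch (not part of the original source): assuming run_k8s_with_agent
# is wrapped with contextlib.contextmanager, a monitor test could use it roughly as shown
# below. The monitor definition is illustrative only; backend and agent come from the
# yielded list above.
#
#     monitors = [{"type": "internal-metrics"}]
#     with run_k8s_with_agent(agent_image, minikube, monitors) as [backend, agent]:
#         assert wait_for(p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent"))
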
def test_puppet_on_windows(puppet_version):
    run_win_puppet_setup(puppet_version)
    with ensure_fake_backend() as backend:
        try:
            monitors = [{"type": "host-metadata"}]
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            assert wait_for(
                p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
            ), "Datapoints didn't come through"

            if UPGRADE_VERSION:
                # upgrade agent
                run_win_puppet_agent(backend, monitors, UPGRADE_VERSION, STAGE)
                backend.reset_datapoints()
                assert wait_for(
                    p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
                ), "Datapoints didn't come through"

                # downgrade agent
                run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
                backend.reset_datapoints()
                assert wait_for(
                    p(has_datapoint_with_dim, backend, "plugin", "host-metadata")
                ), "Datapoints didn't come through"

            # change agent config
            monitors = [{"type": "internal-metrics"}]
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
            ), "Didn't get internal metric datapoints"

            # re-apply without any changes
            run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE)
            backend.reset_datapoints()
            assert wait_for(
                p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
            ), "Didn't get internal metric datapoints"

            # change agent config path
            if os.path.isfile(WIN_CONFIG_PATH):
                os.remove(WIN_CONFIG_PATH)
            with tempfile.TemporaryDirectory() as tmpdir:
                new_config_path = os.path.join(tmpdir, "agent.yaml")
                run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE, config_path=new_config_path)
                backend.reset_datapoints()
                assert wait_for(
                    p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
                ), "Didn't get internal metric datapoints"

            # change agent installation path
            with tempfile.TemporaryDirectory() as new_install_dir:
                new_install_dir = os.path.realpath(new_install_dir)
                try:
                    run_win_puppet_agent(backend, monitors, INITIAL_VERSION, STAGE, install_dir=new_install_dir)
                    backend.reset_datapoints()
                    assert wait_for(
                        p(has_datapoint_with_metric_name, backend, "sfxagent.datapoints_sent")
                    ), "Didn't get internal metric datapoints"
                finally:
                    agent_path = os.path.join(new_install_dir, "SignalFxAgent", "bin", "signalfx-agent.exe")
                    if os.path.isfile(agent_path):
                        run_win_command(["powershell.exe", "-command", "Stop-Service -Name signalfx-agent"])
                        run_win_command([agent_path, "-service", "uninstall"])
        finally:
            print("\nDatapoints received:")
            for dp in backend.datapoints:
                print_dp_or_event(dp)
            print("\nEvents received:")
            for event in backend.events:
                print_dp_or_event(event)
            print(f"\nDimensions set: {backend.dims}")