import asyncio
import os
import sys
import time
from enum import Enum

from dcr.scenario_utils.common_utils import execute_commands_concurrently_on_test_vms
from dcr.scenario_utils.logging_utils import get_logger

logger = get_logger("dcr.scripts.orchestrator.execute_ssh_on_vm")


class SetupCommands:
    setup_vm = "setup_vm"
    fetch_results = "fetch_results"
    harvest = "harvest"


async def run_tasks(command: str):
    ssh_cmd = f'ssh -o StrictHostKeyChecking=no {{username}}@{{ip}}'
    sources_dir = os.environ.get('BUILD_SOURCESDIRECTORY')
    artifact_dir = os.environ.get('BUILD_ARTIFACTSTAGINGDIRECTORY')

    if command == SetupCommands.setup_vm:
        dcr_root_dir = f"/home/{{username}}/dcr"
        pypy_path = os.environ.get("PYPYPATH")
        agent_version = os.environ.get("AGENTVERSION")
        setup_commands = [
            f"scp -o StrictHostKeyChecking=no -r {sources_dir}/dcr/ {{username}}@{{ip}}:~/",
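            # The doubled braces above are escapes inside the f-strings: they leave
            # literal {username}/{ip} placeholders in each command template, which
            # execute_commands_concurrently_on_test_vms presumably fills in per test
            # VM before running the command. Hypothetical expansion for illustration
            # (the username and IP values are made up):
            #   "scp ... {username}@{ip}:~/".format(username="azureuser", ip="10.0.0.4")
            #   -> "scp ... azureuser@10.0.0.4:~/"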
import glob
import os
import shutil
import sys

from junitparser import JUnitXml

from dcr.scenario_utils.logging_utils import get_logger

logger = get_logger("dcr.scripts.orchestrator.generate_test_files")


def merge_xml_files(test_file_pattern):
    xml_data = JUnitXml()
    staging_dir = os.environ['BUILD_ARTIFACTSTAGINGDIRECTORY']
    for test_file in glob.glob(test_file_pattern):
        xml_data += JUnitXml.fromfile(test_file)
        # Move file to harvest dir to save state and not publish the same test twice
        shutil.move(test_file, os.path.join(staging_dir, "harvest", os.path.basename(test_file)))

    if xml_data.tests > 0:
        # Merge all files into a single file for cleaner output
        output_file_name = f"test-results-{os.environ['SCENARIONAME']}-{os.environ['DISTRONAME']}.xml"
        xml_data.write(os.path.join(staging_dir, output_file_name))
    else:
        logger.info(f"No test files found for pattern: {test_file_pattern}")
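

# Illustrative entry point (not confirmed from the original script): this sketch
# assumes the test-file glob pattern arrives as the first CLI argument, which
# would also explain the otherwise-unused sys import above.
if __name__ == '__main__':
    out_dir = os.environ['BUILD_ARTIFACTSTAGINGDIRECTORY']
    # merge_xml_files moves each processed file into <staging>/harvest, so make
    # sure that directory exists before merging.
    os.makedirs(os.path.join(out_dir, "harvest"), exist_ok=True)
    merge_xml_files(sys.argv[1])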
import json
import os.path

from dcr.scenario_utils.logging_utils import get_logger

logger = get_logger("dcr.script.orchestrator.set_environment")
add_variable_to_pipeline = '##vso[task.setvariable variable={name};]{value}'


def _check_if_file_in_scenario_and_set_variable(file_name: str, name: str, true_value: str, false_val: str = None):
    """
    We have certain scenarios in the tests where we determine what type of test to run based on the availability
    of a file. Check if the file is present in the current scenario, and if so, set the pipeline variable `name`.
    Syntax for setting the variable:
    https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash#setvariable-initialize-or-modify-the-value-of-a-variable
    Eg: echo "##vso[task.setvariable variable=<VariableName>;]<Variable value>"
    """
    # scenario_path is a module-level global, assumed to be set elsewhere in this script.
    file_path = os.path.join(scenario_path, file_name)
    if os.path.exists(file_path):
        logger.info(f"Found file: {file_path}, setting variable: {name}")
        print(add_variable_to_pipeline.format(name=name, value=true_value))
    elif false_val is not None:
        print(add_variable_to_pipeline.format(name=name, value=false_val))


def _override_config():
    """
    This function reads the config.json file present in the scenario and makes all of its variables available to the
    whole job as environment variables. It also overrides existing variables with the same name, if any.
    Note: This function expects config.json to be a flat JSON.
    """
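    # A minimal sketch of this function's body, inferred from the docstring above:
    # read the flat config.json from the scenario directory and surface each
    # key/value pair as a pipeline variable. The exact plumbing below is an
    # assumption, not the script's confirmed implementation.
    config_path = os.path.join(scenario_path, "config.json")
    if os.path.exists(config_path):
        with open(config_path) as config_file:
            for key, value in json.load(config_file).items():
                logger.info(f"Overriding variable from config.json: {key}={value}")
                # Emits e.g. ##vso[task.setvariable variable=<key>;]<value>, which
                # Azure DevOps picks up from stdout and applies to later steps.
                print(add_variable_to_pipeline.format(name=key, value=value))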
import asyncio
import math
import os
import secrets
import subprocess
import time
from datetime import datetime
from typing import List

from dcr.scenario_utils.distro import get_distro
from dcr.scenario_utils.logging_utils import get_logger
from dcr.scenario_utils.models import get_vm_data_from_env

logger = get_logger("dcr.scenario_utils.common_utils")


def get_current_agent_name(distro_name=None):
    """
    Only Ubuntu and Debian use walinuxagent; everyone else uses waagent.
    Note: If distro_name is not specified, we detect the distro on the VM itself.
    :return: walinuxagent or waagent
    """
    if distro_name is None:
        distro_name = get_distro()[0]

    walinuxagent_distros = ["ubuntu", "debian"]
    if any(dist.lower() in distro_name.lower() for dist in walinuxagent_distros):
        return "walinuxagent"

    return "waagent"
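

# Illustrative check of the service-name mapping (not part of the original
# module); the distro strings are example inputs rather than values detected
# from a live VM.
if __name__ == '__main__':
    assert get_current_agent_name("ubuntu-20.04") == "walinuxagent"
    assert get_current_agent_name("centos") == "waagent"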