def _prepare_perf_execution_environment(settings, perf_runner, osgroup):
    correlation_dir = fix_path(settings.correlation_payload_dir)
    test_location = os.path.join(fix_path(settings.workitem_working_dir), 'execution')
    core_root = os.path.join(settings.workitem_working_dir, 'core_root')
    os.environ['CORE_ROOT'] = core_root

    xunit_perf_drop = os.path.join(correlation_dir, perf_runner)
    if not os.path.exists(xunit_perf_drop):
        raise Exception("Failed to find perf runner {} in directory {}.".format(perf_runner, correlation_dir))

    # get the first subdir in the root and append it to xunit_perf_drop
    build_subdir = os.listdir(xunit_perf_drop)
    if len(build_subdir) > 1:
        log.info('Multiple directories found in ' + xunit_perf_drop + ' picking ' + build_subdir[0])
    xunit_perf_drop = os.path.join(xunit_perf_drop, build_subdir[0])

    if perf_runner == 'Microsoft.DotNet.xunit.performance.runner.Windows':
        xunit_perf_drop = os.path.join(xunit_perf_drop, "tools")
    else:
        xunit_perf_drop = os.path.join(xunit_perf_drop, "lib", "netstandard1.3")

    log.info("Copying xunit perf drop from {} to {}.".format(xunit_perf_drop, test_location))
    if osgroup.lower().startswith('windows'):
        _prepare_windows_env_for_perf(xunit_perf_drop, test_location)
    elif osgroup.lower().startswith('linux'):
        _prepare_linux_env_for_perf(correlation_dir, xunit_perf_drop, test_location, core_root)
    else:
        log.error('Invalid osgroup ' + osgroup)

    # copy the architecture specific subdirectories
    archSubdirs = os.listdir(xunit_perf_drop)
    for archSubdir in archSubdirs:
        if os.path.isdir(os.path.join(xunit_perf_drop, archSubdir)):
            shutil.copytree(os.path.join(xunit_perf_drop, archSubdir),
                            os.path.join(test_location, archSubdir))
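# The function above takes os.listdir()[0] as the build subdirectory, but
# os.listdir returns entries in arbitrary order, so "picking" the first entry
# is nondeterministic when several versions of the runner package have been
# restored. A minimal sketch of a more predictable choice (a hypothetical
# helper, not part of the original scripts): keep only directories, sort them,
# and take the highest-sorting name, which for NuGet-style version strings
# usually corresponds to the newest version.
import os

def _pick_build_subdir(drop_root):
    # sorted() makes the choice stable across runs; note this is plain
    # lexicographic order, not true semantic-version ordering
    candidates = sorted(d for d in os.listdir(drop_root)
                        if os.path.isdir(os.path.join(drop_root, d)))
    if not candidates:
        raise Exception("No build subdirectory found under {}".format(drop_root))
    return os.path.join(drop_root, candidates[-1])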
def _prepare_execution_environment(settings, framework_in_tpa, assembly_list_name):
    workitem_dir = fix_path(settings.workitem_working_dir)
    correlation_dir = fix_path(settings.correlation_payload_dir)

    xunit_drop = os.path.join(correlation_dir, 'xunit')
    corerun_drop = os.path.join(correlation_dir, 'corerun')
    build_drop = os.path.join(correlation_dir)
    test_drop = os.path.join(workitem_dir)
    assembly_list = os.path.join(test_drop, assembly_list_name)
    test_location = os.path.join(workitem_dir, 'execution')
    core_root = os.path.join(workitem_dir, 'core_root')

    ensure_directory_exists(test_location)
    ensure_directory_exists(core_root)

    log.info("Copying only test files from {} to {}".format(test_drop, test_location))
    copy_tree_to(test_drop, test_location)

    framework_target = core_root if framework_in_tpa else test_location
    log.info("Copying product binaries from {} to {}".format(build_drop, framework_target))
    _copy_package_files(assembly_list, build_drop, framework_target, core_root, test_location)
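# ensure_directory_exists is provided elsewhere in these runner scripts; a
# minimal sketch of the behavior the calls above rely on (create the directory
# if missing, tolerate it already existing) might look like this. This is an
# assumption about its contract, not the original implementation.
import errno
import os

def ensure_directory_exists(path):
    try:
        os.makedirs(path)
    except OSError as e:
        # ignore "already exists"; re-raise anything else (permissions, etc.)
        if e.errno != errno.EEXIST:
            raise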
def _prepare_perf_execution_environment(settings, perf_runner):
    correlation_dir = fix_path(settings.correlation_payload_dir)
    test_location = os.path.join(fix_path(settings.workitem_working_dir), 'execution')
    core_root = os.path.join(settings.workitem_working_dir, 'core_root')
    os.environ['CORE_ROOT'] = core_root

    xunit_perf_drop = os.path.join(correlation_dir, perf_runner)
    if not os.path.exists(xunit_perf_drop):
        raise Exception("Failed to find perf runner {} in directory {}.".format(perf_runner, correlation_dir))

    # get the first subdir in the root and append it to xunit_perf_drop
    buildSubdir = os.listdir(xunit_perf_drop)
    xunit_perf_drop = os.path.join(xunit_perf_drop, buildSubdir[0])
    xunit_perf_drop = os.path.join(xunit_perf_drop, "tools")

    log.info("Copying xunit perf drop from {} to {}.".format(xunit_perf_drop, test_location))
    shutil.copy2(os.path.join(xunit_perf_drop, "xunit.performance.run.exe"), test_location)
    shutil.copy2(os.path.join(xunit_perf_drop, "xunit.performance.metrics.dll"), test_location)
    shutil.copy2(os.path.join(xunit_perf_drop, "xunit.performance.logger.exe"), test_location)
    shutil.copy2(os.path.join(xunit_perf_drop, "xunit.runner.utility.desktop.dll"), test_location)
    shutil.copy2(os.path.join(xunit_perf_drop, "ProcDomain.dll"), test_location)
    shutil.copy2(os.path.join(xunit_perf_drop, "Microsoft.Diagnostics.Tracing.TraceEvent.dll"), test_location)

    # copy the architecture specific subdirectories
    archSubdirs = os.listdir(xunit_perf_drop)
    for archSubdir in archSubdirs:
        if os.path.isdir(os.path.join(xunit_perf_drop, archSubdir)):
            shutil.copytree(os.path.join(xunit_perf_drop, archSubdir),
                            os.path.join(test_location, archSubdir))
def post_process_perf_results():
    settings = settings_from_env()
    perf_settings_json_file = os.path.join(
        fix_path(settings.correlation_payload_dir),
        "RunnerScripts", "xunitrunner-perf", "xunitrunner-perf.json")
    with open(perf_settings_json_file) as f:
        # read the perf-specific settings
        perf_settings_json = json.loads(f.read())

    json_file = os.path.join(
        settings.workitem_working_dir,
        perf_settings_json["TestProduct"] + "-" + settings.workitem_id + ".json")
    json_file = json_file.encode("ascii", "ignore")
    csv_file = os.path.join(settings.workitem_working_dir, "execution", "testResults.csv")

    json_cmd = (sys.executable + " " +
                os.path.join(fix_path(settings.correlation_payload_dir),
                             "RunnerScripts", "xunitrunner-perf", "csvjsonconvertor.py") +
                " --jobName " + settings.correlation_id +
                " --csvFile " + csv_file +
                " --jsonFile " + json_file +
                " --perfSettingsJson " + perf_settings_json_file)
    try:
        return_code = helix.proc.run_and_log_output(
            json_cmd.split(" "),
            cwd=os.path.join(fix_path(settings.workitem_working_dir), "execution"),
            env=None)
    except Exception:
        log.error("Exception when running the installation scripts: " + traceback.format_exc())

    log.info("Uploading {}".format(csv_file))
    result_url = _upload_file(csv_file, settings)
    log.info("Location {}".format(result_url))

    # Upload json with rest of the results
    log.info("Uploading {} to results container".format(json_file))
    result_url = _upload_file(json_file, settings)
    log.info("Location {}".format(result_url))

    # create deep copy and set perf container keys
    perf_settings = copy.deepcopy(settings)
    perf_settings.output_uri = perf_settings_json["RootURI"]
    perf_settings.output_write_token = perf_settings_json["WriteToken"]
    perf_settings.output_read_token = perf_settings_json["ReadToken"]

    # Upload json to the perf specific container
    log.info("Uploading {} to perf container".format(json_file))
    result_url = _upload_file(json_file, perf_settings)
    log.info("Location {}".format(result_url))
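# json_cmd.split(" ") above re-tokenizes the command on every space, so any
# path containing a space (common under "Program Files" on Windows agents)
# is silently split into two argv entries. A safer pattern, sketched here as
# a hypothetical refactor under the same assumptions about the options
# csvjsonconvertor.py accepts, is to build the argv list directly and never
# round-trip through a flat string:

def _build_convertor_args(settings, csv_file, json_file, perf_settings_json_file):
    # fix_path and settings come from the surrounding module, as above
    return [
        sys.executable,
        os.path.join(fix_path(settings.correlation_payload_dir),
                     "RunnerScripts", "xunitrunner-perf", "csvjsonconvertor.py"),
        "--jobName", settings.correlation_id,
        "--csvFile", csv_file,
        "--jsonFile", json_file,
        "--perfSettingsJson", perf_settings_json_file,
    ]
# The list can then be passed to helix.proc.run_and_log_output unchanged,
# exactly as the dict-based variants later in this file already do.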
def post_process_perf_results(opts):
    csvs_dir = opts['--csvDir']
    if not os.path.exists(csvs_dir):
        log.error('Not able to find csvs directory {}'.format(csvs_dir))
        return -1

    settings = settings_from_env()
    perf_settings_json_file = os.path.join(
        fix_path(settings.correlation_payload_dir),
        'RunnerScripts', 'xunitrunner-perf', 'xunitrunner-perf.json')
    with open(perf_settings_json_file) as f:
        # read the perf-specific settings
        perf_settings_json = json.loads(f.read())

    json_file = os.path.join(
        settings.workitem_working_dir,
        perf_settings_json['TestProduct'] + '-' + settings.workitem_id + '.json')
    json_file = json_file.encode('ascii', 'ignore')

    json_cmd = (sys.executable + ' ' +
                os.path.join(fix_path(settings.correlation_payload_dir),
                             'RunnerScripts', 'xunitrunner-perf', 'csvjsonconvertor.py') +
                ' --jobName ' + settings.correlation_id +
                ' --csvDir ' + csvs_dir +
                ' --jsonFile ' + json_file +
                ' --perfSettingsJson ' + perf_settings_json_file)
    try:
        return_code = helix.proc.run_and_log_output(
            json_cmd.split(' '),
            cwd=os.path.join(fix_path(settings.workitem_working_dir), 'execution'),
            env=None)
    except Exception:
        log.error("Exception when running the installation scripts: " + traceback.format_exc())

    for item in os.listdir(csvs_dir):
        if item.endswith('.csv'):
            csv_file = os.path.join(csvs_dir, item)
            log.info('Uploading {}'.format(csv_file))
            result_url = _upload_file(csv_file, settings)
            log.info('Location {}'.format(result_url))

    # Upload json with rest of the results
    log.info('Uploading {} to results container'.format(json_file))
    result_url = _upload_file(json_file, settings)
    log.info('Location {}'.format(result_url))

    # create deep copy and set perf container keys
    perf_settings = copy.deepcopy(settings)
    perf_settings.output_uri = perf_settings_json['RootURI']
    perf_settings.output_write_token = perf_settings_json['WriteToken']
    perf_settings.output_read_token = perf_settings_json['ReadToken']

    # Upload json to the perf specific container
    log.info('Uploading {} to perf container'.format(json_file))
    result_url = _upload_file(json_file, perf_settings)
    log.info('Location {}'.format(result_url))
def prepare_linux_for_perf():
    settings = settings_from_env()
    correlation_dir = fix_path(settings.correlation_payload_dir)
    dotnet_cli_dir = os.path.join(correlation_dir, "perf.dotnetcli")
    dotnet_cli = os.path.join(dotnet_cli_dir, "dotnet")

    # if local dotnet cli is already installed, skip the installation
    if not os.path.exists(dotnet_cli):
        # install dotnet cli locally
        log.info('Local dotnet cli install not found, launching the installation script')
        dotnet_installer = os.path.join(correlation_dir, "RunnerScripts",
                                        "xunitrunner-perf", "ubuntu-dotnet-local-install.sh")
        try:
            log.info('Setting dotnet cli installation script at ' + dotnet_installer + ' as executable')
            helix.proc.run_and_log_output(("chmod 777 " + dotnet_installer).split(" "))
            log.info('Running script ' + dotnet_installer)
            helix.proc.run_and_log_output(
                (dotnet_installer + " -d " + dotnet_cli_dir + " -v " +
                 os.path.join(correlation_dir, "RunnerScripts", "xunitrunner-perf",
                              "DotNetCliVersion.txt")).split(" "))
        except Exception:
            log.error("Exception when running the installation scripts: " + traceback.format_exc())
    else:
        log.info('Local dotnet cli install found')
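# Shelling out to `chmod 777` works, but the same effect is available
# in-process via os.chmod; a minimal sketch (hypothetical helper, equivalent
# permission bits spelled out via the stat constants, assuming world-writable
# really is intended here):
import os
import stat

def _make_executable(path):
    # 0777: read/write/execute for owner, group, and others
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)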
def _run_xunit_from_execution(settings, test_dll, xunit_test_type, args):
    workitem_dir = fix_path(settings.workitem_working_dir)
    test_location = os.path.join(workitem_dir, 'execution')
    core_root = os.path.join(workitem_dir, 'core_root')
    results_location = os.path.join(workitem_dir, 'test_results.xml')

    event_client = helix.event.create_from_uri(settings.event_uri)

    log.info("Starting xunit against '{}'".format(test_dll))
    xunit_result = xunit.run_tests(settings, [test_dll], test_location, core_root,
                                   results_location, xunit_test_type, args)

    if xunit_test_type == xunit.XUNIT_CONFIG_PERF:
        # perf testing has special requirements on the test output file name.
        # make a copy of it in the expected location so we can report the result.
        perf_log = os.path.join(test_location, "latest-perf-build.xml")
        log.info("Copying {} to {}.".format(perf_log, results_location))
        shutil.copy2(perf_log, results_location)

        # archive the ETL file and upload it
        etl_file = os.path.join(test_location, "latest-perf-build.etl")
        etl_zip = os.path.join(test_location, "latest-perf-build.zip")
        log.info("Compressing {} into {}".format(etl_file, etl_zip))
        zip_script.zipFilesAndFolders(etl_zip, [etl_file], True, True)
        log.info("Uploading ETL from {}".format(etl_zip))
        _write_output_path(etl_zip, settings)

    log.info("XUnit exit code: {}".format(xunit_result))
    if os.path.exists(results_location):
        log.info("Uploading results from {}".format(results_location))
        with open(results_location) as result_file:
            # pull the total test count out of the first <assembly> element
            test_count = 0
            for line in result_file:
                if '<assembly ' in line:
                    total_expression = re.compile(r'total="(\d+)"')
                    match = total_expression.search(line)
                    if match is not None:
                        test_count = int(match.groups()[0])
                    break

        post_process_perf_results(settings, results_location, workitem_dir)
        result_url = _write_output_path(results_location, settings)

        log.info("Sending completion event")
        event_client.send({
            'Type': 'XUnitTestResult',
            'WorkItemId': settings.workitem_id,
            'WorkItemFriendlyName': settings.workitem_friendly_name,
            'CorrelationId': settings.correlation_id,
            'ResultsXmlUri': result_url,
            'TestCount': test_count,
        })
    else:
        log.error("Error: No exception thrown, but XUnit results not created")
        _report_error(settings)

    return xunit_result
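# The test count above is scraped from the first <assembly ...> element of
# the xunit results XML via its total="..." attribute. A small standalone
# illustration of that extraction (the sample line is illustrative, not taken
# from a real results file):
import re

def _demo_test_count_extraction():
    sample = '<assembly name="tests.dll" total="42" passed="41" failed="1">'
    match = re.search(r'total="(\d+)"', sample)
    return int(match.group(1)) if match else 0  # -> 42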
def _prepare_execution_environment(settings, framework_in_tpa):
    workitem_dir = fix_path(settings.workitem_working_dir)
    correlation_dir = fix_path(settings.correlation_payload_dir)

    xunit_drop = os.path.join(correlation_dir, 'xunit')
    corerun_drop = os.path.join(correlation_dir, 'corerun')
    build_drop = os.path.join(correlation_dir)
    test_drop = os.path.join(workitem_dir)
    assembly_list = os.path.join(test_drop, settings.assembly_list)
    test_location = os.path.join(workitem_dir, 'execution')
    core_root = os.path.join(workitem_dir, 'core_root')

    ensure_directory_exists(test_location)
    ensure_directory_exists(core_root)

    log.info("Copying only test files from {} to {}".format(test_drop, test_location))
    copy_tree_to(test_drop, test_location)

    framework_target = core_root if framework_in_tpa else test_location
    log.info("Copying product binaries from {} to {}".format(build_drop, framework_target))
    _copy_package_files(assembly_list, build_drop, framework_target, core_root, test_location)
def _report_error(settings):
    from traceback import format_tb, format_exc
    log.error("Error running xunit {}".format(format_exc()))
    # avoid shadowing the builtin `type` and the `traceback` module
    exc_type, exc_value, exc_tb = sys.exc_info()
    event_client = helix.event.create_from_uri(settings.event_uri)
    formatted = format_tb(exc_tb)

    workitem_dir = fix_path(settings.workitem_working_dir)
    error_path = os.path.join(workitem_dir, 'error.log')
    lines = ['Unhandled error: {}\n{}'.format(exc_value, formatted)]
    with open(error_path, 'w') as f:
        f.writelines(lines)
    error_url = _write_output_path(error_path, settings)

    log.info("Sending ToF test failure event")
    event_client.send({
        'Type': 'XUnitTestFailure',
        'WorkItemId': settings.workitem_id,
        'WorkItemFriendlyName': settings.workitem_friendly_name,
        'CorrelationId': settings.correlation_id,
        'ErrorLogUri': error_url,
    })
def post_process_perf_results(settings, results_location, workitem_dir):
    # Use the xunit perf analysis exe from nuget package here
    log.info('Converting xml to csv')
    payload_dir = fix_path(os.getenv('HELIX_CORRELATION_PAYLOAD'))
    xmlconvertorpath = os.path.join(*[payload_dir, 'Microsoft.DotNet.xunit.performance.analysis',
                                      '1.0.0-alpha-build0028', 'tools', 'xunit.performance.analysis.exe'])
    if os.system(xmlconvertorpath + ' -csv ' + os.path.join(workitem_dir, 'results.csv') +
                 ' ' + results_location) != 0:
        raise Exception('Failed to generate csv from result xml')

    perfscriptsdir = os.path.join(*[payload_dir, 'RunnerScripts', 'xunitrunner-perf'])
    # need to extract more properties from settings to pass to csvtojsonconvertor.py
    jsonPath = os.path.join(workitem_dir, settings.workitem_id + '.json')
    json_cmd = ('%HELIX_PYTHONPATH% ' + os.path.join(perfscriptsdir, 'csvjsonconvertor.py') +
                ' --csvFile "' + os.path.join(workitem_dir, 'results.csv') + '"' +
                ' --jsonFile "' + jsonPath + '"' +
                ' --jobName "..." --jobDescription "..." --configName "..." --jobGroupName "..."' +
                ' --jobTypeName "Private" --username "CoreFx-Perf" --userAlias "deshank"' +
                ' --branch "ProjectK" --buildInfoName "1390881" --buildNumber "1390881"' +
                ' --machinepoolName "HP Z210 Workstation"' +
                ' --machinepoolDescription "Intel64 Family 6 Model 42 Stepping 7"' +
                ' --architectureName "AMD64" --manufacturerName "Intel" --microarchName "SSE2"' +
                ' --numberOfCores "4" --numberOfLogicalProcessors "8" --totalPhysicalMemory "16342"' +
                ' --osInfoName "Microsoft Windows 8.1 Pro" --osVersion "6.3.9600"' +
                ' --machineName "PCNAME" --machineDescription "Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz"')
    if os.system(json_cmd) != 0:
        raise Exception('Failed to generate json from csv file')

    perfsettings = copy.deepcopy(settings)
    with open(os.path.join(perfscriptsdir, 'xunitrunner-perf.json'), 'rb') as f:
        # upload the json using perf-specific details
        perfsettingsjson = json.loads(f.read())
    perfsettings.output_uri = perfsettingsjson['RootURI']
    perfsettings.output_write_token = perfsettingsjson['WriteToken']
    perfsettings.output_read_token = perfsettingsjson['ReadToken']
    _write_output_path(jsonPath, perfsettings)
def post_process_perf_results(settings, results_location, workitem_dir, xunit_test_type):
    # Use the xunit perf analysis exe from nuget package here
    log.info('Converting xml to csv')
    payload_dir = fix_path(os.getenv('HELIX_CORRELATION_PAYLOAD'))
    xmlconvertorpath = ''
    if xunit_test_type == xunit.XUNIT_CONFIG_PERF_WINDOWS:
        perf_lib_dir = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis')
        if len(os.listdir(perf_lib_dir)) > 1:
            log.info('Multiple directories found under ' + perf_lib_dir +
                     ' picking ' + os.listdir(perf_lib_dir)[0])
        perf_analysis_version = os.listdir(perf_lib_dir)[0]
        xmlconvertorpath = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis',
                                        perf_analysis_version, 'tools', 'xunit.performance.analysis.exe')
    elif xunit_test_type == xunit.XUNIT_CONFIG_PERF_LINUX:
        perf_lib_dir = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis.cli')
        if len(os.listdir(perf_lib_dir)) > 1:
            log.info('Multiple directories found under ' + perf_lib_dir +
                     ' picking ' + os.listdir(perf_lib_dir)[0])
        perf_analysis_version = os.listdir(perf_lib_dir)[0]
        dotnet_cli_exec = os.path.join(settings.correlation_payload_dir, "dotnet_cli", "dotnet")
        _copy_files_to_dest(os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis.cli',
                                         perf_analysis_version, 'lib', 'netstandard1.3'),
                            os.path.join(workitem_dir, 'execution'))
        xmlconvertorpath = dotnet_cli_exec + ' ' + os.path.join(
            workitem_dir, 'execution', 'Microsoft.DotNet.xunit.performance.analysis.cli.dll')
    else:
        log.error('Invalid xunit_test_type')
        return

    csvs_dir = os.path.join(workitem_dir, 'resultcsvs')
    os.mkdir(csvs_dir)
    xmlCmd = xmlconvertorpath + ' -csv ' + csvs_dir + ' ' + results_location
    if helix.proc.run_and_log_output(xmlCmd.split(' ')) != 0:
        raise Exception('Failed to generate csv from result xml')

    log.info('Uploading the result csv files')
    for item in os.listdir(csvs_dir):
        if item.endswith('.csv'):
            _write_output_path(os.path.join(csvs_dir, item), settings)

    perfscriptsdir = os.path.join(payload_dir, 'RunnerScripts', 'xunitrunner-perf')
    perfsettingsjsonfile = os.path.join(perfscriptsdir, 'xunitrunner-perf.json')
    with open(perfsettingsjsonfile, 'rb') as f:
        # read the perf-specific settings
        perfsettingsjson = json.loads(f.read())

    # need to extract more properties from settings to pass to csvtojsonconvertor.py
    jsonFileName = perfsettingsjson['TestProduct'] + '-' + settings.workitem_id + '.json'
    jsonPath = os.path.join(workitem_dir, jsonFileName)
    jsonPath = jsonPath.encode('ascii', 'ignore')

    jsonArgsDict = dict()
    jsonArgsDict['--jobName'] = settings.correlation_id
    jsonArgsDict['--csvDir'] = csvs_dir
    jsonArgsDict['--jsonFile'] = jsonPath
    jsonArgsDict['--perfSettingsJson'] = perfsettingsjsonfile

    jsonArgs = [sys.executable, os.path.join(perfscriptsdir, 'csvjsonconvertor.py')]
    for key, value in jsonArgsDict.items():
        jsonArgs.append(key)
        jsonArgs.append(str(value))
    if helix.proc.run_and_log_output(jsonArgs) != 0:
        raise Exception('Failed to generate json from csv file')

    # set info to upload result to perf-specific json container
    log.info('Uploading the results json')
    perfsettings = copy.deepcopy(settings)
    perfsettings.output_uri = perfsettingsjson['RootURI']
    perfsettings.output_write_token = perfsettingsjson['WriteToken']
    perfsettings.output_read_token = perfsettingsjson['ReadToken']
    # Upload json with rest of the results
    _write_output_path(jsonPath, settings)
    # Upload json to the perf specific container
    _write_output_path(jsonPath, perfsettings)
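# _copy_files_to_dest is another helper defined elsewhere in these scripts;
# judging by its use above (flattening the netstandard1.3 lib folder into the
# execution directory), a minimal sketch might copy only the top-level files
# and leave subdirectories alone. This is an assumption about its contract,
# not the original implementation:
import os
import shutil

def _copy_files_to_dest(src_dir, dest_dir):
    for name in os.listdir(src_dir):
        src = os.path.join(src_dir, name)
        if os.path.isfile(src):
            shutil.copy2(src, dest_dir)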
def _main(settings, optlist, args):
    """
    Usage::
        xunitrunner
            [--config config.json]
            [--setting name=value]
            --script=path
            [args]
    """
    optdict = dict(optlist)
    log.info("BuildTools Helix Script Runner v0.1 starting")
    if '--args' in optdict:
        script_arguments = optdict['--args']
        log.info("Script Arguments: " + script_arguments)

    script_to_execute = optdict['--script']
    unpack_dir = fix_path(settings.workitem_payload_dir)
    execution_args = [os.path.join(unpack_dir, script_to_execute)] + args
    test_executor = HelixTestExecution(settings)
    return_code = helix.proc.run_and_log_output(execution_args, cwd=unpack_dir, env=None)

    results_location = os.path.join(unpack_dir, 'testResults.xml')
    # In case testResults.xml was put somewhere else, try to find it anywhere in this directory before failing
    if not os.path.exists(results_location):
        for root, dirs, files in os.walk(settings.workitem_working_dir):
            for file_name in files:
                if file_name == 'testResults.xml':
                    results_location = os.path.join(root, file_name)

    if os.path.exists(results_location):
        log.info("Uploading results from {}".format(results_location))
        with open(results_location) as result_file:
            test_count = 0
            for line in result_file:
                if '<assembly ' in line:
                    total_expression = re.compile(r'total="(\d+)"')
                    match = total_expression.search(line)
                    if match is not None:
                        test_count = int(match.groups()[0])
                    break

        if settings.output_uri is not None:
            result_url = test_executor.upload_file_to_storage(results_location, settings)
        else:
            result_url = None

        if settings.event_uri is not None:
            event_client = helix.event.create_from_uri(settings.event_uri)
            log.info("Sending completion event")
            event_client.send({
                'Type': 'XUnitTestResult',
                'WorkItemId': settings.workitem_id,
                'WorkItemFriendlyName': settings.workitem_friendly_name,
                'CorrelationId': settings.correlation_id,
                'ResultsXmlUri': result_url,
                'TestCount': test_count,
            })
    else:
        log.error("Error: No exception thrown, but XUnit results not created")
        if settings.output_uri is not None:
            test_executor.report_error(settings, failure_type="XUnitTestFailure")

    return return_code
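# The fallback search above keeps walking after it finds testResults.xml, so
# a later copy deeper in the tree silently wins. If "first match" is the
# intent, a hedged sketch of an early-exit variant (hypothetical helper):
import os

def _find_first(root, file_name):
    for dirpath, dirnames, filenames in os.walk(root):
        if file_name in filenames:
            return os.path.join(dirpath, file_name)
    return None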
def post_process_perf_results(settings, results_location, workitem_dir):
    # Use the xunit perf analysis exe from nuget package here
    log.info('Converting xml to csv')
    payload_dir = fix_path(os.getenv('HELIX_CORRELATION_PAYLOAD'))
    xmlconvertorpath = os.path.join(*[payload_dir, 'Microsoft.DotNet.xunit.performance.analysis',
                                      '1.0.0-alpha-build0028', 'tools', 'xunit.performance.analysis.exe'])
    xmlCmd = xmlconvertorpath + ' -csv ' + os.path.join(workitem_dir, 'results.csv') + ' ' + results_location
    if helix.proc.run_and_log_output(xmlCmd.split(' ')) != 0:
        raise Exception('Failed to generate csv from result xml')

    perfscriptsdir = os.path.join(*[payload_dir, 'RunnerScripts', 'xunitrunner-perf'])
    # need to extract more properties from settings to pass to csvtojsonconvertor.py
    jsonPath = os.path.join(workitem_dir, settings.workitem_id + '.json')

    log.info('Uploading the results.csv file')
    _write_output_path(os.path.join(workitem_dir, 'results.csv'), settings)

    with open(os.path.join(perfscriptsdir, 'xunitrunner-perf.json'), 'rb') as f:
        # read the perf-specific settings
        perfsettingsjson = json.loads(f.read())

    jsonArgsDict = dict()
    jsonArgsDict['--csvFile'] = os.path.join(workitem_dir, 'results.csv')
    jsonArgsDict['--jsonFile'] = jsonPath
    jsonArgsDict['--jobName'] = settings.correlation_id
    jsonArgsDict['--jobDescription'] = '...'
    jsonArgsDict['--configName'] = perfsettingsjson['TargetQueue']
    jsonArgsDict['--jobGroupName'] = (perfsettingsjson['Creator'] + '-' + perfsettingsjson['TestProduct'] +
                                      '-' + perfsettingsjson['Branch'] + '-Perf')
    jsonArgsDict['--jobTypeName'] = 'Private'
    jsonArgsDict['--username'] = perfsettingsjson['Creator']
    jsonArgsDict['--userAlias'] = perfsettingsjson['Creator']
    jsonArgsDict['--branch'] = perfsettingsjson['TestProduct']
    jsonArgsDict['--buildInfoName'] = perfsettingsjson['BuildMoniker']

    # extract build number from buildmoniker if official build
    buildtokens = perfsettingsjson['BuildMoniker'].split('-')
    if len(buildtokens) < 3:
        jsonArgsDict['--buildNumber'] = perfsettingsjson['BuildMoniker']
    else:
        jsonArgsDict['--buildNumber'] = buildtokens[-2] + '.' + buildtokens[-1]

    jsonArgsDict['--machinepoolName'] = perfsettingsjson['TargetQueue']
    jsonArgsDict['--machinepoolDescription'] = '...'
    jsonArgsDict['--microarchName'] = 'SSE2'  # cannot be obtained by cpu-info; need to figure out some other way
    jsonArgsDict['--numberOfCores'] = psutil.cpu_count(logical=False)
    jsonArgsDict['--numberOfLogicalProcessors'] = psutil.cpu_count(logical=True)
    # psutil returns mem in bytes, convert it to MB for readability
    jsonArgsDict['--totalPhysicalMemory'] = psutil.virtual_memory().total / (1024 * 1024)
    jsonArgsDict['--osInfoName'] = platform.system()
    jsonArgsDict['--osVersion'] = platform.version()
    jsonArgsDict['--machineName'] = platform.node()

    info = cpuinfo.get_cpu_info()
    jsonArgsDict['--architectureName'] = format(info['arch'])
    jsonArgsDict['--machineDescription'] = format(info['brand'])
    jsonArgsDict['--manufacturerName'] = format(info['vendor_id'])

    jsonArgs = [sys.executable, os.path.join(perfscriptsdir, 'csvjsonconvertor.py')]
    for key, value in jsonArgsDict.items():
        jsonArgs.append(key)
        jsonArgs.append(str(value))
    if helix.proc.run_and_log_output(jsonArgs) != 0:
        raise Exception('Failed to generate json from csv file')

    # set info to upload result to perf-specific json container
    log.info('Uploading the results json')
    perfsettings = copy.deepcopy(settings)
    perfsettings.output_uri = perfsettingsjson['RootURI']
    perfsettings.output_write_token = perfsettingsjson['WriteToken']
    perfsettings.output_read_token = perfsettingsjson['ReadToken']
    _write_output_path(jsonPath, perfsettings)
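# A quick standalone illustration of the machine-info gathering used above
# (psutil and py-cpuinfo are third-party packages; the division converts
# psutil's byte count into megabytes, matching the comment in the function;
# the cpuinfo keys are the ones these scripts were written against):
import platform
import psutil
import cpuinfo

def _demo_machine_info():
    info = cpuinfo.get_cpu_info()
    return {
        'cores': psutil.cpu_count(logical=False),            # physical cores
        'logical': psutil.cpu_count(logical=True),           # logical processors
        'memoryMB': psutil.virtual_memory().total / (1024 * 1024),
        'os': platform.system(),
        'osVersion': platform.version(),
        'machine': platform.node(),
        'arch': info['arch'],
        'brand': info['brand'],
        'vendor': info['vendor_id'],
    }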
def _main(settings, optlist, args):
    """
    Usage::
        continuationrunner
            [--config config.json]
            [--setting name=value]
            --script
            [--args arg1 arg2...]
    """
    optdict = dict(optlist)
    log.info("BuildTools Helix Continuation Runner starting")
    if '--args' in optdict:
        script_arguments = optdict['--args']
        log.info("Script Arguments: " + script_arguments)

    if '--script' in optdict:
        script_to_execute = optdict['--script']
    else:
        log.error("Value for parameter '--script' is required")
        return -1

    if '--next_queue' in optdict:
        next_queue = optdict['--next_queue']
    else:
        log.error("Need a secondary queue id to continue execution.")
        return -1
    if '--next_payload_dir' in optdict:
        next_payload_dir = optdict['--next_payload_dir']
    else:
        log.error("Need a secondary payload to continue execution.")
        return -1

    unpack_dir = fix_path(settings.workitem_payload_dir)
    execution_args = [os.path.join(unpack_dir, script_to_execute)] + args
    return_code = helix.proc.run_and_log_output(execution_args, cwd=unpack_dir, env=None)

    if return_code == 0:
        # currently there's no use for it, but here's where we'd choose to send out XUnit results
        # if desired at some point.
        log.info("First stage of execution succeeded. Sending a new work item to " + next_queue)
        log.info("Will include contents of " + next_payload_dir)
        settings = settings_from_env()
        # load Client-specific settings
        config_path = os.path.join(settings.config_root, "ClientSettings.json")
        settings.__dict__.update(json.load(open(config_path)))
        service_bus_repository = ServiceBusRepository(settings.ServiceBusRoot,
                                                      settings.QueueId,
                                                      settings.LongPollTimeout,
                                                      settings.SAS,
                                                      settings.servicebus_retry_count,
                                                      settings.servicebus_retry_delay)

        # For now, we'll use ScriptRunner for this step. Eventually we'll want to either combine functionality
        # of the two into scriptrunner.py, OR parameterize which script is used (for the 2+ re-queue scenario)
        call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation"
        if is_windows():
            continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd"
        else:
            continuation_command = "$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh"

        # Prep the follow-up work item ...
        new_work_item = HelixWorkItem(
            correlation_id=settings.correlation_id,
            work_item_friendly_name=settings.workitem_friendly_name + ".Execution",
            command=continuation_command,
            results_output_uri=settings.output_uri + "/continuation",
            results_output_write_token=settings.output_write_token,
            results_output_read_token=settings.output_read_token)

        # This may eventually cause trouble if zips with identical names are somehow included inside
        # other payload zips. Chained continuation will be OK as there will be a new results
        # directory to upload to for each leg.
        new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip"
        secondary_zip_path = os.path.join(settings.workitem_working_dir, new_workitem_payload_name)
        zip_directory(secondary_zip_path, next_payload_dir)
        log.info("Zipped into " + secondary_zip_path)

        # Upload the payloads for the job
        upload_client = helix.azure_storage.BlobUploadClient(settings.output_uri,
                                                             settings.output_write_token,
                                                             settings.output_read_token)
        new_payload_uri = upload_client.upload(secondary_zip_path, new_workitem_payload_name)
        new_work_item.WorkItemPayloadUris.append(new_payload_uri)

        # Current assumption: No need to reuse correlation payload, but bring supplemental (for scripts)
        # NOTE: We don't currently have a way to access the existing Uri, so reusing the payload from
        #       storage will involve plumbing that through or re-uploading it (can be huge)
        supplemental_payload_path = os.path.join(settings.work_root, settings.correlation_id,
                                                 "work", "SupplementalPayload.zip")
        supplemental_payload_uri = upload_client.upload(supplemental_payload_path, "SupplementalPayload.zip")
        log.info("Uploaded " + secondary_zip_path + " to " + new_payload_uri)
        log.info("Uploaded SupplementalPayload.zip to " + supplemental_payload_uri)
        new_work_item.CorrelationPayloadUris.append(supplemental_payload_uri)

        if service_bus_repository.post_new_workitem(queue_id=next_queue, work_item=new_work_item):
            log.info("Successfully queued new work item.")
        else:
            log.error("Failure to send to Service bus.")
            return -1
    else:
        log.error("Got non-zero exit code for first stage of execution. Skipping further processing.")

    return return_code
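# zip_directory is imported from the helix script utilities; a minimal sketch
# of the behavior the continuation flow depends on (zip the whole payload
# directory into the named archive), assuming nothing beyond the standard
# library. This is a guess at its contract, not the original implementation:
import os
import zipfile

def zip_directory(zip_path, dir_to_zip):
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dirpath, dirnames, filenames in os.walk(dir_to_zip):
            for name in filenames:
                full = os.path.join(dirpath, name)
                # store entries relative to the payload root
                zf.write(full, os.path.relpath(full, dir_to_zip))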
def _main(settings, optlist, args):
    """
    Usage::
        xunitrunner
            [--config config.json]
            [--setting name=value]
            --script
            [--args arg1 arg2...]
    """
    optdict = dict(optlist)
    log.info("BuildTools Helix Script Runner v0.1 starting")
    if '--args' in optdict:
        script_arguments = optdict['--args']
        log.info("Script Arguments: " + script_arguments)

    script_to_execute = optdict['--script']
    test_executor = HelixTestExecution(settings)
    unpack_dir = fix_path(settings.workitem_payload_dir)
    execution_args = [os.path.join(unpack_dir, script_to_execute)] + args
    return_code = helix.proc.run_and_log_output(execution_args, cwd=unpack_dir, env=None)
    event_client = helix.event.create_from_uri(settings.event_uri)

    results_location = os.path.join(unpack_dir, 'testResults.xml')
    # In case testResults.xml was put somewhere else, try to find it anywhere in this directory before failing
    if not os.path.exists(results_location):
        for root, dirs, files in os.walk(settings.workitem_working_dir):
            for file_name in files:
                if file_name == 'testResults.xml':
                    results_location = os.path.join(root, file_name)

    if os.path.exists(results_location):
        log.info("Uploading results from {}".format(results_location))
        with open(results_location) as result_file:
            test_count = 0
            for line in result_file:
                if '<assembly ' in line:
                    total_expression = re.compile(r'total="(\d+)"')
                    match = total_expression.search(line)
                    if match is not None:
                        test_count = int(match.groups()[0])
                    break

        result_url = test_executor.upload_file_to_storage(results_location, settings)
        log.info("Sending completion event")
        event_client.send({
            'Type': 'XUnitTestResult',
            'WorkItemId': settings.workitem_id,
            'WorkItemFriendlyName': settings.workitem_friendly_name,
            'CorrelationId': settings.correlation_id,
            'ResultsXmlUri': result_url,
            'TestCount': test_count,
        })
    else:
        log.error("Error: No exception thrown, but XUnit results not created")
        test_executor.report_error(settings, failure_type="XUnitTestFailure")

    return return_code
def post_process_perf_results(settings, results_location, workitem_dir, xunit_test_type):
    # Use the xunit perf analysis exe from nuget package here
    log.info('Converting xml to csv')
    payload_dir = fix_path(os.getenv('HELIX_CORRELATION_PAYLOAD'))
    xmlconvertorpath = ''
    if xunit_test_type == xunit.XUNIT_CONFIG_PERF_WINDOWS:
        perf_lib_dir = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis')
        if len(os.listdir(perf_lib_dir)) > 1:
            log.info('Multiple directories found under ' + perf_lib_dir +
                     ' picking ' + os.listdir(perf_lib_dir)[0])
        perf_analysis_version = os.listdir(perf_lib_dir)[0]
        xmlconvertorpath = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis',
                                        perf_analysis_version, 'tools', 'xunit.performance.analysis.exe')
    elif xunit_test_type == xunit.XUNIT_CONFIG_PERF_LINUX:
        perf_lib_dir = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis.cli')
        if len(os.listdir(perf_lib_dir)) > 1:
            log.info('Multiple directories found under ' + perf_lib_dir +
                     ' picking ' + os.listdir(perf_lib_dir)[0])
        perf_analysis_version = os.listdir(perf_lib_dir)[0]
        dotnet_cli_exec = os.path.join(settings.correlation_payload_dir, "dotnet_cli", "dotnet")
        _copy_files_to_dest(os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis.cli',
                                         perf_analysis_version, 'lib', 'netstandard1.3'),
                            os.path.join(workitem_dir, 'execution'))
        xmlconvertorpath = dotnet_cli_exec + ' ' + os.path.join(
            workitem_dir, 'execution', 'Microsoft.DotNet.xunit.performance.analysis.cli.dll')
    else:
        log.error('Invalid xunit_test_type')
        return

    xmlCmd = xmlconvertorpath + ' -csv ' + os.path.join(workitem_dir, 'results.csv') + ' ' + results_location
    if helix.proc.run_and_log_output(xmlCmd.split(' ')) != 0:
        raise Exception('Failed to generate csv from result xml')

    log.info('Uploading the results.csv file')
    _write_output_path(os.path.join(workitem_dir, 'results.csv'), settings)

    perfscriptsdir = os.path.join(payload_dir, 'RunnerScripts', 'xunitrunner-perf')
    perfsettingsjsonfile = os.path.join(perfscriptsdir, 'xunitrunner-perf.json')
    with open(perfsettingsjsonfile, 'rb') as f:
        # read the perf-specific settings
        perfsettingsjson = json.loads(f.read())

    # need to extract more properties from settings to pass to csvtojsonconvertor.py
    jsonFileName = perfsettingsjson['TestProduct'] + '-' + settings.workitem_id + '.json'
    jsonPath = os.path.join(workitem_dir, jsonFileName)
    jsonPath = jsonPath.encode('ascii', 'ignore')

    jsonArgsDict = dict()
    jsonArgsDict['--jobName'] = settings.correlation_id
    jsonArgsDict['--csvFile'] = os.path.join(workitem_dir, 'results.csv')
    jsonArgsDict['--jsonFile'] = jsonPath
    jsonArgsDict['--perfSettingsJson'] = perfsettingsjsonfile

    jsonArgs = [sys.executable, os.path.join(perfscriptsdir, 'csvjsonconvertor.py')]
    for key, value in jsonArgsDict.items():
        jsonArgs.append(key)
        jsonArgs.append(str(value))
    if helix.proc.run_and_log_output(jsonArgs) != 0:
        raise Exception('Failed to generate json from csv file')

    # set info to upload result to perf-specific json container
    log.info('Uploading the results json')
    perfsettings = copy.deepcopy(settings)
    perfsettings.output_uri = perfsettingsjson['RootURI']
    perfsettings.output_write_token = perfsettingsjson['WriteToken']
    perfsettings.output_read_token = perfsettingsjson['ReadToken']
    # Upload json with rest of the results
    _write_output_path(jsonPath, settings)
    # Upload json to the perf specific container
    _write_output_path(jsonPath, perfsettings)
def post_process_perf_results(settings, results_location, workitem_dir):
    # Use the xunit perf analysis exe from nuget package here
    log.info('Converting xml to csv')
    payload_dir = fix_path(os.getenv('HELIX_CORRELATION_PAYLOAD'))
    perf_analysis_version = (next(os.walk(os.path.join(
        payload_dir, 'Microsoft.DotNet.xunit.performance.analysis')))[1])[0]
    xmlconvertorpath = os.path.join(*[payload_dir, 'Microsoft.DotNet.xunit.performance.analysis',
                                      perf_analysis_version, 'tools', 'xunit.performance.analysis.exe'])
    xmlCmd = xmlconvertorpath + ' -csv ' + os.path.join(workitem_dir, 'results.csv') + ' ' + results_location
    if helix.proc.run_and_log_output(xmlCmd.split(' ')) != 0:
        raise Exception('Failed to generate csv from result xml')

    log.info('Uploading the results.csv file')
    _write_output_path(os.path.join(workitem_dir, 'results.csv'), settings)

    perfscriptsdir = os.path.join(*[payload_dir, 'RunnerScripts', 'xunitrunner-perf'])
    with open(os.path.join(perfscriptsdir, 'xunitrunner-perf.json'), 'rb') as f:
        # read the perf-specific settings
        perfsettingsjson = json.loads(f.read())

    # need to extract more properties from settings to pass to csvtojsonconvertor.py
    jsonFileName = perfsettingsjson['TestProduct'] + '-' + settings.workitem_id + '.json'
    jsonPath = os.path.join(workitem_dir, jsonFileName)

    jsonArgsDict = dict()
    jsonArgsDict['--csvFile'] = os.path.join(workitem_dir, 'results.csv')
    jsonArgsDict['--jsonFile'] = jsonPath
    jsonArgsDict['--jobName'] = settings.correlation_id
    jsonArgsDict['--jobDescription'] = '...'
    jsonArgsDict['--configName'] = perfsettingsjson['TargetQueue']
    jsonArgsDict['--jobGroupName'] = (perfsettingsjson['Creator'] + '-' + perfsettingsjson['TestProduct'] +
                                      '-' + perfsettingsjson['Branch'] + '-Perf')
    jsonArgsDict['--jobTypeName'] = 'Private'
    jsonArgsDict['--username'] = perfsettingsjson['Creator']
    jsonArgsDict['--userAlias'] = perfsettingsjson['Creator']
    jsonArgsDict['--branch'] = perfsettingsjson['TestProduct']
    jsonArgsDict['--buildInfoName'] = perfsettingsjson['BuildMoniker']

    # extract build number from buildmoniker if official build
    buildtokens = perfsettingsjson['BuildMoniker'].split('-')
    if len(buildtokens) < 3:
        jsonArgsDict['--buildNumber'] = perfsettingsjson['BuildMoniker']
    else:
        jsonArgsDict['--buildNumber'] = buildtokens[-2] + '.' + buildtokens[-1]

    jsonArgsDict['--machinepoolName'] = perfsettingsjson['TargetQueue']
    jsonArgsDict['--machinepoolDescription'] = '...'
    jsonArgsDict['--microarchName'] = 'SSE2'  # cannot be obtained by cpu-info; need to figure out some other way
    jsonArgsDict['--numberOfCores'] = psutil.cpu_count(logical=False)
    jsonArgsDict['--numberOfLogicalProcessors'] = psutil.cpu_count(logical=True)
    # psutil returns mem in bytes, convert it to MB for readability
    jsonArgsDict['--totalPhysicalMemory'] = psutil.virtual_memory().total / (1024 * 1024)
    jsonArgsDict['--osInfoName'] = platform.system()
    jsonArgsDict['--osVersion'] = platform.version()
    jsonArgsDict['--machineName'] = platform.node()

    info = cpuinfo.get_cpu_info()
    jsonArgsDict['--architectureName'] = format(info['arch'])
    jsonArgsDict['--machineDescription'] = format(info['brand'])
    jsonArgsDict['--manufacturerName'] = format(info['vendor_id'])

    jsonArgs = [sys.executable, os.path.join(perfscriptsdir, 'csvjsonconvertor.py')]
    for key, value in jsonArgsDict.items():
        jsonArgs.append(key)
        jsonArgs.append(str(value))
    if helix.proc.run_and_log_output(jsonArgs) != 0:
        raise Exception('Failed to generate json from csv file')

    # set info to upload result to perf-specific json container
    log.info('Uploading the results json')
    perfsettings = copy.deepcopy(settings)
    perfsettings.output_uri = perfsettingsjson['RootURI']
    perfsettings.output_write_token = perfsettingsjson['WriteToken']
    perfsettings.output_read_token = perfsettingsjson['ReadToken']
    jsonPath = str(jsonPath)
    # Upload json with rest of the results
    _write_output_path(jsonPath, settings)
    # Upload json to the perf specific container
    _write_output_path(jsonPath, perfsettings)
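# A standalone illustration of the BuildMoniker parsing above: monikers with
# fewer than three dash-separated tokens pass through unchanged, otherwise
# the last two tokens are joined into a dotted build number. The sample
# monikers are illustrative only:

def _build_number(moniker):
    tokens = moniker.split('-')
    if len(tokens) < 3:
        return moniker
    return tokens[-2] + '.' + tokens[-1]

# _build_number('1390881')       -> '1390881' (non-official moniker passes through)
# _build_number('beta-24913-02') -> '24913.02'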