def main(args=None):
    """Entry point for the xunit runner; parses options and dispatches to run_tests."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --dll Test.dll
        """
        optdict = dict(optlist)

        # check if a perf runner has been specified
        perf_runner = None
        assembly_list = None
        if '--perf-runner' in optdict:
            perf_runner = optdict['--perf-runner']

        # Prefer the explicit --assemblylist parameter; fall back to the
        # HELIX_ASSEMBLY_LIST environment variable.
        if '--assemblylist' in optdict:
            assembly_list = optdict['--assemblylist']
            log.info("Using assemblylist parameter:" + assembly_list)
        else:
            assembly_list = os.getenv('HELIX_ASSEMBLY_LIST')
            # BUGFIX: str() guards against HELIX_ASSEMBLY_LIST being unset
            # (os.getenv returns None), which previously raised TypeError on
            # string concatenation before any useful diagnostic was logged.
            log.info('Using assemblylist environment variable:' + str(assembly_list))

        return run_tests(
            settings,
            optdict['--dll'],
            '--tpaframework' in optdict,
            assembly_list,
            perf_runner,
            args)

    return command_main(_main, ['dll=', 'tpaframework', 'perf-runner=', 'assemblylist='], args)
def main(args=None):
    """Command-line entry point: wires _main into the shared command_main driver."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --dll Test.dll
        """
        opts = dict(optlist)

        # An alternate perf runner may be supplied via --perf-runner.
        perf_runner = opts.get('--perf-runner')

        # Prefer the explicit --assemblylist argument over the
        # HELIX_ASSEMBLY_LIST environment variable.
        if '--assemblylist' in opts:
            assembly_list = opts['--assemblylist']
            log.info("Using assemblylist parameter:" + assembly_list)
        else:
            assembly_list = os.getenv('HELIX_ASSEMBLY_LIST')
            log.info('Using assemblylist environment variable:' + assembly_list)

        return run_tests(
            settings,
            opts['--dll'],
            '--tpaframework' in opts,
            assembly_list,
            perf_runner,
            args)

    return command_main(
        _main,
        ['dll=', 'tpaframework', 'perf-runner=', 'assemblylist='],
        args)
def main(args=None):
    """Entry point for the xunit runner; falls back from a missing .dll to a matching .exe."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --dll Test.dll
        """
        optdict = dict(optlist)

        # check if a perf runner has been specified
        perf_runner = None
        assembly_list = None
        if '--perf-runner' in optdict:
            perf_runner = optdict['--perf-runner']

        # If the requested .dll doesn't exist, fall back to a same-named .exe.
        # os.path.splitext replaces the manual "'.'.join(split('.')[:-1])"
        # approach, which produced an empty stem (and a bare '.exe' path) for
        # extension-less input paths.
        if not os.path.exists(optdict['--dll']):
            dllpath = optdict['--dll']
            exepath = os.path.splitext(dllpath)[0] + '.exe'
            if not os.path.exists(exepath):
                raise Exception('No valid test dll or exe found')
            else:
                optdict['--dll'] = exepath

        if '--assemblylist' in optdict:
            assembly_list = optdict['--assemblylist']
            log.info("Using assemblylist parameter:" + assembly_list)
        else:
            assembly_list = os.getenv('HELIX_ASSEMBLY_LIST')
            # BUGFIX: str() guards against an unset HELIX_ASSEMBLY_LIST (None),
            # which previously raised TypeError on concatenation.
            log.info('Using assemblylist environment variable:' + str(assembly_list))

        return run_tests(
            settings,
            optdict['--dll'],
            '--tpaframework' in optdict,
            assembly_list,
            perf_runner,
            args)

    return command_main(_main, ['dll=', 'tpaframework', 'perf-runner=', 'assemblylist='], args)
def main(args=None):
    """Entry point for the functional Helix xunit runner."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                [--tpaframework]
                [--assemblylist assemblylist.txt]
                [--xunit-test-type type]
                --dll Test.dll
        """
        optdict = dict(optlist)
        log.info("BuildTools Functional Helix Runner v0.1 starting")

        if '--assemblylist' in optdict:
            assembly_list = optdict['--assemblylist']
            log.info("Using assemblylist parameter:" + assembly_list)
        else:
            assembly_list = os.getenv('HELIX_ASSEMBLY_LIST')
            # BUGFIX: str() guards against an unset HELIX_ASSEMBLY_LIST (None),
            # which previously raised TypeError on concatenation.
            log.info("Using assemblylist environment variable:" + str(assembly_list))

        test_assembly = optdict['--dll']

        # Default to .NET Core xunit configuration unless overridden.
        xunit_test_type = XUnitExecution.XUNIT_CONFIG_NETCORE
        if '--xunit-test-type' in optdict:
            xunit_test_type = optdict['--xunit-test-type']
        if xunit_test_type == XUnitExecution.XUNIT_CONFIG_DESKTOP and os.name != 'nt':
            raise Exception(
                "Error: Cannot run desktop .NET Framework XUnit on non windows platforms"
            )

        # Currently this will automatically create the default "execution" folder and move contents of the
        # work item to it. If needed, we could separate this out.
        xunit_execute = XUnitExecution(settings)

        # This runner handles the "original" AssemblyList format by contructing tuples to copy and then
        # calling HelixTestExecution.copy_file_list. Once assembly list formatting is changed in the
        # source that this runner lives with, can convert to simply calling HelixTestExecution.copy_package_files
        # Eventually --tpaframework ought to be removed entirely from the runner, but leaving it in here
        # in case it's used. If specified, we copy framework assemblies into CORE_ROOT to get different trust
        file_tuples = _create_package_file_list(assembly_list, "execution", "core_root",
                                                '--tpaframework' in optdict)
        xunit_execute.test_execution.copy_file_list(
            settings.correlation_payload_dir,
            file_tuples,
            settings.workitem_working_dir)

        # Custom runners put stuff here to do things before execution begins,
        xunit_result = xunit_execute.run_xunit(settings, test_assembly, xunit_test_type, args)
        # or here, to do any post-run work they want to.
        return xunit_result

    return command_main(
        _main,
        ['dll=', 'tpaframework', 'perf-runner=', 'assemblylist=', 'xunit-test-type='],
        args)
def main(args=None):
    """CLI entry point: forwards parsed csv_to_json options to post-processing."""
    def _main(settings, optlist, _):
        """
        Usage::
            csv_to_json.py
                --csvDir dir where csvs can be found
        """
        return post_process_perf_results(dict(optlist))

    return command_main(_main, ['csvDir='], args)
def main(args=None):
    """Entry point for the functional Helix xunit runner."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                [--tpaframework]
                [--assemblylist assemblylist.txt]
                [--xunit-test-type type]
                --dll Test.dll
        """
        optdict = dict(optlist)
        log.info("BuildTools Functional Helix Runner v0.1 starting")

        if '--assemblylist' in optdict:
            assembly_list = optdict['--assemblylist']
            log.info("Using assemblylist parameter:" + assembly_list)
        else:
            assembly_list = os.getenv('HELIX_ASSEMBLY_LIST')
            # BUGFIX: str() guards against an unset HELIX_ASSEMBLY_LIST (None),
            # which previously raised TypeError on concatenation.
            log.info("Using assemblylist environment variable:" + str(assembly_list))

        test_assembly = optdict['--dll']

        # Default to .NET Core xunit configuration unless overridden.
        xunit_test_type = XUnitExecution.XUNIT_CONFIG_NETCORE
        if '--xunit-test-type' in optdict:
            xunit_test_type = optdict['--xunit-test-type']
        if xunit_test_type == XUnitExecution.XUNIT_CONFIG_DESKTOP and os.name != 'nt':
            raise Exception("Error: Cannot run desktop .NET Framework XUnit on non windows platforms")

        # Currently this will automatically create the default "execution" folder and move contents of the
        # work item to it. If needed, we could separate this out.
        xunit_execute = XUnitExecution(settings)

        # This runner handles the "original" AssemblyList format by contructing tuples to copy and then
        # calling HelixTestExecution.copy_file_list. Once assembly list formatting is changed in the
        # source that this runner lives with, can convert to simply calling HelixTestExecution.copy_package_files
        # Eventually --tpaframework ought to be removed entirely from the runner, but leaving it in here
        # in case it's used. If specified, we copy framework assemblies into CORE_ROOT to get different trust
        file_tuples = _create_package_file_list(assembly_list, "execution", "core_root",
                                                '--tpaframework' in optdict)
        xunit_execute.test_execution.copy_file_list(settings.correlation_payload_dir,
                                                    file_tuples,
                                                    settings.workitem_working_dir)

        # Custom runners put stuff here to do things before execution begins,
        xunit_result = xunit_execute.run_xunit(settings, test_assembly, xunit_test_type, args)
        # or here, to do any post-run work they want to.
        return xunit_result

    return command_main(_main,
                        ['dll=', 'tpaframework', 'perf-runner=', 'assemblylist=', 'xunit-test-type='],
                        args)
def main(args=None):
    """CLI entry point: converts collected perf CSV files into a JSON report."""
    def _main(settings, optlist, _):
        """
        Usage::
            csv_to_json.py
                --csvDir dir where csvs can be found
                --jsonFile json file path
                --jobName "sample job"
                --perfSettingsJson json file containing perf-specific settings
        """
        return run_json_conversion(dict(optlist))

    return command_main(
        _main,
        ['csvDir=', 'jsonFile=', 'jobName=', 'perfSettingsJson='],
        args)
def main(args=None):
    """CLI entry point: converts collected perf CSV files into a JSON report."""
    def _main(settings, optlist, _):
        """
        Usage::
            csv_to_json.py
                --csvDir dir where csvs can be found
                --jsonFile json file path
                --jobName "sample job"
                --perfSettingsJson json file containing perf-specific settings
        """
        # Turn the (name, value) option pairs into a lookup dict and hand
        # conversion off wholesale.
        option_map = dict(optlist)
        return run_json_conversion(option_map)

    supported_options = ['csvDir=', 'jsonFile=', 'jobName=', 'perfSettingsJson=']
    return command_main(_main, supported_options, args)
def main(args=None):
    """CLI entry point for the xunit runner (no assemblylist support in this variant)."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --dll Test.dll
        """
        opts = dict(optlist)
        # --perf-runner optionally names an alternate perf test runner.
        perf_runner = opts.get('--perf-runner')
        return run_tests(
            settings,
            opts['--dll'],
            '--tpaframework' in opts,
            perf_runner,
            args)

    return command_main(_main, ['dll=', 'tpaframework', 'perf-runner='], args)
def main(args=None):
    """CLI entry point: converts a single perf CSV file into a JSON report."""
    def _main(settings, optlist, _):
        """
        Usage::
            csv_to_json.py
                --csvFile csvFile to convert to json "MyLoc\\csvfile.csv"
                --jsonFile output json file location "MyLoc\\jsonfile.json"
                --jobName "sample job"
                --jobDescription sample job description
                --configName "sample config"
                --jobGroupName sample job group
                --jobTypeName "Private"
                --username "sample user"
                --userAlias "sampleuser"
                --branch "ProjectK"
                --buildInfoName "sample build"
                --buildNumber "1234"
                --machinepoolName "HP Z210 Workstation"
                --machinepoolDescription "Intel64 Family 6 Model 42 Stepping 7"
                --architectureName "AMD64"
                --manufacturerName "Intel"
                --microarchName "SSE2"
                --numberOfCores "4"
                --numberOfLogicalProcessors "8"
                --totalPhysicalMemory "16342"
                --osInfoName "Microsoft Windows 8.1 Pro"
                --osVersion "6.3.9600"
                --machineName "PCNAME"
                --machineDescription "Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz"
        """
        opts = dict(optlist)
        return run_json_conversion(opts)

    # BUGFIX: the option list was corrupted (apparently by a credential
    # scrubber) into the syntactically invalid "'username='******'userAlias='";
    # restored the two separate long options, matching the usage text above.
    return command_main(_main, [
        'csvFile=', 'jsonFile=', 'jobName=', 'jobDescription=', 'configName=',
        'jobGroupName=', 'jobTypeName=', 'username=', 'userAlias=', 'branch=',
        'buildInfoName=', 'buildNumber=', 'machinepoolName=',
        'machinepoolDescription=', 'architectureName=', 'manufacturerName=',
        'microarchName=', 'numberOfCores=', 'numberOfLogicalProcessors=',
        'totalPhysicalMemory=', 'osInfoName=', 'osVersion=', 'machineName=',
        'machineDescription='
    ], args)
def main(args=None):
    """CLI entry point: converts a single perf CSV file into a JSON report."""
    def _main(settings, optlist, _):
        """
        Usage::
            csv_to_json.py
                --csvFile csvFile to convert to json "MyLoc\\csvfile.csv"
                --jsonFile output json file location "MyLoc\\jsonfile.json"
                --jobName "sample job"
                --jobDescription sample job description
                --configName "sample config"
                --jobGroupName sample job group
                --jobTypeName "Private"
                --username "sample user"
                --userAlias "sampleuser"
                --branch "ProjectK"
                --buildInfoName "sample build"
                --buildNumber "1234"
                --machinepoolName "HP Z210 Workstation"
                --machinepoolDescription "Intel64 Family 6 Model 42 Stepping 7"
                --architectureName "AMD64"
                --manufacturerName "Intel"
                --microarchName "SSE2"
                --numberOfCores "4"
                --numberOfLogicalProcessors "8"
                --totalPhysicalMemory "16342"
                --osInfoName "Microsoft Windows 8.1 Pro"
                --osVersion "6.3.9600"
                --machineName "PCNAME"
                --machineDescription "Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz"
        """
        opts = dict(optlist)
        return run_json_conversion(opts)

    # BUGFIX: the option list was corrupted (apparently by a credential
    # scrubber) into the syntactically invalid "'username='******'userAlias='";
    # restored the two separate long options, matching the usage text above.
    return command_main(_main,
                        ['csvFile=', 'jsonFile=', 'jobName=', 'jobDescription=',
                         'configName=', 'jobGroupName=', 'jobTypeName=',
                         'username=', 'userAlias=', 'branch=', 'buildInfoName=',
                         'buildNumber=', 'machinepoolName=',
                         'machinepoolDescription=', 'architectureName=',
                         'manufacturerName=', 'microarchName=', 'numberOfCores=',
                         'numberOfLogicalProcessors=', 'totalPhysicalMemory=',
                         'osInfoName=', 'osVersion=', 'machineName=',
                         'machineDescription='],
                        args)
def main(args=None):
    """Entry point for the continuation runner: runs a first-stage script, then
    queues a follow-up work item on a secondary queue if the script succeeds."""
    def _main(settings, optlist, args):
        """
        Usage::
            continuationrunner
                [--config config.json]
                [--setting name=value]
                --script
                [--args arg1 arg2...]
        """
        optdict = dict(optlist)
        log.info("BuildTools Helix Continuation Runner starting")

        if '--args' in optdict:
            script_arguments = optdict['--args']
            log.info("Script Arguments: " + script_arguments)

        if '--script' in optdict:
            script_to_execute = optdict['--script']
        else:
            log.error("Value for parameter '--script' is required")
            return -1

        if '--next_queue' in optdict:
            next_queue = optdict['--next_queue']
        else:
            log.error("Need a secondary queue id to continue execution.")
            return -1
        if '--next_payload_dir' in optdict:
            next_payload_dir = optdict['--next_payload_dir']
        else:
            log.error("Need a secondary payload to continue execution.")
            return -1

        unpack_dir = fix_path(settings.workitem_payload_dir)

        execution_args = [os.path.join(unpack_dir, script_to_execute)] + args

        return_code = helix.proc.run_and_log_output(
            execution_args,
            cwd=unpack_dir,
            env=None
        )

        if return_code == 0:
            # currently there's no use for it, but here's where we'd choose to send out XUnit results
            # if desired at some point.
            log.info("First stage of execution succeded. Sending a new work item to " + next_queue)
            log.info("Will include contents of " + next_payload_dir)
            settings = settings_from_env()
            # load Client-specific settings
            config_path = os.path.join(settings.config_root, "ClientSettings.json")
            settings.__dict__.update(json.load(open(config_path)))
            service_bus_repository = ServiceBusRepository(settings.ServiceBusRoot,
                                                          settings.QueueId,
                                                          settings.LongPollTimeout,
                                                          settings.SAS,
                                                          settings.servicebus_retry_count,
                                                          settings.servicebus_retry_delay
                                                          )
            # For now, we'll use ScriptRunner for this step. Eventually we'll want to either combine functionality
            # of the two into scriptrunner.py, OR parameterize which script is used (for the 2+ re-queue scenario)
            call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation"
            if is_windows():
                continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd"
            else:
                # BUGFIX: the Unix branch previously emitted "$HELIX_PYTHONPATH%"
                # -- a stray Windows-style '%' that breaks POSIX shell variable
                # expansion. Unix variables use plain "$NAME".
                continuation_command = "$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh"

            # Prep the follow-up work item ...
            new_work_item = HelixWorkItem(
                correlation_id=settings.correlation_id,
                work_item_friendly_name=settings.workitem_friendly_name + ".Execution",
                command=continuation_command,
                results_output_uri=settings.output_uri + "/continuation",
                results_output_write_token=settings.output_write_token,
                results_output_read_token=settings.output_read_token)

            # This may eventually cause trouble if zips with identical names are somehow included inside
            # other payload zips. Chained continuation will be OK as there will be a new results
            # directory to upload to for each leg.
            new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip"
            secondary_zip_path = os.path.join(settings.workitem_working_dir, new_workitem_payload_name)
            zip_directory(secondary_zip_path, next_payload_dir)
            log.info("Zipped into " + secondary_zip_path)

            # Upload the payloads for the job
            upload_client = helix.azure_storage.BlobUploadClient(settings.output_uri,
                                                                 settings.output_write_token,
                                                                 settings.output_read_token)
            new_payload_uri = upload_client.upload(secondary_zip_path, new_workitem_payload_name)
            new_work_item.WorkItemPayloadUris.append(new_payload_uri)

            # Current assumption: No need to reuse correlation payload, but bring supplemental (for scripts)
            # NOTE: We don't currently have a way to access the existing Uri, so reusing the payload from
            #       storage will involve plumbing that through or re-uploading it (can be huge)
            supplemental_payload_path = os.path.join(settings.work_root, settings.correlation_id,
                                                     "work", "SupplementalPayload.zip")
            supplemental_payload_uri = upload_client.upload(supplemental_payload_path, "SupplementalPayload.zip")
            log.info("Uploaded " + secondary_zip_path + " to " + new_payload_uri)
            log.info("Uploaded SupplementalPayload.zip to " + supplemental_payload_uri)
            new_work_item.CorrelationPayloadUris.append(supplemental_payload_uri)

            if service_bus_repository.post_new_workitem(queue_id=next_queue,
                                                        work_item=new_work_item):
                log.info("Successfully queued new work item.")
            else:
                log.error("Failure to send to Service bus.")
                return -1
        else:
            log.error("Got non-zero exit code for first stage of execution. Skipping further processing.")
        return return_code

    return command_main(_main, ['script=', 'args=', 'next_queue=', 'next_payload_dir='], args)
def main(args=None):
    """Entry point for the script runner: executes a payload script, locates
    testResults.xml, and (when configured) uploads results and emits an event."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --script=path
                [args]
        """
        optdict = dict(optlist)
        log.info("BuildTools Helix Script Runner v0.1 starting")
        if '--args' in optdict:
            script_arguments = optdict['--args']
            log.info("Script Arguments:" + script_arguments)

        script_to_execute = optdict['--script']
        unpack_dir = fix_path(settings.workitem_payload_dir)
        execution_args = [os.path.join(unpack_dir, script_to_execute)] + args

        test_executor = HelixTestExecution(settings)

        return_code = helix.proc.run_and_log_output(execution_args, cwd=unpack_dir, env=None)

        results_location = os.path.join(unpack_dir, 'testResults.xml')

        # In case testResults.xml was put somewhere else, try to find it anywhere in this directory before failing
        if not os.path.exists(results_location):
            for root, dirs, files in os.walk(settings.workitem_working_dir):
                for file_name in files:
                    if file_name == 'testResults.xml':
                        results_location = os.path.join(root, file_name)

        if os.path.exists(results_location):
            log.info("Uploading results from {}".format(results_location))
            # BUGFIX: 'file()' is a Python 2-only builtin that was removed in
            # Python 3; 'open()' is equivalent here and works on both.
            with open(results_location) as result_file:
                # Pull the test count out of the first <assembly ...> element's
                # total="N" attribute.
                test_count = 0
                for line in result_file:
                    if '<assembly ' in line:
                        total_expression = re.compile(r'total="(\d+)"')
                        match = total_expression.search(line)
                        if match is not None:
                            test_count = int(match.groups()[0])
                        break

            if settings.output_uri is not None:
                result_url = test_executor.upload_file_to_storage(
                    results_location, settings)
            else:
                result_url = None

            if settings.event_uri is not None:
                event_client = helix.event.create_from_uri(settings.event_uri)
                log.info("Sending completion event")
                event_client.send({
                    'Type': 'XUnitTestResult',
                    'WorkItemId': settings.workitem_id,
                    'WorkItemFriendlyName': settings.workitem_friendly_name,
                    'CorrelationId': settings.correlation_id,
                    'ResultsXmlUri': result_url,
                    'TestCount': test_count,
                })
        else:
            log.error(
                "Error: No exception thrown, but XUnit results not created")
            if settings.output_uri is not None:
                test_executor.report_error(settings, failure_type="XUnitTestFailure")

        return return_code

    return command_main(_main, ['script=', 'args='], args)
def main(args=None):
    """Entry point for the script runner: executes a payload script, then uploads
    testResults.xml and sends a completion event."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --script
                [--args arg1 arg2...]
        """
        optdict = dict(optlist)
        log.info("BuildTools Helix Script Runner v0.1 starting")
        if '--args' in optdict:
            script_arguments = optdict['--args']
            log.info("Script Arguments:" + script_arguments)

        script_to_execute = optdict['--script']

        test_executor = HelixTestExecution(settings)

        unpack_dir = fix_path(settings.workitem_payload_dir)
        execution_args = [os.path.join(unpack_dir, script_to_execute)] + args
        return_code = helix.proc.run_and_log_output(
            execution_args,
            cwd=unpack_dir,
            env=None
        )
        event_client = helix.event.create_from_uri(settings.event_uri)
        results_location = os.path.join(unpack_dir, 'testResults.xml')

        # In case testResults.xml was put somewhere else, try to find it anywhere in this directory before failing
        if not os.path.exists(results_location):
            for root, dirs, files in os.walk(settings.workitem_working_dir):
                for file_name in files:
                    if file_name == 'testResults.xml':
                        results_location = os.path.join(root, file_name)

        if os.path.exists(results_location):
            log.info("Uploading results from {}".format(results_location))
            # BUGFIX: 'file()' is a Python 2-only builtin that was removed in
            # Python 3; 'open()' is equivalent here and works on both.
            with open(results_location) as result_file:
                # Pull the test count out of the first <assembly ...> element's
                # total="N" attribute.
                test_count = 0
                for line in result_file:
                    if '<assembly ' in line:
                        total_expression = re.compile(r'total="(\d+)"')
                        match = total_expression.search(line)
                        if match is not None:
                            test_count = int(match.groups()[0])
                        break

            result_url = test_executor.upload_file_to_storage(results_location, settings)
            log.info("Sending completion event")
            event_client.send(
                {
                    'Type': 'XUnitTestResult',
                    'WorkItemId': settings.workitem_id,
                    'WorkItemFriendlyName': settings.workitem_friendly_name,
                    'CorrelationId': settings.correlation_id,
                    'ResultsXmlUri': result_url,
                    'TestCount': test_count,
                }
            )
        else:
            log.error("Error: No exception thrown, but XUnit results not created")
            test_executor.report_error(settings, failure_type="XUnitTestFailure")

        return return_code

    return command_main(_main, ['script=', 'args='], args)
def main(args=None):
    """Entry point for the continuation runner: runs a first-stage script, then
    queues a follow-up work item on a secondary queue if the script succeeds."""
    def _main(settings, optlist, args):
        """
        Usage::
            continuationrunner
                [--config config.json]
                [--setting name=value]
                --script
                [--args arg1 arg2...]
        """
        optdict = dict(optlist)
        log.info("BuildTools Helix Continuation Runner starting")

        if '--args' in optdict:
            script_arguments = optdict['--args']
            log.info("Script Arguments: " + script_arguments)

        if '--script' in optdict:
            script_to_execute = optdict['--script']
        else:
            log.error("Value for parameter '--script' is required")
            return -1

        if '--next_queue' in optdict:
            next_queue = optdict['--next_queue']
        else:
            log.error("Need a secondary queue id to continue execution.")
            return -1
        if '--next_payload_dir' in optdict:
            next_payload_dir = optdict['--next_payload_dir']
        else:
            log.error("Need a secondary payload to continue execution.")
            return -1

        unpack_dir = fix_path(settings.workitem_payload_dir)

        execution_args = [os.path.join(unpack_dir, script_to_execute)] + args

        return_code = helix.proc.run_and_log_output(
            execution_args, cwd=unpack_dir, env=None)

        if return_code == 0:
            # currently there's no use for it, but here's where we'd choose to send out XUnit results
            # if desired at some point.
            log.info(
                "First stage of execution succeded.  Sending a new work item to "
                + next_queue)
            log.info("Will include contents of " + next_payload_dir)
            settings = settings_from_env()
            # load Client-specific settings
            config_path = os.path.join(settings.config_root, "ClientSettings.json")
            settings.__dict__.update(json.load(open(config_path)))
            service_bus_repository = ServiceBusRepository(
                settings.ServiceBusRoot, settings.QueueId,
                settings.LongPollTimeout, settings.SAS,
                settings.servicebus_retry_count,
                settings.servicebus_retry_delay)
            # For now, we'll use ScriptRunner for this step.  Eventually we'll want to either combine functionality
            # of the two into scriptrunner.py, OR parameterize which script is used (for the 2+ re-queue scenario)
            call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation"
            if is_windows():
                continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd"
            else:
                # BUGFIX: the Unix branch previously emitted "$HELIX_PYTHONPATH%"
                # -- a stray Windows-style '%' that breaks POSIX shell variable
                # expansion. Unix variables use plain "$NAME".
                continuation_command = "$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh"

            # Prep the follow-up work item ...
            new_work_item = HelixWorkItem(
                correlation_id=settings.correlation_id,
                work_item_friendly_name=settings.workitem_friendly_name + ".Execution",
                command=continuation_command,
                results_output_uri=settings.output_uri + "/continuation",
                results_output_write_token=settings.output_write_token,
                results_output_read_token=settings.output_read_token)

            # This may eventually cause trouble if zips with identical names are somehow included inside
            # other payload zips. Chained continuation will be OK as there will be a new results
            # directory to upload to for each leg.
            new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip"
            secondary_zip_path = os.path.join(settings.workitem_working_dir,
                                              new_workitem_payload_name)
            zip_directory(secondary_zip_path, next_payload_dir)
            log.info("Zipped into " + secondary_zip_path)

            # Upload the payloads for the job
            upload_client = helix.azure_storage.BlobUploadClient(
                settings.output_uri, settings.output_write_token,
                settings.output_read_token)
            new_payload_uri = upload_client.upload(secondary_zip_path,
                                                   new_workitem_payload_name)
            new_work_item.WorkItemPayloadUris.append(new_payload_uri)

            # Current assumption: No need to reuse correlation payload, but bring supplemental (for scripts)
            # NOTE: We don't currently have a way to access the existing Uri, so reusing the payload from
            #       storage will involve plumbing that through or re-uploading it (can be huge)
            supplemental_payload_path = os.path.join(
                settings.work_root, settings.correlation_id, "work",
                "SupplementalPayload.zip")
            supplemental_payload_uri = upload_client.upload(
                supplemental_payload_path, "SupplementalPayload.zip")
            log.info("Uploaded " + secondary_zip_path + " to " + new_payload_uri)
            log.info("Uploaded SupplementalPayload.zip to " + supplemental_payload_uri)
            new_work_item.CorrelationPayloadUris.append(
                supplemental_payload_uri)

            if service_bus_repository.post_new_workitem(
                    queue_id=next_queue, work_item=new_work_item):
                log.info("Successfully queued new work item.")
            else:
                log.error("Failure to send to Service bus.")
                return -1
        else:
            log.error(
                "Got non-zero exit code for first stage of execution. Skipping further processing."
            )
        return return_code

    return command_main(
        _main, ['script=', 'args=', 'next_queue=', 'next_payload_dir='], args)