def __merge_container_reports(self):
    """Merge the Robot Framework output files produced by each container.

    Copies every suite's output XML into the top-level output directory,
    then runs rebot over the copies to build combined log/report HTML.
    Raises if the final merge fails.
    """
    print('Copying container output xml files to top level')
    files_to_merge = []
    for suite in self.execution_file_json['suites']:
        if 'suitefile' in suite:
            name = suite['suitefile'].replace('.robot', '')
        else:
            name = suite['suitedirectory']
        print('Copying xml file for suite: %s' % name)
        output_xml_path = os.path.join(
            self.output_path, name,
            ParallelRunner.ROBOT_XML.replace('SUITE', name))
        destination_path = os.path.join(
            self.output_path,
            ParallelRunner.ROBOT_XML.replace('SUITE', name))
        try:
            shutil.copyfile(src=output_xml_path, dst=destination_path)
        except Exception as e:
            # Previously any copy failure was silently swallowed AND aborted
            # the whole loop; now we report it and keep copying the remaining
            # suites so they still get merged.
            print('Error copying xml for suite %s: %s' % (name, str(e)))
            continue
        files_to_merge.append(destination_path)
    print('Merging container output xml into html report')
    try:
        log_path = os.path.join(self.output_path, 'allsuites_log.html')
        report_path = os.path.join(self.output_path, 'allsuites_report.html')
        rebot(*files_to_merge, name='AllSuites', log=log_path,
              report=report_path)
    except Exception as e:
        print('Error merging container xml output: %s' % str(e))
        raise
def cleanup(self, options):
    """Tear down shared suite state once the last instance is cleaned up."""
    RobotTestSuite.instances_count -= 1
    if RobotTestSuite.instances_count != 0:
        return
    frontend = RobotTestSuite.robot_frontend_process
    if frontend:
        # 15 == SIGTERM
        os.kill(frontend.pid, 15)
        frontend.wait()
    if RobotTestSuite.log_files:
        print("Aggregating all robot results")
        robot.rebot(*RobotTestSuite.log_files,
                    processemptysuite=True,
                    name='Test Suite',
                    outputdir=options.results_directory,
                    output='robot_output.xml')
def test_custom_stdout_and_stderr_with_minumal_implementation(self):
    # NOTE: "minumal" typo kept so the test keeps its discovered name.
    stream = StreamWithOnlyWriteAndFlush()
    rc = rebot(self.data, log='NONE', report='NONE',
               stdout=stream, stderr=stream)
    assert_equals(rc, 252)
    rc = rebot(self.data, report='NONE', stdout=stream, stderr=stream,
               outputdir=TEMP)
    assert_equals(rc, 1)
    expected = [('[ ERROR ] No outputs created', 1),
                ('--help', 1),
                ('Log:', 1),
                ('Report:', 0)]
    self._assert_output(stream, expected)
    self._assert_outputs()
def _run_tests_and_process_output(robot_file):
    """Run the given robot file, post-process its output, and return the
    path to the generated output.xml."""
    results_dir = join(CURDIR, "results")
    if exists(results_dir):
        rmtree(results_dir)
    output = join(results_dir, "output.xml")
    run(join(CURDIR, robot_file), output=output, log=None, report=None,
        loglevel="DEBUG")
    process_output(output)
    rebot(output, outputdir=results_dir)
    return output
def parse_outputs(output_dir):
    """Parse output xmls from all executed tests."""
    outs = []
    for file_name in os.listdir(output_dir):
        if file_name.endswith('.xml'):
            outs.append(os.path.join(output_dir, file_name))
    robot.rebot(*outs, merge=True)
def atests(*opts):
    """Run the acceptance tests with the interpreter matching the current VM."""
    if os.name == 'java':
        out_dir = OUTPUT_JYTHON
        runner, os_name = jython, os._name  # Jython exposes the real OS here
    else:
        out_dir = OUTPUT_PYTHON
        runner, os_name = python, os.name
    runner(*(get_os_includes(os_name) + opts))
    output = join(out_dir, 'output.xml')
    process_output(output)
    return rebot(output, outputdir=out_dir)
def _run_tests_and_statuschecker(test_file):
    """Run a robot file, pass its output through robotstatuschecker, rebot it."""
    here = dirname(abspath(__file__))
    results = join(here, 'results')
    if exists(results):
        rmtree(results)
    output = join(results, 'output.xml')
    run(join(here, test_file), output=output, log=None, report=None,
        loglevel='DEBUG')
    checker = join(dirname(here), 'robotstatuschecker.py')
    call(['python', checker, output])
    rebot(output, outputdir=results)
    return output
def atests(*opts):
    """Run acceptance tests; Windows-only cases are filtered by platform."""
    if os.name == 'java':
        jython(*opts)
        output = join(OUTPUT_JYTHON, 'output.xml')
        process_output(output)
        return rebot(output, outputdir=OUTPUT_JYTHON)
    windows_filter = '--include' if os.name == 'nt' else '--exclude'
    python(*((windows_filter, 'windows') + opts))
    output = join(OUTPUT_PYTHON, 'output.xml')
    process_output(output)
    return rebot(output, outputdir=OUTPUT_PYTHON)
def _run_tests_and_process_output(robot_file):
    """Run tests (excluding RF3-unsupported cases on RF3) and rebot results."""
    results = join(CURDIR, "results")
    if exists(results):
        rmtree(results)
    output = join(results, "output.xml")
    run_kwargs = dict(output=output, log=None, report=None, loglevel="DEBUG")
    if RF3:
        # RF 3 cannot run these cases at all, so exclude them up front.
        run_kwargs["exclude"] = "rf3unsupported"
    run(join(CURDIR, robot_file), **run_kwargs)
    process_output(output)
    rebot(output, outputdir=results)
    return output
def merge_report(self, start_time, end_time):
    """Merge all per-suite output.xml files into a single report, logging
    rebot's console output to merge.log."""
    outputs = glob.glob(os.path.join(self.output_dir, '*', 'output.xml'))
    merge_log = os.path.join(self.output_dir, 'merge.log')
    with open(merge_log, 'w') as stdout:
        rebot(*outputs,
              stdout=stdout,
              merge=True,
              loglevel="WARN",
              starttime=str(start_time),
              endtime=str(end_time),
              outputdir=self.output_dir,
              output="output.xml")
def atests(*opts):
    """Run acceptance tests for the active interpreter and rebot its output."""
    on_jython = os.name == 'java'
    out_dir = OUTPUT_JYTHON if on_jython else OUTPUT_PYTHON
    if on_jython:
        # os._name is Jython-specific and names the underlying OS.
        jython(*(get_os_includes(os._name) + opts))
    else:
        python(*(get_os_includes(os.name) + opts))
    output = join(out_dir, 'output.xml')
    process_output(output)
    return rebot(output, outputdir=out_dir)
def _run_tests_and_process_output(robot_file):
    """Execute robot_file, post-process and rebot its output; return xml path."""
    results = join(CURDIR, 'results')
    if exists(results):
        rmtree(results)
    output = join(results, 'output.xml')
    data = join(CURDIR, robot_file)
    run(data, output=output, log=None, report=None, loglevel='DEBUG')
    process_output(output)
    rebot(output, outputdir=results)
    return output
def atests(*opts):
    """Run acceptance tests, including/excluding Windows-only cases by OS."""
    if os.name == 'java':
        jython(*opts)
        output = join(OUTPUT_JYTHON, 'output.xml')
        process_output(output)
        return rebot(output, outputdir=OUTPUT_JYTHON)
    if os.name == 'nt':
        os_includes = ('--include', 'windows')
    else:
        os_includes = ('--exclude', 'windows')
    python(*(os_includes + opts))
    output = join(OUTPUT_PYTHON, 'output.xml')
    process_output(output)
    return rebot(output, outputdir=OUTPUT_PYTHON)
def test_result_robot(request, result_id):
    """Render a TestResult's stored robot XML as a standalone log page."""
    build_qs = Build.objects.for_user(request.user)
    result = get_object_or_404(
        TestResult, id=result_id, build_flow__build__in=build_qs)
    if not result.robot_xml:
        return
    src_path = mkstemp()[1]
    log_path = mkstemp(".html")[1]
    with open(src_path, "w") as f:
        f.write(result.robot_xml)
    rebot(src_path, log=log_path, output=None, report=None)
    with open(log_path, "r") as f:
        log_html = f.read()
    os.remove(src_path)
    os.remove(log_path)
    return HttpResponse(log_html)
def cleanup(self, options):
    # Runs once per suite instance; real teardown happens only when the
    # last instance is being cleaned up.
    RobotTestSuite.instances_count -= 1
    if RobotTestSuite.instances_count == 0:
        # Shut down the shared remote server process.
        self._close_remote_server(RobotTestSuite.robot_frontend_process, options)
        if len(RobotTestSuite.log_files) > 0:
            print("Aggregating all robot results")
            robot.rebot(*RobotTestSuite.log_files, processemptysuite=True,
                        name='Test Suite', outputdir=options.results_directory,
                        output='robot_output.xml')
            if options.css_file:
                # Append the user-supplied stylesheet to the generated report
                # and log so they pick up custom styling.
                # NOTE(review): assumed this runs only after rebot created the
                # report/log files — confirm original nesting.
                with open(options.css_file) as style:
                    style_content = style.read()
                for report_name in ("report.html", "log.html"):
                    with open(os.path.join(options.results_directory, report_name), "a") as report:
                        report.write("<style media=\"all\" type=\"text/css\">")
                        report.write(style_content)
                        report.write("</style>")
def test_invalid_option_value(self):
    err_stream = StringIO()
    rc = rebot(self.data, loglevel='INFO:INV', stderr=err_stream)
    assert_equal(rc, 252)
    expected = ("[ ERROR ] Invalid value for option '--loglevel': "
                "Invalid level 'INV'.")
    self._assert_output(err_stream, [(expected, 1)])
    self._assert_outputs()
def test_rerunfailed_is_not_persistent(self):
    # Regression test for
    # https://github.com/robotframework/robotframework/issues/2437
    failing_data = join(ROOT, 'atest', 'testdata', 'misc',
                        'pass_and_fail.robot')
    self._run(failing_data, output=OUTPUT_PATH, rc=1)
    self._run(failing_data, rerunfailed=OUTPUT_PATH, rc=1)
    self._run(self.data, output=OUTPUT_PATH, rc=0)
    assert_equal(rebot(OUTPUT_PATH, log=LOG_PATH, report=None), 0)
def _report_results(outs_dir, options, start_time_string, tests_root_name):
    """Merge all worker output xmls into one output.xml and rebot it."""
    # Final merged output goes to the user's requested outputdir/output.
    output_path = os.path.abspath(os.path.join(options.get('outputdir', '.'),
                                               options.get('output', 'output.xml')))
    # Sorted glob keeps the merge order deterministic across runs.
    merge(sorted(glob(os.path.join(outs_dir, '**/*.xml'))), options,
          tests_root_name).save(output_path)
    _copy_screenshots(options)
    print 'Output: %s' % output_path
    options['output'] = None  # Do not write output again with rebot
    return rebot(output_path, **_options_for_rebot(options, start_time_string, _now()))
def _report_results_for_one_run(outs_dir, options, start_time_string,
                                tests_root_name):
    """Merge one run's worker outputs and build the final reports via rebot."""
    output_path = _merge_one_run(outs_dir, options, tests_root_name)
    _copy_screenshots(options)
    print('Output: %s' % output_path)
    # rebot must not rewrite the merged output file.
    options['output'] = None
    rebot_options = _options_for_rebot(options, start_time_string, _now())
    return rebot(output_path, **rebot_options)
def main(args):
    """Entry point: run suites in parallel, then merge results with rebot."""
    start_time = time.time()
    start_time_string = _now()
    #NOTE: timeout option
    try:
        options, datasources, pabot_args = _parse_args(args)
        outs_dir = _output_dir(options)
        suite_names = solve_suite_names(outs_dir, datasources, options)
        _parallel_execute(datasources, options, outs_dir, pabot_args, suite_names)
        # rebot's return value becomes the process exit code.
        sys.exit(rebot(*sorted(glob(os.path.join(outs_dir, '**/*.xml'))),
                       **_options_for_rebot(options, datasources, start_time_string, _now())))
    except Information, i:
        # Option-parsing "information" (e.g. --help): print usage and message.
        print """A parallel executor for Robot Framework test cases. Supports all Robot Framework command line options and also following options (these must be before normal RF options): --verbose more output --command [ACTUAL COMMANDS TO START ROBOT EXECUTOR] --end-command RF script for situations where pybot is not used directly --processes [NUMBER OF PROCESSES] How many parallel executors to use (default max of 2 and cpu count)"""
        print i.message
def _report_results_for_one_run(outs_dir, options, start_time_string, tests_root_name):
    """Merge one run's worker outputs and produce the final reports via rebot."""
    output_path = _merge_one_run(outs_dir, options, tests_root_name)
    _copy_screenshots(options)
    print 'Output: %s' % output_path
    options['output'] = None  # Do not write output again with rebot
    return rebot(output_path, **_options_for_rebot(options, start_time_string, _now()))
def test_result_robot(request, result_id):
    """Render a TestResult's robot XML as a log.html page, honoring the
    subset of the originating task's options that affect log rendering."""
    build_qs = Build.objects.for_user(request.user)
    result = get_object_or_404(TestResult, id=result_id, build_flow__build__in=build_qs)
    if result.robot_xml:
        # resolve linked assets into temporary S3 URLs
        robot_xml = ASSET_URL_RE.sub(make_asset_resolver(result), result.robot_xml)
        source = mkstemp()[1]
        log = mkstemp(".html")[1]
        rebot_options = {"log": log, "output": None, "report": None}
        if result.task:
            # Copy subset of robot task options that affect the log
            options_to_copy = (
                "name", "doc", "metadata", "settag", "critical", "noncritical",
                "logtitle", "suitestatlevel", "tagstatinclude", "tagstatexclude",
                "tagstatcombine", "tagdoc", "tagstatlink", "removekeywords",
                "flattenkeywords",
            )
            options = result.task.options.get("options", {})
            rebot_options.update(
                {k: options[k] for k in options_to_copy if k in options}
            )
        with open(source, "w") as f:
            f.write(robot_xml)
        rebot(source, **rebot_options)
        with open(log, "r") as f:
            log_html = f.read()
        # Post-process the generated HTML before returning it.
        log_html = patch_html(log_html)
        os.remove(source)
        os.remove(log)
        return HttpResponse(log_html)
    else:
        return HttpResponse(f"No robot_xml available in test result: {result}")
def make_report(file_dir):
    """Create aggregated log/report files for the executed use case.

    @param file_dir File name of the report being generated.
    """
    base = "../testArtifacts/robot/"
    if args.configFile is not None:
        output_dir = base + get_base_filename() + "/" + file_dir
    else:
        output_dir = base + file_dir
    xml_files = [output_dir + "/" + entry
                 for entry in os.listdir(output_dir)
                 if fnmatch.fnmatch(entry, '*.xml')]
    logging.debug(xml_files)
    rebot(*xml_files,
          log=output_dir + "_log.html",
          report=output_dir + "_report.html")
def test_result_robot(request, result_id):
    """Return a TestResult's robot XML rendered as an HTML log."""
    build_qs = view_queryset(request)
    result = get_object_or_404(
        TestResult,
        id=result_id,
        build_flow__build__in=build_qs,
    )
    if not result.robot_xml:
        return
    xml_path = mkstemp()[1]
    html_path = mkstemp('.html')[1]
    with open(xml_path, 'w') as f:
        f.write(result.robot_xml)
    rebot(xml_path, log=html_path, output=None, report=None)
    with open(html_path, 'r') as f:
        log_html = f.read()
    os.remove(xml_path)
    os.remove(html_path)
    return HttpResponse(log_html)
def test_pre_rebot_modifier_as_instance(self):
    class RecordingFailer(SuiteVisitor):
        """Records visited test names and forces every test to FAIL."""
        def __init__(self):
            self.tests = []
        def visit_test(self, test):
            self.tests.append(test.name)
            test.status = 'FAIL'
    visitor = RecordingFailer()
    rc = rebot(self.data, outputdir=TEMP, prerebotmodifier=visitor)
    assert_equals(rc, 3)
    assert_equals(visitor.tests, ['Test 1.1', 'Test 1.2', 'Test 2.1'])
def test_pre_rebot_modifier_as_instance(self):
    class FailAll(SuiteVisitor):
        """Collects test names and marks each visited test as failed."""
        def __init__(self):
            self.tests = []
        def visit_test(self, test):
            self.tests.append(test.name)
            test.status = 'FAIL'
    modifier = FailAll()
    assert_equal(rebot(self.data, outputdir=TEMP,
                       prerebotmodifier=modifier), 3)
    assert_equal(modifier.tests, ['Test 1.1', 'Test 1.2', 'Test 2.1'])
def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root_name):
    """Merge results; with argument files, merge each indexed run separately
    before combining everything with rebot."""
    if not pabot_args['argumentfiles']:
        return _report_results_for_one_run(outs_dir, options,
                                           start_time_string, tests_root_name)
    outputs = []
    for index, _ in pabot_args['argumentfiles']:
        per_run_output = os.path.join('pabot_results', 'output%s.xml' % index)
        outputs.append(_merge_one_run(os.path.join(outs_dir, index), options,
                                      tests_root_name,
                                      outputfile=per_run_output))
    _copy_screenshots(options)
    options['output'] = 'output.xml'
    return rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()))
def _upload_results(self):
    """Merge this run's output with any prior results stored remotely for the
    same process instance, upload the artifacts, and return a share link."""
    try:
        process_id = self._get_process_id()
        try:
            #TODO: merge results from different folders
            # Random backoff reduces collisions between concurrent uploaders.
            time.sleep(random.randint(1, 3))
            self.oc_client.list(process_id+"/")
            self.oc_client.get_file(process_id+"/output.xml","o.xml")
            # Merge the previously uploaded output with this run's output.
            rebot("o.xml", self.output_file, merge=True, rpa=True,
                  doc=f"Task results for process instance {process_id}",
                  reporttitle=f"{process_id} Task Report", name=".",
                  report=self.report_file, output=self.output_file,
                  log=self.log_file)
        except Exception as e:
            if str(e) == "HTTP error: 404":
                # First upload for this process: create its remote folder.
                self.oc_client.mkdir(process_id)
                pass
            else:
                raise Exception(f"Error:{e}")
        self.oc_client.put_file(process_id+"/log.html", self.log_file)
        self.oc_client.put_file(process_id+"/output.xml", self.output_file)
        self.oc_client.put_file(process_id+"/report.html", self.report_file)
    except Exception as e:
        logger.error(f"Could not upload results: {e}")
    # NOTE(review): if _get_process_id() raised above, process_id is unbound
    # here and this line raises NameError — confirm intended behavior.
    return self.oc_client.share_file_with_link(process_id).get_link()
def start():
    """CLI entry point: run the selected use/test cases for every profile
    and aggregate the per-profile outputs into one report."""
    remove_old_report_folder()
    remove_old_logs()
    logging.basicConfig(level=logging.DEBUG)
    args = configure_parser().parse_args()
    validate_args(args)
    logging.info("Profiles for testing: {0}".format(args.profile))
    for profile in args.profile:
        logging.info("Run testing for profile '{0}'".format(profile))
        setup_config(profile)
        kwargs = get_kwargs(args, profile)
        os.chdir(SCENARIOS_DIR)
        if args.useCase and ('*' in args.useCase or '.' in args.useCase):
            logging.info("Running use case {0}".format(args.useCase))
            run('.', **kwargs)
        elif args.useCase:
            # Bug fix: this message previously logged args.testCase even
            # though this branch runs use cases.
            logging.info("Running use case {0}".format(args.useCase))
            run(*args.useCase, **kwargs)
        if args.testCase:
            logging.info("Running test case {0}".format(args.testCase))
            run(*args.testCase, **kwargs)
    # Aggregate the per-profile testing reports into a single result.
    outputs = []
    for profile in args.profile:
        outputs.append("{}/{}.xml".format(OUTPUTDIR, profile))
    rebot(*outputs, name="edgex", outputdir=OUTPUTDIR,
          xunit=OUTPUTDIR + "/result.xml")
def acceptance_tests(args):
    """Run acceptance tests with pybot and status-check the results."""
    runner = 'pybot'
    if os.sep == '\\':
        # Windows needs the batch-file variant of the runner.
        runner += '.bat'
    _make_results_dir()
    cmd = [runner] + ROBOT_ARGS + args + [testenv.TEST_DATA]
    print "Executing:\n" + " ".join(cmd)
    subprocess.call(cmd)
    outputxml = join(testenv.RESULTS_DIR, "output.xml")
    statuschecker.process_output(outputxml)
    # rebot's return code is the number of failed tests (0 == success).
    rc = robot.rebot(outputxml, outputdir=testenv.RESULTS_DIR)
    if rc == 0:
        print 'All tests passed'
    else:
        print '%d test%s failed' % (rc, 's' if rc != 1 else '')
def _generate_robot_html(self): numtests = self.passed + self.failed + self.xpassed + self.xfailed + self.errors if numtests == 0: print 'No tests ran, skipping rebot' return htmlpath = self.config.option.robothtmlpath if not htmlpath: htmlpath = self.config.option.robotxmlpath.replace('.xml', '.html') print '--robothtmlpath path not specified, defaulting to %s' %htmlpath htmlfile = os.path.expanduser(os.path.expandvars(htmlpath)) htmlfile = os.path.normpath(os.path.abspath(htmlfile)) print '\n--- rebot xml from %s to %s ---' %(self.logfile, htmlfile) re = rebot(self.logfile, log=htmlfile, report=htmlfile.replace('.html', '_report.html')) if re > 251: # REVIEW don't understand how these codes work but seems like 252 means failed raise Exception('rebot failed with status %s' %re)
def main(args):
    """Pabot entry point: execute suites in parallel, then merge with rebot."""
    start_time = time.time()
    start_time_string = _now()
    #NOTE: timeout option
    try:
        options, datasources, pabot_args = _parse_args(args)
        outs_dir = _output_dir(options)
        suite_names = solve_suite_names(outs_dir, datasources, options)
        _parallel_execute(datasources, options, outs_dir, pabot_args, suite_names)
        print "Merging test results."
        # rebot's return value becomes the process exit code.
        sys.exit(rebot(*sorted(glob(os.path.join(outs_dir, '**/*.xml'))),
                       **_options_for_rebot(options, datasources, start_time_string, _now(), pabot_args)))
    except Information, i:
        # Option-parsing "information" (e.g. --help): print usage and message.
        print """A parallel executor for Robot Framework test cases. Supports all Robot Framework command line options and also following options (these must be before normal RF options): --verbose more output --command [ACTUAL COMMANDS TO START ROBOT EXECUTOR] --end-command RF script for situations where pybot is not used directly --processes [NUMBER OF PROCESSES] How many parallel executors to use (default max of 2 and cpu count) --no_load_balancing pre-compute the distribution of the suites among the workers. This option gives a reproduceable distribution but the total execution time may increase in a significant way. --randomize_suites [SEED] randomize suites execution order using an optional seed argument --resource_file [FILENAME] use FILENAME to declare resources for the workers for instance, if workers require a servername variable, FILENAME can be defined with this content: --variable servername:server1 --variable servername:server2 --variable servername:server3 --metadata [name:value] See rebot documentation for usage """
        print i.message
def run_rebot(args):
    """Aggregate all xml reports found in args.inputdir into one report."""
    logging.info("Run rebot for the '{0}' folder".format(args.inputdir))
    # Aggregate testing reports
    xml_reports = glob.glob(args.inputdir + "/*.xml")
    rebot(*xml_reports, name="edgex", outputdir=args.outputdir,
          xunit="result.xml")
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
    stream = StreamWithOnlyWriteAndFlush()
    # Without outputdir nothing can be created -> error code 252.
    rc = rebot(self.data, log="NONE", report="NONE",
               stdout=stream, stderr=stream)
    assert_equals(rc, 252)
    rc = rebot(self.data, report="NONE", stdout=stream, stderr=stream,
               outputdir=TEMP)
    assert_equals(rc, 1)
    self._assert_output(stream, [("[ ERROR ] No outputs created", 1),
                                 ("--help", 1),
                                 ("Log:", 1),
                                 ("Report:", 0)])
    self._assert_outputs()
def test_custom_stdout(self):
    out_stream = StringIO()
    rc = rebot(self.data, report='None', stdout=out_stream, outputdir=TEMP)
    assert_equal(rc, 1)
    self._assert_output(out_stream, [('Log:', 1), ('Report:', 0)])
    self._assert_outputs()
def test_run_multiple_times(self):
    first = rebot(self.data, outputdir=TEMP)
    assert_equal(first, 1)
    second = rebot(self.data, outputdir=TEMP, name='New Name')
    assert_equal(second, 1)
    self._assert_outputs([(LOG, 2)])
def test_run_fails(self):
    # Nonexistent input is an error (252); the valid data has one failure.
    assert_equal(rebot(self.nonex), 252)
    assert_equal(rebot(self.data, outputdir=TEMP), 1)
    expected_stderr = [('[ ERROR ]', 1), (self.nonex, (1, 2)), ('--help', 1)]
    self._assert_outputs(stdout=[(LOG, 1)], stderr=expected_stderr)
def test_run_multiple_times(self):
    # First run: critical='nomatch' leaves no critical tests, so rc is 0.
    rc = rebot(self.data, outputdir=TEMP, critical='nomatch')
    assert_equals(rc, 0)
    rc = rebot(self.data, outputdir=TEMP, name='New Name')
    assert_equals(rc, 1)
    self._assert_outputs([(LOG, 2)])
def test_run_once(self):
    rc = rebot(self.data, outputdir=TEMP, report='NONE')
    assert_equal(rc, 1)
    self._assert_outputs([(LOG, 1), ('Report:', 0)])
    assert exists(LOG_PATH)
def parse_outputs(out_dir='./'):
    """Merge every robot output xml found directly in out_dir.

    Fixes: builds paths with os.path.join (portable) instead of '/'-joining,
    and no longer shadows the builtin name 'file'.
    """
    outs = [os.path.join(out_dir, name)
            for name in os.listdir(out_dir)
            if name.endswith('.xml')]
    robot.rebot(*outs, merge=True)
def test_run_once(self):
    result = rebot(self.data, outputdir=TEMP, report='NONE')
    assert_equals(result, 1)
    self._assert_outputs([(LOG, 1), ('Report:', 0)])
    assert exists(LOG_PATH)
def test_custom_stdout(self):
    captured = StringIO()
    assert_equals(rebot(self.data, report='None', stdout=captured,
                        outputdir=TEMP), 1)
    self._assert_output(captured, [('Log:', 1), ('Report:', 0)])
    self._assert_outputs()
def test_run_fails(self):
    # Nonexistent input is an error (252); a valid run has one failing test.
    assert_equals(rebot(self.nonex), 252)
    assert_equals(rebot(self.data, outputdir=TEMP), 1)
    self._assert_outputs(stdout=[(LOG, 1)],
                         stderr=[('[ ERROR ]', 1), (self.nonex, 1),
                                 ('--help', 1)])
# One test run per configured library entry ("name:path"); collect each
# run's output xml so they can be merged with rebot afterwards.
# NOTE(review): 'outputs', 'libraries', 'interpreter' etc. are defined
# before this fragment — confirm against the full script.
for entry in libraries:
    name, _, path = entry.partition(':')
    name = name.rsplit('.', 1)[1]
    OUTPUT = join(RESULTS, 'output-' + name + '.xml')
    outputs.append(OUTPUT)
    args = [
        interpreter, '-m', 'robot.run', '--name', name,
        '--variable', 'PATH:' + path,
        '--output', OUTPUT, '--log', 'NONE', '--report', 'NONE'
    ]
    if 'MinDynamic' in name:
        # Minimal dynamic API cannot report argument specifications.
        args.extend(['--exclude', 'argsknown'])
    if 'kwargs' in name.lower():
        args.extend(['--include', 'kwargs'])
    else:
        args.extend(['--exclude', 'kwargs'])
    args.extend(['--loglevel', 'DEBUG'])
    args.extend([join(BASE, 'tests')])
    print('Running tests with command:\n%s' % ' '.join(args))
    subprocess.call(args)
    print
    statuschecker.process_output(OUTPUT)
servercontroller.stop(8270, "/Static")
# rebot's return code is the number of failed tests (0 == success).
rc = robot.rebot(*outputs, outputdir=RESULTS)
if rc == 0:
    print('All tests passed')
else:
    print('%d test%s failed' % (rc, 's' if rc != 1 else ''))
# Pick server and runner interpreters; "a:b" runs the server with a and
# the tests with b, a single value is used for both.
interpreters = clargs.pop(0)
if ':' in interpreters:
    server_interpreter, runner_interpreter = interpreters.rsplit(':', 1)
else:
    server_interpreter = runner_interpreter = interpreters
if clargs and clargs[0].startswith('libraryfile='):
    library_file = clargs.pop(0).split('=')[1]
else:
    library_file = 'StaticApiLibrary.py'
servercontroller.start(server_interpreter, library_file)
name = interpreters + '_-_' + library_file.rsplit('.', 1)[0]
args = [runner_interpreter, '-m', 'robot.run', '--name', name,
        '--output', OUTPUT, '--log', 'NONE', '--report', 'NONE']
if 'minimal' in library_file.lower():
    # Minimal library cannot report argument specifications.
    args.extend(['--exclude', 'argsknown'])
args.extend(clargs or [join(BASE, 'tests')])
print 'Running tests with command:\n%s' % ' '.join(args)
subprocess.call(args)
servercontroller.stop()
print
statuschecker.process_output(OUTPUT)
# rebot's return code is the number of failed tests (0 == success).
rc = robot.rebot(OUTPUT, outputdir=RESULTS)
if rc == 0:
    print 'All tests passed'
else:
    print '%d test%s failed' % (rc, 's' if rc != 1 else '')
def test_custom_stdout(self):
    out = StringIO()
    result = rebot(self.data, report="None", stdout=out, outputdir=TEMP)
    assert_equals(result, 1)
    self._assert_output(out, [("Log:", 1), ("Report:", 0)])
    self._assert_outputs()
outputs = []
# One test run per configured library entry ("name:path"); collect each
# run's output xml so they can be merged with rebot afterwards.
for entry in libraries:
    name, _, path = entry.partition(':')
    name = name.rsplit('.', 1)[1]
    OUTPUT = join(RESULTS, 'output-' + name + '.xml')
    outputs.append(OUTPUT)
    args = [interpreter, '-m', 'robot.run', '--name', name,
            '--variable', 'PATH:' + path,
            '--output', OUTPUT, '--log', 'NONE', '--report', 'NONE']
    if 'min' in name.lower() or 'static' in name.lower():
        # These libraries cannot report argument specifications.
        args.extend(['--exclude', 'argsknown'])
    if 'kwargs' in name.lower():
        args.extend(['--include', 'kwargs'])
    else:
        args.extend(['--exclude', 'kwargs'])
    args.extend(['--loglevel','DEBUG'])
    args.extend([join(BASE, 'tests')])
    print 'Running tests with command:\n%s' % ' '.join(args)
    subprocess.call(args)
    print
    statuschecker.process_output(OUTPUT)
servercontroller.stop(8270, "/Static")
# rebot's return code is the number of failed tests (0 == success).
rc = robot.rebot(*outputs, outputdir=RESULTS)
if rc == 0:
    print 'All tests passed'
else:
    print '%d test%s failed' % (rc, 's' if rc != 1 else '')
mkdir(results)
if not arguments:
    # No explicit targets: run the unit tests first and bail out on failure.
    print 'Running unit tests with %s.' % interpreter
    rc = subprocess.call([interpreter, join(curdir, 'utest', 'run.py')])
    print
    if rc != 0:
        print '%d unit test%s failed.' % (rc, 's' if rc != 1 else '')
        sys.exit(rc)
    arguments = [join(curdir, 'atest')]
command = ['python', '-m', 'robot.run',
           '--variable', 'INTERPRETER:%s' % interpreter,
           '--name', '%s Remote Server' % splitext(basename(interpreter))[0].title(),
           '--metadata', 'Server_Interpreter:%s' % interpreter,
           '--noncritical', 'skip',
           '--output', output, '--log', 'NONE', '--report', 'NONE'] + arguments
print 'Running acceptance tests with command:\n%s' % ' '.join(command)
subprocess.call(command)
print
print 'Verifying results.'
robotstatuschecker.process_output(output)
# rebot's return code is the number of failed critical tests (0 == success).
rc = robot.rebot(output, outputdir=results, noncritical='skip')
print
if rc == 0:
    print 'All tests passed.'
else:
    print '%d acceptance test%s failed.' % (rc, 's' if rc != 1 else '')
sys.exit(rc)
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
    stream = StreamWithOnlyWriteAndFlush()
    # Without outputdir nothing can be created -> error code 252.
    rc = rebot(self.data, log='NONE', report='NONE',
               stdout=stream, stderr=stream)
    assert_equal(rc, 252)
    rc = rebot(self.data, report='NONE', stdout=stream, stderr=stream,
               outputdir=TEMP)
    assert_equal(rc, 1)
    self._assert_output(stream, [('[ ERROR ] No outputs created', 1),
                                 ('--help', 1),
                                 ('Log:', 1),
                                 ('Report:', 0)])
    self._assert_outputs()