def _check_for_driver_crash_or_unresponsiveness(self, error_line):
    """Inspect one stderr line from the driver for crash/unresponsive markers.

    Records the crashed/unresponsive process name and pid on self when a
    marker is found. Returns True if the line indicated a crash or an
    unresponsive child process; otherwise falls back to self.has_crashed().
    `error_line` is bytes read from the driver's stderr.
    """
    crashed_check = error_line.rstrip(b'\r\n')
    if crashed_check == b"#CRASHED":
        # Bare marker: the driver process itself crashed.
        self._crashed_process_name = self._server_process.process_name()
        self._crashed_pid = self._server_process.system_pid()
        return True
    elif error_line.startswith(b"#CRASHED - "):
        # Annotated marker: a named child process crashed; default to
        # 'WebProcess' if the name cannot be parsed.
        match = re.match(b'#CRASHED - (\S+)', error_line)
        self._crashed_process_name = decode_for(
            match.group(1), str) if match else 'WebProcess'
        match = re.search(b'pid (\d+)', error_line)
        self._crashed_pid = int(match.group(1)) if match else None
        _log.debug('%s crash, pid = %s' %
                   (self._crashed_process_name, str(self._crashed_pid)))
        return True
    elif error_line.startswith(b"#PROCESS UNRESPONSIVE - "):
        match = re.match(b'#PROCESS UNRESPONSIVE - (\S+)', error_line)
        child_process_name = decode_for(match.group(1), str) if match else 'WebProcess'
        match = re.search(b'pid (\d+)', error_line)
        child_process_pid = int(match.group(1)) if match else None
        _log.debug('%s is unresponsive, pid = %s' %
                   (child_process_name, str(child_process_pid)))
        # Treat unresponsiveness as a driver timeout and sample the hung
        # process (when we have a pid) to aid debugging.
        self._driver_timed_out = True
        if child_process_pid:
            self._port.sample_process(child_process_name, child_process_pid,
                                      self._target_host)
        self.error_from_test += decode_for(error_line, str)
        self._server_process.write(
            '#SAMPLE FINISHED\n', True
        )  # Must be able to ignore a broken pipe here, target process may already be closed.
        return True
    return self.has_crashed()
def _parse_last_build_cell(self, builder, cell):
    """Fill builder['built_revision'], ['is_green'] and ['build_number'] from
    the "last build" table cell (a BeautifulSoup element).

    When the cell has no <a> link (e.g. a just-added builder showing
    "no build"), all three keys are still set so downstream code can rely
    on their presence.
    """
    status_link = cell.find('a')
    if status_link:
        # Will be either a revision number or a build number
        revision_string = status_link.string
        # If revision_string has non-digits assume it's not a revision number.
        builder['built_revision'] = int(revision_string) if not re.match(
            r'\D', revision_string) else None
        # FIXME: We treat slave lost as green even though it is not to
        # work around the Qts bot being on a broken internet connection.
        # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
        builder['is_green'] = not re.search('fail', decode_for(cell.renderContents(), str)) or \
            bool(re.search('lost', decode_for(cell.renderContents(), str)))
        status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
        link_match = re.match(status_link_regexp, status_link['href'])
        builder['build_number'] = int(link_match.group("build_number"))
    else:
        # We failed to find a link in the first cell, just give up. This
        # can happen if a builder is just-added, the first cell will just
        # be "no build"
        # Other parts of the code depend on is_green being present.
        builder['is_green'] = False
        builder['built_revision'] = None
        builder['build_number'] = None
def _parse_child_processes_output(output):
    """Parse lines of the form 'name: pid' into a mapping of process name
    to the list of pid strings reported for it."""
    processes_by_name = defaultdict(list)
    for raw_line in output.splitlines():
        parsed = re.match(b'^([^:]+): ([0-9]+)$', raw_line)
        if not parsed:
            continue
        name = decode_for(parsed.group(1), str)
        pid = decode_for(parsed.group(2), str)
        processes_by_name[name].append(pid)
    return processes_by_name
def is_invalid_bugzilla_email(self, search_string):
    """Return True when Bugzilla's review-queue search reports that
    `search_string` matched no account."""
    quoted_requester = urllib.quote(search_string)
    review_queue_url = "request.cgi?action=queue&requester=%s&product=&type=review&requestee=&component=&group=requestee" % quoted_requester
    page = self._load_query(review_queue_url)
    page_text = decode_for(page.read(), str)
    return bool(re.search('did not match anything', page_text))
def check(self, patch_string, fs=None):
    """Check style in the given patch."""
    fs = fs or FileSystem()
    patch_string = decode_for(patch_string, str)
    patch_files = DiffParser(patch_string.splitlines()).files
    # If the user uses git, checking subversion config file only once is enough.
    call_only_once = True
    for path, diff_file in patch_files.items():
        line_numbers = diff_file.added_or_modified_line_numbers()
        _log.debug('Found %s new or modified lines in: %s' %
                   (len(line_numbers), path))
        if not line_numbers:
            # NOTE(review): this pattern matches any path merely *ending* in
            # "png" (no dot required) — presumably intended for .png files;
            # confirm before tightening.
            match = re.search("\s*png$", path)
            if match and fs.exists(path):
                if call_only_once:
                    self._text_file_reader.process_file(file_path=path,
                                                        line_numbers=None)
                    cwd = FileSystem().getcwd()
                    detection = SCMDetector(
                        fs, Executive()).detect_scm_system(cwd)
                    # Under git the svn-config check need not be repeated
                    # for later files.
                    if detection.display_name() == "git":
                        call_only_once = False
                continue
            # Don't check files which contain only deleted lines
            # as they can never add style errors. However, mark them as
            # processed so that we count up number of such files.
            self._text_file_reader.delete_file(path)
            continue
        self._text_file_reader.process_file(file_path=path,
                                            line_numbers=line_numbers)
    self._text_file_reader.do_association_check(fs.getcwd())
def write_test_output(self, test_input, output, is_reftest):
    """Write a single test result to stdout in DRT wire format.

    Emits a text-or-audio block terminated by '#EOF', then (when pixel
    tests are enabled and a hash is present) hash lines plus the PNG image
    only when the actual hash differs from the expected one, then a final
    '#EOF' on both stdout and stderr.
    """
    if output.audio:
        self._stdout.write('Content-Type: audio/wav\n')
        self._stdout.write('Content-Transfer-Encoding: base64\n')
        self._stdout.write(base64.b64encode(output.audio))
    else:
        self._stdout.write('Content-Type: text/plain\n')
        # FIXME: Note that we don't ensure there is a trailing newline!
        # This mirrors actual (Mac) DRT behavior but is a bug.
        if output.text:
            self._stdout.write(output.text)
    self._stdout.write('#EOF\n')
    if self._options.pixel_tests and output.image_hash:
        self._stdout.write('\n')
        self._stdout.write('ActualHash: %s\n' % output.image_hash)
        self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
        # Only ship the image bytes on a mismatch; a matching hash means
        # the reader does not need the pixels.
        if output.image_hash != test_input.image_hash:
            self._stdout.write('Content-Type: image/png\n')
            self._stdout.write('Content-Length: %s\n' % len(output.image))
            self._stdout.write(decode_for(output.image, str))
    self._stdout.write('#EOF\n')
    self._stdout.flush()
    self._stderr.write('#EOF\n')
    self._stderr.flush()
def _parse_attachment_id_from_add_patch_to_bug_response(response_html):
    """Extract the attachment id from Bugzilla's add-patch response page,
    or log a warning and return None when the title cannot be parsed."""
    page_text = decode_for(response_html, str)
    title_match = re.search('<title>Attachment (?P<attachment_id>\d+) added to Bug \d+</title>', page_text)
    if not title_match:
        _log.warning('Unable to parse attachment id')
        return None
    return title_match.group('attachment_id')
def diff_image(self, expected_contents, actual_contents, tolerance=None):
    """Mock image diff: returns (diff_text_or_flag, percent, error).

    Both-empty inputs compare equal; one-empty inputs report a difference
    without diff text; otherwise a unified-style text diff is produced
    when the byte contents differ.
    """
    expected_contents = encode_if_necessary(expected_contents)
    actual_contents = encode_if_necessary(actual_contents)
    contents_differ = actual_contents != expected_contents
    if not actual_contents and not expected_contents:
        return (None, 0, None)
    if not actual_contents or not expected_contents:
        return (True, 0, None)
    if b'ref' in expected_contents:
        # Reftests are expected to be compared with zero tolerance.
        assert tolerance == 0
    if contents_differ:
        diff_text = "< {}\n---\n> {}\n".format(
            decode_for(expected_contents, str),
            decode_for(actual_contents, str),
        )
        return (diff_text, 1, None)
    return (None, 0, None)
def __init__(self, text, image, image_hash, audio, crash=False,
             test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
             crashed_pid=None, crash_log=None, pid=None):
    """Container for one driver run's results: text/image/audio output plus
    crash/timeout status and timing metadata."""
    # FIXME: Args could be renamed to better clarify what they do.
    self.text = decode_for(text, str) if text else None  # Text output, decoded to str; None when empty.
    self.image = image  # May be empty-string if the test crashes.
    self.image_hash = image_hash
    self.image_diff = None  # image_diff gets filled in after construction.
    self.audio = encode_if_necessary(
        audio) if audio else None  # Binary format is port-dependent.
    self.crash = crash
    self.crashed_process_name = crashed_process_name
    self.crashed_pid = crashed_pid
    self.crash_log = crash_log
    self.test_time = test_time
    self.measurements = measurements
    self.timeout = timeout
    self.error = error  # stderr output
    self.pid = pid
def running_pids(self, process_name_filter=None):
    """Return a sorted list of pids of running processes whose name passes
    `process_name_filter` (all processes when no filter is given).

    Uses `ps` output; on Cygwin the Windows pid is also recorded in
    self.pid_to_system_pid. Native Windows is not supported yet and
    returns an empty list.
    """
    if self._is_native_win:
        # FIXME: running_pids isn't implemented on native Windows yet...
        return []
    if not process_name_filter:
        process_name_filter = lambda process_name: True
    running_pids = []
    if self._is_cygwin:
        ps_process = self.run_command(['ps', '-e'], ignore_errors=True)
        for line in ps_process.splitlines():
            tokens = line.strip().split()
            try:
                # Cygwin `ps -e` columns; lines that don't have exactly
                # these fields (e.g. the header) raise ValueError and are
                # skipped.
                pid, ppid, pgid, winpid, tty, uid, stime, process_name = tokens
                if process_name_filter(process_name):
                    running_pids.append(int(pid))
                    self.pid_to_system_pid[int(pid)] = int(winpid)
            except ValueError as e:
                pass
    else:
        with self.popen(['ps', '-eo', 'pid,comm'],
                        stdout=self.PIPE,
                        stderr=self.PIPE) as ps_process:
            stdout, _ = ps_process.communicate()
            for line in stdout.splitlines():
                try:
                    # In some cases the line can contain one or more
                    # leading white-spaces, so strip it before split.
                    pid, process_name = line.strip().split(b' ', 1)
                    if process_name_filter(unicode_compatibility.decode_for(process_name, str)):
                        running_pids.append(int(pid))
                except ValueError as e:
                    pass
    return sorted(running_pids)
def authenticate(self):
    """Log in to Bugzilla via the mechanize browser, retrying with freshly
    prompted credentials up to 5 times before raising.

    No-op when already authenticated. On success sets self.authenticated
    and self.username.
    """
    if self.authenticated:
        return
    credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
    attempts = 0
    while not self.authenticated:
        attempts += 1
        # Only the first attempt may reuse stored credentials; later
        # attempts re-prompt the user.
        username, password = credentials.read_credentials(use_stored_credentials=attempts == 1)
        _log.info("Logging in as %s..." % username)
        self.open_url(config_urls.bug_server_url + "index.cgi?GoAheadAndLogIn=1")
        self.browser.select_form(name="login")
        self.browser['Bugzilla_login'] = username
        self.browser['Bugzilla_password'] = password
        self.browser.find_control("Bugzilla_restrictlogin").items[0].selected = False
        response = self.browser.submit()
        match = re.search(b'<title>(.+?)</title>', response.read())
        # If the resulting page has a title, and it contains the word
        # "invalid" assume it's the login failure page.
        if match and re.search(b'Invalid', match.group(1), re.IGNORECASE):
            errorMessage = "Bugzilla login failed: {}".format(decode_for(match.group(1), str))
            # raise an exception only if this was the last attempt
            if attempts < 5:
                _log.error(errorMessage)
            else:
                raise Exception(errorMessage)
        else:
            self.authenticated = True
            self.username = username
def decode_content(self):
    """Populate self.decoded_content from self.content: base64-decode when
    the attachment encoding says so, otherwise decode to str (None on
    undecodable bytes)."""
    if self.encoding == 'base64' and self.content is not None:
        self.decoded_content = base64.b64decode(self.content)
        return
    try:
        decoded = decode_for(self.content, str)
    except UnicodeDecodeError:
        decoded = None
    self.decoded_content = decoded
def _read(self):
    """Read one comparison result from the ImageDiff helper process.

    Returns (output_image_bytes, diff_percent, error_string_or_None).
    Reads stdout lines until the terminating 'diff:' line, grabbing the
    binary diff image announced by a Content-Length header along the way.
    A 2-second deadline bounds the whole exchange.
    """
    deadline = time.time() + 2.0
    output = None
    output_image = b''
    while not self._process.timed_out and not self._process.has_crashed():
        output = self._process.read_stdout_line(deadline)
        if self._process.timed_out or self._process.has_crashed(
        ) or not output:
            break
        if output.startswith(
                b'diff'):  # This is the last line ImageDiff prints.
            break
        if output.startswith(b'Content-Length'):
            m = re.match(b'Content-Length: (\d+)', output)
            content_length = int(decode_for(m.group(1), str))
            # The binary image blob follows immediately; read it, then the
            # trailing 'diff:' summary line.
            output_image = self._process.read_stdout(
                deadline, content_length)
            output = self._process.read_stdout_line(deadline)
            break
    stderr = decode_for(self._process.pop_all_buffered_stderr(), str)
    err_str = ''
    if stderr:
        err_str += "ImageDiff produced stderr output:\n" + stderr
    if self._process.timed_out:
        err_str += "ImageDiff timed out\n"
    if self._process.has_crashed():
        err_str += "ImageDiff crashed\n"
    diff_percent = 0
    if output and output.startswith(b'diff'):
        m = re.match(b'diff: (.+)% (passed|failed)', output)
        if m.group(2) == b'passed':
            return (None, 0, None)
        diff_percent = float(decode_for(m.group(1), str))
    return (output_image, diff_percent, err_str or None)
def _parse_leaks_output(self, leaks_output):
    """Parse valgrind's XML leak report.

    Returns the set of errors found in this report (plus marker strings
    for errors already reported by earlier tests), or None when the XML
    cannot be parsed. Also updates self._errors and self._suppressions.
    """
    try:
        parsed_string = parseString(leaks_output)
    except ExpatError as e:
        # Removed unused local `parse_failed` flag; the early return
        # already signals the failure to the caller.
        _log.error("could not parse %s: %s" % (decode_for(leaks_output, str), e))
        return
    cur_report_errors = set()
    commandline = None
    # The preamble holds the command line valgrind ran, for context.
    preamble = parsed_string.getElementsByTagName("preamble")[0]
    for node in preamble.getElementsByTagName("line"):
        if node.localName == "line":
            for x in node.childNodes:
                if x.nodeType == node.TEXT_NODE and "Command" in x.data:
                    commandline = x.data
                    break
    raw_errors = parsed_string.getElementsByTagName("error")
    for raw_error in raw_errors:
        # Ignore "possible" leaks and InvalidRead/Write by default.
        if (get_text_of(raw_error, "kind") != "Leak_PossiblyLost") and \
           (get_text_of(raw_error, "kind") != "Leak_StillReachable") and \
           (get_text_of(raw_error, "kind") != "InvalidWrite") and \
           (get_text_of(raw_error, "kind") != "InvalidRead"):
            error = ValgrindError(self._executive, self._source_dir, raw_error)
            if error not in cur_report_errors:
                # We haven't seen such errors doing this report yet...
                if error in self._errors:
                    # ... but we saw it in earlier reports, e.g. previous UI test
                    cur_report_errors.add("This error was already printed in "
                                          "some other test, see 'hash=#%016X#'" % \
                                          error.error_hash())
                else:
                    # ... and we haven't seen it in other tests as well
                    self._errors.add(error)
                    cur_report_errors.add(error)
    # Accumulate suppression counts across reports.
    suppcountlist = parsed_string.getElementsByTagName("suppcounts")
    if len(suppcountlist) > 0:
        suppcountlist = suppcountlist[0]
        for node in suppcountlist.getElementsByTagName("pair"):
            count = get_text_of(node, "count")
            name = get_text_of(node, "name")
            self._suppressions[name] += int(count)
    return cur_report_errors
def _read_header(self, block, line, header_text, header_attr, header_filter=None):
    """If `line` starts with `header_text` and the block's `header_attr` is
    still unset, parse the header value (optionally through
    `header_filter`), store it on the block, and return True."""
    if not line.startswith(header_text):
        return False
    if getattr(block, header_attr) is not None:
        return False
    parsed_value = decode_for(line.split()[1], str)
    if header_filter:
        parsed_value = header_filter(parsed_value)
    setattr(block, header_attr, parsed_value)
    return True
def _check_create_bug_response(self, response_html):
    """Return the new bug id parsed from Bugzilla's create-bug response,
    or raise with the page's error message when no bug was created."""
    page_text = decode_for(response_html, str)
    title_match = re.search('<title>Bug (?P<bug_id>\d+) Submitted[^<]*</title>', page_text)
    if title_match:
        return title_match.group('bug_id')
    body_match = re.search(
        '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
        page_text, re.DOTALL)
    error_message = "FAIL"
    if body_match:
        text_lines = BeautifulSoup(body_match.group('error_message')).findAll(text=True)
        stripped = [" " + line.strip() for line in text_lines if line.strip()]
        error_message = "\n" + '\n'.join(stripped)
    raise Exception("Bug not created: {}".format(error_message))
def _run_command_with_teed_output(self, args, teed_output, **kwargs):
    """Run `args`, streaming each combined stdout/stderr line into
    `teed_output` as it appears, and return the child's exit code."""
    child_process = self.popen(args,
                               stdout=self.PIPE,
                               stderr=self.STDOUT,
                               close_fds=self._should_close_fds(),
                               **kwargs)
    with child_process:
        # Use our own custom wait loop because Popen ignores a tee'd
        # stderr/stdout.
        # FIXME: This could be improved not to flatten output to stdout.
        while child_process.poll() is None:
            output_line = child_process.stdout.readline()
            teed_output.write(unicode_compatibility.decode_for(output_line, str))
        return child_process.poll()
def _parse_current_build_cell(self, builder, cell):
    """Fill builder['activity'] and ['pending_builds'] from the "current
    build" table cell (a BeautifulSoup element)."""
    # BeautifulSoup and bs4 render the line break differently, so pick
    # whichever separator actually appears in the rendered contents.
    rendered = decode_for(cell.renderContents(), str)
    separator = '<br/>' if '<br/>' in rendered else '<br />'
    activity_lines = rendered.split(separator)
    # First line is normally "building" or "idle".
    builder["activity"] = activity_lines[0]
    # The middle lines document how long left for any current builds;
    # the last line may report a pending-build count.
    pending_match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
    builder["pending_builds"] = int(pending_match.group("pending_builds")) if pending_match else 0
def _check_for_driver_timeout(self, out_line):
    """Inspect one stdout line (bytes) from the driver for timeout markers.

    On '#PID UNRESPONSIVE' records the unresponsive child's name/pid,
    appends a message to self.error_from_test, and optionally samples the
    process. On the notifyDone-timeout line, sets self._driver_timed_out.
    """
    if out_line.startswith(b"#PID UNRESPONSIVE - "):
        match = re.match(b'#PID UNRESPONSIVE - (\S+)', out_line)
        child_process_name = decode_for(match.group(1), str) if match else 'WebProcess'
        match = re.search(b'pid (\d+)', out_line)
        child_process_pid = int(match.group(1)) if match else None
        err_line = 'Wait on notifyDone timed out, process ' + child_process_name + ' pid = ' + str(
            child_process_pid)
        self.error_from_test += err_line
        _log.debug(err_line)
        if self._port.get_option("sample_on_timeout"):
            self._port.sample_process(child_process_name, child_process_pid,
                                      self._target_host)
    # BUG FIX: out_line is bytes (see the startswith(b"...") above), so the
    # original comparison against a str literal could never match; compare
    # against a bytes literal instead.
    if out_line == b"FAIL: Timed out waiting for notifyDone to be called\n":
        self._driver_timed_out = True
def write_git_patch_file(self):
    """Write the WPT export patch to a temp file and return its path.

    Returns b'' when the patch contains no changes, or when it contains
    webkit-test-runner specific changes that must be removed first.
    """
    _, patch_file = self._filesystem.open_binary_tempfile('wpt_export_patch')
    patch_data = self._wpt_patch
    if b'diff' not in patch_data:
        _log.info('No changes to upstream, patch data is: "{}"'.format(
            decode_for(patch_data, str)))
        return b''
    # FIXME: We can probably try to use --relative git parameter to not do that replacement.
    wpt_dir_prefix = encode_if_necessary(WEBKIT_WPT_DIR) + b'/'
    patch_data = patch_data.replace(wpt_dir_prefix, b'')
    # FIXME: Support stripping of <!-- webkit-test-runner --> comments.
    self.has_webkit_test_runner_specific_changes = b'webkit-test-runner' in patch_data
    if self.has_webkit_test_runner_specific_changes:
        _log.warning(
            "Patch contains webkit-test-runner specific changes, please remove them before creating a PR"
        )
        return b''
    self._filesystem.write_binary_file(patch_file, patch_data)
    return patch_file
def _handle_error(self, driver_output, reference_filename=None):
    """Returns test failures if some unusual errors happen in driver's run.

    Args:
      driver_output: The output from the driver.
      reference_filename: The full path to the reference file which produced the driver_output.
          This arg is optional and should be used only in reftests until we have a better way to know
          which html file is used for producing the driver_output.
    """
    failures = []
    fs = self._filesystem
    if driver_output.timeout:
        failures.append(
            test_failures.FailureTimeout(bool(reference_filename)))
    if reference_filename:
        testname = self._port.relative_test_filename(reference_filename)
    else:
        testname = self._test_name
    if driver_output.crash:
        failures.append(
            test_failures.FailureCrash(bool(reference_filename),
                                       driver_output.crashed_process_name,
                                       driver_output.crashed_pid))
        if driver_output.error:
            _log.debug("%s %s crashed, (stderr lines):" %
                       (self._worker_name, testname))
        else:
            _log.debug("%s %s crashed, (no stderr)" %
                       (self._worker_name, testname))
    elif driver_output.error:
        _log.debug("%s %s output stderr lines:" %
                   (self._worker_name, testname))
    # Log any stderr lines regardless of which header (if any) was printed.
    for line in driver_output.error.splitlines():
        _log.debug(" {}".format(decode_for(line, str)))
    return failures
def run_command(self,
                args,
                cwd=None,
                input=None,
                error_handler=None,
                ignore_errors=False,
                return_exit_code=False,
                return_stderr=True,
                decode_output=False,
                env=None):
    """Mock Executive.run_command: records the call, optionally logs it,
    and returns canned output (or raises a canned ScriptError)."""
    self.calls.append(args)
    assert (isinstance(args, list) or isinstance(args, tuple))
    if self._should_log:
        env_string = ""
        if env:
            pairs = ", ".join("'{}': '{}'".format(key, env[key])
                              for key in sorted(env.keys()))
            env_string = ", env={" + pairs + "}"
        input_string = ""
        if input:
            input_string = ", input=%s" % decode_for(input, str)
        _log.info("MOCK run_command: %s, cwd=%s%s%s" %
                  (args, cwd, env_string, input_string))
    output = "MOCK output of child process"
    if self._should_throw_when_run.intersection(args):
        raise ScriptError("Exception for %s" % args,
                          output="MOCK command output")
    if self._should_throw:
        raise ScriptError("MOCK ScriptError", output=output)
    return output
def test_auth_token(self):
    """auth_token() should produce the base64 of 'user:token'."""
    # base64.encodestring was deprecated in 3.1 and removed in Python 3.9;
    # base64.encodebytes produces byte-identical output.
    self.assertEqual(
        self.wpt_github.auth_token(),
        decode_for(
            base64.encodebytes(encode_if_necessary('rutabaga:decafbad')),
            str).strip())
def _read_block(self, deadline, test_name, wait_for_stderr_eof=False):
    """Read one output block (headers + content) from the driver.

    Interleaves stdout/stderr reads until '#EOF' is seen on stdout (and,
    when `wait_for_stderr_eof`, on stderr too), feeding stdout lines to
    the header parser and stderr lines to crash/ASan detection. Returns a
    ContentBlock with its content decoded.
    """
    block = ContentBlock()
    out_seen_eof = False
    asan_violation_detected = False
    while True:
        if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
            break
        # Read from whichever stream(s) have not yet hit EOF.
        if self.err_seen_eof:
            out_line = self._server_process.read_stdout_line(deadline)
            err_line = None
        elif out_seen_eof:
            out_line = None
            err_line = self._server_process.read_stderr_line(deadline)
        else:
            out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(
                deadline)
        # ServerProcess returns None for time outs and crashes.
        if out_line is None and err_line is None:
            break
        if out_line:
            assert not out_seen_eof
            out_line, out_seen_eof = self._strip_eof(out_line)
        if err_line:
            assert not self.err_seen_eof
            err_line, self.err_seen_eof = self._strip_eof(err_line)
        if out_line:
            self._check_for_driver_timeout(out_line)
            # Compare against both '\n' (py2 str) and 10 (py3 bytes index
            # yields an int) to stay version-agnostic.
            if out_line[-1] != '\n' and out_line[-1] != 10:
                _log.error(
                    " %s -> Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug."
                    % test_name)
            content_length_before_header_check = block._content_length
            self._process_stdout_line(block, out_line)
            # FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
            # Don't wait until we're done with headers, just read the binary blob right now.
            if content_length_before_header_check != block._content_length:
                block.content = encode_if_necessary(
                    self._server_process.read_stdout(
                        deadline, block._content_length))
        if err_line:
            if self._check_for_driver_crash_or_unresponsiveness(err_line):
                break
            elif self._check_for_address_sanitizer_violation(err_line):
                asan_violation_detected = True
                # NOTE(review): initialized to b'' but appended to with
                # decoded str below — confirm intended types.
                self._crash_report_from_driver = b''
                # ASan report starts with a nondescript line, we only detect the second line.
                end_of_previous_error_line = self.error_from_test.rfind(
                    '\n', 0, -1)
                if end_of_previous_error_line > 0:
                    self.error_from_test = self.error_from_test[:
                                                                end_of_previous_error_line]
                else:
                    self.error_from_test = ''
                # Symbolication can take a very long time, give it 10 extra minutes to finish.
                # FIXME: This can likely be removed once <rdar://problem/18701447> is fixed.
                # NOTE(review): 10 * 60 * 1000 looks like milliseconds while
                # deadlines elsewhere are in seconds — confirm the units.
                deadline += 10 * 60 * 1000
            if asan_violation_detected:
                self._crash_report_from_driver += decode_for(err_line, str)
            else:
                self.error_from_test += decode_for(err_line, str)
    if asan_violation_detected and not self._crashed_process_name:
        self._crashed_process_name = self._server_process.process_name()
        self._crashed_pid = self._server_process.system_pid()
    block.decode_content()
    return block
def __init__(self, fs, path, data):
    """Wrap raw `data` as an in-memory unicode stream for text reads."""
    text = unicode_compatibility.decode_for(data, unicode_compatibility.unicode)
    stream = unicode_compatibility.UnicodeIO(text)
    super(ReadableTextFileObject, self).__init__(fs, path, stream)
def run_test(self, driver_input, stop_when_done):
    """Run a single test and return the results.

    Note that it is okay if a test times out or crashes and
    leaves the driver in an indeterminate state. The upper layers of the program
    are responsible for cleaning up and ensuring things are okay.

    Returns a DriverOutput object.
    """
    start_time = time.time()
    self.start(driver_input.should_run_pixel_test, driver_input.args)
    test_begin_time = time.time()
    self._driver_timed_out = False
    self._crash_report_from_driver = None
    self.error_from_test = ''
    self.err_seen_eof = False
    command = self._command_from_driver_input(driver_input)
    # Certain timeouts are detected by the tool itself; tool detection is better,
    # because results contain partial output in this case. Make script timeout longer
    # by 5 seconds to avoid racing for which timeout is detected first.
    # FIXME: It's not the job of the driver to decide what the timeouts should be.
    # Move the additional timeout to driver_input.
    if self._no_timeout:
        deadline = test_begin_time + 60 * 60 * 24 * 7  # 7 days. Using sys.maxint causes a hang.
    else:
        deadline = test_begin_time + int(driver_input.timeout) / 1000.0 + 5
    self._server_process.write(command)
    text, audio = self._read_first_block(
        deadline, driver_input.test_name)  # First block is either text or audio
    image, actual_image_hash = self._read_optional_image_block(
        deadline, driver_input.test_name
    )  # The second (optional) block is image data.
    text = decode_for(text, str)
    crashed = self.has_crashed()
    timed_out = self._server_process.timed_out
    driver_timed_out = self._driver_timed_out
    pid = self._server_process.pid()
    if stop_when_done or crashed or timed_out:
        if stop_when_done and not (crashed or timed_out):
            self.do_post_tests_work()
        # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
        # In the timeout case, we kill the hung process as well.
        out, err = self._server_process.stop(
            self._port.driver_stop_timeout() if stop_when_done else 0.0)
        if out:
            text += decode_for(out, str)
        if err:
            self.error_from_test += decode_for(err, str)
        self._server_process = None
    crash_log = None
    if self._crash_report_from_driver:
        crash_log = self._crash_report_from_driver
    elif crashed:
        self.error_from_test, crash_log = self._get_crash_log(
            text, self.error_from_test, newer_than=start_time)
        # If we don't find a crash log use a placeholder error message instead.
        if not crash_log:
            pid_str = str(
                self._crashed_pid) if self._crashed_pid else "unknown pid"
            crash_log = 'No crash log found for %s:%s.\n' % (
                self._crashed_process_name, pid_str)
            # Print stdout and stderr to the placeholder crash log; we want as much context as possible.
            if self.error_from_test:
                crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (
                    text, self.error_from_test)
    return DriverOutput(text,
                        image,
                        actual_image_hash,
                        audio,
                        crash=crashed,
                        test_time=time.time() - test_begin_time,
                        measurements=self._measurements,
                        timeout=timed_out or driver_timed_out,
                        error=self.error_from_test,
                        crashed_process_name=self._crashed_process_name,
                        crashed_pid=self._crashed_pid,
                        crash_log=crash_log,
                        pid=pid)
def _is_ignored_file(self, filename):
    """Return True when `filename` ends with any suffix in
    EXCLUDED_FILE_SUFFIXES (after decoding bytes names to str)."""
    filename = decode_for(filename, str)
    # str.endswith accepts a tuple of suffixes — one C-level call instead
    # of an explicit loop; behavior is identical.
    return filename.endswith(tuple(EXCLUDED_FILE_SUFFIXES))
def mock_run_cppfilt_command(args):
    """Mock for running c++filt: look the mangled symbol up in the canned
    map; any other command yields empty output."""
    if args[0] != 'c++filt':
        return ""
    symbol = decode_for(args[2], str)
    return valgrind_output_cppfilt_map[symbol]
def auth_token(self):
    """Return the base64-encoded 'user:token' pair for HTTP basic auth.

    Requires credentials to be present (asserted).
    """
    assert self.has_credentials()
    credential_pair = '{}:{}'.format(self.user, self.token)
    encoded = base64.b64encode(encode_if_necessary(credential_pair))
    return decode_for(encoded, str)
def _run_single_test(self, binary_name, test):
    """Run one gtest case in its own ServerProcess and post its result.

    Streams stdout/stderr lines to the 'log' channel as they arrive,
    determines PASS/FAIL from '**PASS**'/'**FAIL**' markers, and maps
    timeouts/crashes to the corresponding Runner status. DISABLED_ tests
    are skipped unless --force is given. Finally posts 'ended_test' with
    the status and filtered output.
    """
    server_process = ServerProcess(
        self._port, binary_name,
        Runner.command_for_port(self._port, [
            self._port._build_path(binary_name),
            '--gtest_filter={}'.format(test)
        ]),
        env=self._port.environment_for_api_tests())
    status = Runner.STATUS_RUNNING
    if test.split('.')[1].startswith(
            'DISABLED_') and not self._port.get_option('force'):
        status = Runner.STATUS_DISABLED
    try:
        deadline = time.time() + self._timeout
        if status != Runner.STATUS_DISABLED:
            server_process.start()
        # Buffers are initialized unconditionally: the finally block below
        # reads them even on the disabled path.
        stdout_buffer = ''
        stderr_buffer = ''
        while status == Runner.STATUS_RUNNING:
            stdout_line, stderr_line = server_process.read_either_stdout_or_stderr_line(
                deadline)
            if not stderr_line and not stdout_line:
                break
            if stderr_line:
                stderr_line = decode_for(stderr_line, str)
                stderr_buffer += stderr_line
                # [:-1] drops the trailing newline before posting.
                self.post('log', output=stderr_line[:-1])
            if stdout_line:
                stdout_line = decode_for(stdout_line, str)
                if '**PASS**' in stdout_line:
                    status = Runner.STATUS_PASSED
                elif '**FAIL**' in stdout_line:
                    status = Runner.STATUS_FAILED
                else:
                    stdout_buffer += stdout_line
                    self.post('log', output=stdout_line[:-1])
        # Map process-level outcomes onto a status when no explicit
        # PASS/FAIL marker was seen.
        if status == Runner.STATUS_DISABLED:
            pass
        elif server_process.timed_out:
            status = Runner.STATUS_TIMEOUT
        elif server_process.has_crashed():
            status = Runner.STATUS_CRASHED
        elif status == Runner.STATUS_RUNNING:
            status = Runner.STATUS_FAILED
    finally:
        remaining_stderr = decode_for(
            server_process.pop_all_buffered_stderr(), str)
        remaining_stdout = decode_for(
            server_process.pop_all_buffered_stdout(), str)
        self.post('log', output=remaining_stderr + remaining_stdout)
        output_buffer = stderr_buffer + stdout_buffer + remaining_stderr + remaining_stdout
        server_process.stop()
    self.post('ended_test', '{}.{}'.format(binary_name, test), status,
              self._filter_noisy_output(output_buffer))