def _check_for_driver_crash_or_unresponsiveness(self, error_line):
    """Inspect one driver stderr line for crash / unresponsiveness markers.

    Updates self._crashed_process_name / self._crashed_pid (or sets
    self._driver_timed_out) as a side effect. Returns True when the line
    signals a crash or an unresponsive child process; otherwise defers to
    self.has_crashed().
    """
    crashed_check = error_line.rstrip(b'\r\n')
    if crashed_check == b"#CRASHED":
        # Bare marker: the server (driver) process itself crashed.
        self._crashed_process_name = self._server_process.process_name()
        self._crashed_pid = self._server_process.system_pid()
        return True
    elif error_line.startswith(b"#CRASHED - "):
        # Marker names the crashed child process; default to 'WebProcess'
        # if the name cannot be parsed.
        match = re.match(br'#CRASHED - (\S+)', error_line)
        self._crashed_process_name = string_utils.decode(
            match.group(1), target_type=str) if match else 'WebProcess'
        match = re.search(br'pid (\d+)', error_line)
        self._crashed_pid = int(match.group(1)) if match else None
        _log.debug('%s crash, pid = %s' % (self._crashed_process_name, str(self._crashed_pid)))
        return True
    elif error_line.startswith(b"#PROCESS UNRESPONSIVE - "):
        match = re.match(br'#PROCESS UNRESPONSIVE - (\S+)', error_line)
        child_process_name = string_utils.decode(
            match.group(1), target_type=str) if match else 'WebProcess'
        match = re.search(br'pid (\d+)', error_line)
        child_process_pid = int(match.group(1)) if match else None
        _log.debug('%s is unresponsive, pid = %s' % (child_process_name, str(child_process_pid)))
        self._driver_timed_out = True
        if child_process_pid:
            # Sample the hung process so the timeout report includes a trace.
            self._port.sample_process(child_process_name, child_process_pid, self._target_host)
        self.error_from_test += string_utils.decode(error_line, target_type=str)
        self._server_process.write(
            '#SAMPLE FINISHED\n', True
        )  # Must be able to ignore a broken pipe here, target process may already be closed.
        return True
    return self.has_crashed()
def _parse_last_build_cell(self, builder, cell):
    """Populate builder['built_revision', 'is_green', 'build_number'] from
    the "last build" table cell of a buildbot status page.

    If the cell has no status link (e.g. a just-added builder showing
    "no build"), all three keys are still set so downstream code can rely
    on their presence.
    """
    status_link = cell.find('a')
    if status_link:
        # Will be either a revision number or a build number
        revision_string = status_link.string
        # If revision_string has non-digits assume it's not a revision number.
        # Fixed: re.match only inspected the first character, so strings like
        # "123a" slipped through and crashed int(); re.search checks the whole
        # string, and the truthiness guard handles a missing/empty string.
        builder['built_revision'] = int(revision_string) \
            if revision_string and not re.search(r'\D', revision_string) \
            else None
        # FIXME: We treat worker lost as green even though it is not to
        # work around the Qts bot being on a broken internet connection.
        # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
        builder['is_green'] = not re.search('fail', string_utils.decode(cell.renderContents(), target_type=str)) or \
            bool(re.search('lost', string_utils.decode(cell.renderContents(), target_type=str)))
        status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
        link_match = re.match(status_link_regexp, status_link['href'])
        builder['build_number'] = int(link_match.group("build_number"))
    else:
        # We failed to find a link in the first cell, just give up. This
        # can happen if a builder is just-added, the first cell will just
        # be "no build"
        # Other parts of the code depend on is_green being present.
        builder['is_green'] = False
        builder['built_revision'] = None
        builder['build_number'] = None
def poll(self):
    """Mock Popen.poll(): lazily execute the canned completion, echo its
    output, and report the return code only once the simulated elapsed
    time has passed.

    Returns None while the (simulated) process is still "running",
    otherwise the completion's return code.
    """
    if not self._completion:
        # First poll: run the canned command with whatever was written
        # to our stdin, and replay its captured stdout/stderr.
        self.stdin.seek(0)
        self._completion = Subprocess.completion_for(
            *self._args, cwd=self._cwd, input=self.stdin.read())
        (self.stdout or sys.stdout).write(
            string_utils.decode(self._completion.stdout, target_type=self._stdout_type))
        (self.stdout or sys.stdout).flush()
        (self.stderr or sys.stderr).write(
            string_utils.decode(self._completion.stderr, target_type=self._stderr_type))
        (self.stderr or sys.stderr).flush()
    # Fixed: the original tested "is not None", which meant returncode could
    # never transition from None to the completed value (poll would always
    # report the process as still running). It must fire while returncode
    # is still unset.
    if self.returncode is None and time.time(
    ) >= self._start_time + self._completion.elapsed:
        self.returncode = self._completion.returncode
        # Rewind the captured streams so callers can read them from the start.
        if self.stdout:
            self.stdout.seek(0)
        if self.stderr:
            self.stderr.seek(0)
    return self.returncode
def _parse_child_processes_output(output):
    """Parse "name: pid" lines into a mapping of process name -> list of pid strings."""
    processes_by_name = defaultdict(list)
    line_pattern = re.compile(b'^([^:]+): ([0-9]+)$')
    for raw_line in output.splitlines():
        parsed = line_pattern.match(raw_line)
        if not parsed:
            continue
        name = string_utils.decode(parsed.group(1), target_type=str)
        pid = string_utils.decode(parsed.group(2), target_type=str)
        processes_by_name[name].append(pid)
    return processes_by_name
def _args_from_content(self, content, include_log=True):
    """Build commit-constructor kwargs from an SVN DAV XML log entry.

    content: XML document with an 'S:log-item' element (date, creator,
    version-name, comment).
    include_log: when False, the commit message is omitted (None).
    Returns a dict with revision, author, timestamp and message keys.
    """
    xml = xmltodict.parse(content)
    # Drop fractional seconds (everything after '.') before strptime,
    # which has no directive for them in this format string.
    date = datetime.strptime(
        string_utils.decode(xml['S:log-item']['S:date']).split('.')[0],
        '%Y-%m-%dT%H:%M:%S')
    name = string_utils.decode(xml['S:log-item']['D:creator-displayname'])
    return dict(
        revision=int(xml['S:log-item']['D:version-name']),
        # A creator name containing '@' is treated as an email address and
        # passed as both name and email; otherwise only as a name.
        author=self.contributors.create(name, name) if name and '@' in name else self.contributors.create(name),
        timestamp=int(calendar.timegm(date.timetuple())),
        message=string_utils.decode(xml['S:log-item']['D:comment']) if include_log else None,
    )
def write_test_output(self, test_input, output, is_reftest):
    """Write one test's results to stdout/stderr in DRT's '#EOF'-delimited
    wire format: content block, '#EOF', optional pixel-hash/image block,
    final '#EOF' on both streams.

    Note: is_reftest is accepted but not used in this implementation.
    """
    if output.audio:
        self._stdout.write('Content-Type: audio/wav\n')
        self._stdout.write('Content-Transfer-Encoding: base64\n')
        self._stdout.write(base64.b64encode(output.audio))
    else:
        self._stdout.write('Content-Type: text/plain\n')
        # FIXME: Note that we don't ensure there is a trailing newline!
        # This mirrors actual (Mac) DRT behavior but is a bug.
        if output.text:
            self._stdout.write(output.text)
    self._stdout.write('#EOF\n')
    if self._options.pixel_tests and output.image_hash:
        self._stdout.write('\n')
        self._stdout.write('ActualHash: %s\n' % output.image_hash)
        self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
        # Only ship the full PNG when the hashes disagree.
        if output.image_hash != test_input.image_hash:
            self._stdout.write('Content-Type: image/png\n')
            self._stdout.write('Content-Length: %s\n' % len(output.image))
            self._stdout.write(string_utils.decode(output.image, target_type=str))
    self._stdout.write('#EOF\n')
    self._stdout.flush()
    self._stderr.write('#EOF\n')
    self._stderr.flush()
def __init__(self, text, image, image_hash, audio, crash=False,
             test_time=0, measurements=None, timeout=False, error='',
             crashed_process_name='??', crashed_pid=None, crash_log=None,
             pid=None):
    """Container for the complete output of a single driver (test) run."""
    # FIXME: Args could be renamed to better clarify what they do.
    # Decoded text output of the test, or None when there was none.
    self.text = string_utils.decode(text, target_type=str) if text else None
    self.image = image  # May be empty-string if the test crashes.
    self.image_hash = image_hash
    self.image_diff = None  # image_diff gets filled in after construction.
    self.audio = string_utils.encode(
        audio) if audio else None  # Binary format is port-dependent.
    self.crash = crash
    self.crashed_process_name = crashed_process_name
    self.crashed_pid = crashed_pid
    self.crash_log = crash_log
    self.test_time = test_time
    self.measurements = measurements
    self.timeout = timeout
    self.error = error  # stderr output
    self.pid = pid
def _get_trace_from_flatpak(self):
    """Ask webkit-flatpak (run on the host via flatpak-spawn) for a gdb
    stack trace of the crashed process.

    Returns (crash_log, errors): the raw trace output and the stderr
    lines decoded for diagnostics.
    """
    if self.newer_than:
        # Restrict the trace to core dumps newer than this timestamp.
        coredump_since = "--gdb-stack-trace=@%f" % self.newer_than
    else:
        coredump_since = "--gdb-stack-trace"
    webkit_flatpak_path = self._webkit_finder.path_to_script(
        'webkit-flatpak')
    cmd = ['flatpak-spawn', '--host']
    # Forward WEBKIT_FLATPAK_USER_DIR so webkit-flatpak can use the same flatpak
    # install as the current one.
    user_dir = os.environ.get('WEBKIT_FLATPAK_USER_DIR')
    if user_dir:
        cmd.append("--env=WEBKIT_FLATPAK_USER_DIR=%s" % user_dir)
    cmd.extend([
        webkit_flatpak_path, '--%s' % self._port_name,
        "--%s" % self._configuration.lower(), "--verbose", coredump_since
    ])
    proc = self._executive.popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    crash_log, stderr = proc.communicate()
    # NOTE(review): str() on a bytes stderr yields "b'...'" under Python 3 —
    # presumably tolerable for log output; confirm intent.
    errors = string_utils.decode(str(stderr or '<empty>'), errors='ignore').splitlines()
    return crash_log, errors
def running_pids(self, process_name_filter=None):
    """Return the sorted pids of running processes whose name passes
    process_name_filter (all processes when the filter is omitted).

    Uses `ps -e` on Cygwin (also recording the Windows pid mapping in
    self.pid_to_system_pid) and `ps -eo pid,comm` elsewhere. Returns []
    on native Windows, where this is not implemented.
    """
    if self._is_native_win:
        # FIXME: running_pids isn't implemented on native Windows yet...
        return []
    if not process_name_filter:
        process_name_filter = lambda process_name: True
    running_pids = []
    if self._is_cygwin:
        ps_process = self.run_command(['ps', '-e'], ignore_errors=True)
        for line in ps_process.splitlines():
            tokens = line.strip().split()
            try:
                # Cygwin ps has a fixed 8-column layout; lines that don't
                # unpack (e.g. the header) raise ValueError and are skipped.
                pid, ppid, pgid, winpid, tty, uid, stime, process_name = tokens
                if process_name_filter(process_name):
                    running_pids.append(int(pid))
                    self.pid_to_system_pid[int(pid)] = int(winpid)
            except ValueError:
                pass
    else:
        with self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE) as ps_process:
            stdout, _ = ps_process.communicate()
            for line in stdout.splitlines():
                try:
                    # In some cases the line can contain one or more
                    # leading white-spaces, so strip it before split.
                    pid, process_name = line.strip().split(b' ', 1)
                    if process_name_filter(string_utils.decode(process_name, target_type=str)):
                        running_pids.append(int(pid))
                except ValueError:
                    pass
    return sorted(running_pids)
def _parse_attachment_id_from_add_patch_to_bug_response(response_html):
    """Extract the new attachment id from Bugzilla's attach-patch response page.

    Returns the id as a string, or None (with a warning) when the page
    title does not match the expected pattern.
    """
    html_text = string_utils.decode(response_html, target_type=str)
    title_match = re.search(r'<title>Attachment (?P<attachment_id>\d+) added to Bug \d+</title>', html_text)
    if not title_match:
        _log.warning('Unable to parse attachment id')
        return None
    return title_match.group('attachment_id')
def authenticate(self):
    """Log in to Bugzilla through the mechanize browser, retrying with
    freshly-prompted credentials up to 5 times before raising.

    Sets self.authenticated and self.username on success.
    """
    if self.authenticated:
        return
    credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
    attempts = 0
    while not self.authenticated:
        attempts += 1
        # Stored credentials are only trusted on the first attempt; after a
        # failure the user is re-prompted.
        username, password = credentials.read_credentials(use_stored_credentials=attempts == 1)
        _log.info("Logging in as %s..." % username)
        self.open_url(config_urls.bug_server_url + "index.cgi?GoAheadAndLogIn=1")
        self.browser.select_form(name="login")
        self.browser['Bugzilla_login'] = username
        self.browser['Bugzilla_password'] = password
        self.browser.find_control("Bugzilla_restrictlogin").items[0].selected = False
        response = self.browser.submit()
        match = re.search(b'<title>(.+?)</title>', response.read())
        # If the resulting page has a title, and it contains the word
        # "invalid" assume it's the login failure page.
        if match and re.search(b'Invalid', match.group(1), re.IGNORECASE):
            errorMessage = "Bugzilla login failed: {}".format(string_utils.decode(match.group(1), target_type=str))
            # raise an exception only if this was the last attempt
            if attempts < 5:
                _log.error(errorMessage)
            else:
                raise Exception(errorMessage)
        else:
            self.authenticated = True
            self.username = username
def check(self, patch_string, fs=None):
    """Check style in the given patch.

    Parses the unified diff, style-checks every file with added or
    modified lines, and finally runs the association check from the
    current working directory.
    """
    fs = fs or FileSystem()
    patch_string = string_utils.decode(patch_string, target_type=str)
    patch_files = DiffParser(patch_string.splitlines()).files
    # If the user uses git, checking subversion config file only once is enough.
    call_only_once = True
    for path, diff_file in patch_files.items():
        line_numbers = diff_file.added_or_modified_line_numbers()
        _log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))
        if not line_numbers:
            # NOTE(review): r"\s*png$" matches any path merely *ending* in
            # "png" (the \s* is zero-width) — presumably intended as a
            # ".png" extension check; confirm before tightening.
            match = re.search(r"\s*png$", path)
            if match and fs.exists(path):
                if call_only_once:
                    self._text_file_reader.process_file(file_path=path, line_numbers=None)
                    cwd = FileSystem().getcwd()
                    detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
                    if detection.display_name() == "git":
                        call_only_once = False
                continue
            # Don't check files which contain only deleted lines
            # as they can never add style errors. However, mark them as
            # processed so that we count up number of such files.
            self._text_file_reader.delete_file(path)
            continue
        self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
    self._text_file_reader.do_association_check(fs.getcwd())
def _handle_error(self, driver_output, reference_filename=None):
    """Returns test failures if some unusual errors happen in driver's run.

    Args:
      driver_output: The output from the driver.
      reference_filename: The full path to the reference file which produced
        the driver_output. This arg is optional and should be used only in
        reftests until we have a better way to know which html file is used
        for producing the driver_output.
    """
    # Prefer the reference file's name for logging when one was supplied.
    if reference_filename:
        testname = self._port.relative_test_filename(reference_filename)
    else:
        testname = self._test_name

    failures = []
    if driver_output.timeout:
        failures.append(test_failures.FailureTimeout(bool(reference_filename)))

    if driver_output.crash:
        failures.append(test_failures.FailureCrash(bool(reference_filename),
                                                   driver_output.crashed_process_name,
                                                   driver_output.crashed_pid))
        if driver_output.error:
            _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
        else:
            _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
    elif driver_output.error:
        _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
    # Echo any captured stderr lines (applies to both crash and plain-error cases).
    for line in driver_output.error.splitlines():
        _log.debug("    {}".format(string_utils.decode(line, target_type=str)))
    return failures
def run_and_throw_if_fail(self, args, quiet=False, decode_output=True, **kwargs):
    """Run a command while teeing its output, raising ScriptError on a
    non-zero exit code.

    quiet: suppress echoing to stdout (output still captured for errors).
    decode_output: decode (vs. encode) the captured output using the
    child-process encoding before returning / raising.
    Returns the captured child output.
    """
    # Cache the child's output locally so it can be used for error reports.
    child_out_file = StringIO()
    tee_stdout = sys.stdout
    try:
        if quiet:
            dev_null = open(os.devnull, "w")  # FIXME: Does this need an encoding?
            tee_stdout = dev_null
        child_stdout = Tee(child_out_file, tee_stdout)
        exit_code = self._run_command_with_teed_output(
            args, child_stdout, **kwargs)
    finally:
        # dev_null is only opened when quiet, so only close it then.
        if quiet:
            dev_null.close()
    child_output = child_out_file.getvalue()
    child_out_file.close()
    if decode_output:
        child_output = string_utils.decode(
            child_output, encoding=self._child_process_encoding())
    else:
        child_output = string_utils.encode(
            child_output, encoding=self._child_process_encoding())
    if exit_code:
        raise ScriptError(script_args=args, exit_code=exit_code, output=child_output)
    return child_output
def diff_image(self, expected_contents, actual_contents, tolerance=None):
    """Mock image diff: returns (diff_output, diff_percent, error).

    Both-empty inputs compare equal; one-empty inputs report a diff with
    no textual output; otherwise a textual "< expected / > actual" diff
    is produced when the bytes differ.
    """
    expected_contents = string_utils.encode(expected_contents)
    actual_contents = string_utils.encode(actual_contents)
    if not actual_contents and not expected_contents:
        return (None, 0, None)
    if not actual_contents or not expected_contents:
        return (True, 0, None)
    if b'ref' in expected_contents:
        # Reftest comparisons are expected to run with zero tolerance.
        assert tolerance == 0
    if actual_contents != expected_contents:
        diff_text = "< {}\n---\n> {}\n".format(
            string_utils.decode(expected_contents, target_type=str),
            string_utils.decode(actual_contents, target_type=str),
        )
        return (diff_text, 1, None)
    return (None, 0, None)
def is_invalid_bugzilla_email(self, search_string):
    """Return True when Bugzilla's review-queue search reports that the
    given address "did not match anything"."""
    # NOTE(review): urllib.quote is Python 2 API — under Python 3 this
    # would be urllib.parse.quote. Presumably the surrounding module
    # arranges for a compatible `urllib`; confirm.
    review_queue_url = "request.cgi?action=queue&requester=%s&product=&type=review&requestee=&component=&group=requestee" % urllib.quote(
        search_string)
    results_page = self._load_query(review_queue_url)
    return bool(
        re.search(
            'did not match anything',
            string_utils.decode(results_page.read(), target_type=str)))
def decode_content(self):
    """Populate self.decoded_content from self.content.

    base64-encoded content is decoded to bytes; anything else is decoded
    to str, with None stored when the bytes are not valid text.
    """
    if self.encoding == 'base64' and self.content is not None:
        self.decoded_content = base64.b64decode(self.content)
        return
    try:
        self.decoded_content = string_utils.decode(self.content, target_type=str)
    except UnicodeDecodeError:
        self.decoded_content = None
def run_command(self, args, cwd=None, env=None, input=None, stdout=subprocess.PIPE,
                error_handler=None, ignore_errors=False, return_exit_code=False,
                return_stderr=True, decode_output=True):
    """Popen wrapper for convenience and to work around python bugs.

    Runs `args`, optionally feeding `input` on stdin. By default stderr
    is merged into stdout and the output is decoded with CRLF normalized
    to LF. Returns the exit code when return_exit_code is True, otherwise
    the output; on a non-zero exit the error handler (or the default one)
    receives a ScriptError.
    """
    assert(isinstance(args, list) or isinstance(args, tuple))
    start_time = time.time()
    stdin, string_to_communicate = self._compute_stdin(input)
    # Merge stderr into stdout unless the caller asked to discard it.
    stderr = self.STDOUT if return_stderr else None
    process = self.popen(args, stdin=stdin, stdout=stdout, stderr=stderr,
                         cwd=cwd, env=env, close_fds=self._should_close_fds())
    with process:
        if not string_to_communicate:
            output = process.communicate()[0]
        else:
            output = process.communicate(string_utils.encode(string_to_communicate, encoding='utf-8'))[0]
    # run_command automatically decodes to unicode() and converts CRLF to LF unless explicitly told not to.
    if decode_output:
        output = string_utils.decode(output, encoding=self._child_process_encoding()).replace('\r\n', '\n')
    # wait() is not threadsafe and can throw OSError due to:
    # http://bugs.python.org/issue1731717
    exit_code = process.wait()
    _log.debug('"%s" took %.2fs' % (self.command_for_printing(args), time.time() - start_time))
    if return_exit_code:
        return exit_code
    if exit_code:
        script_error = ScriptError(script_args=args, exit_code=exit_code, output=output, cwd=cwd)
        if ignore_errors:
            assert error_handler is None, "don't specify error_handler if ignore_errors is True"
            error_handler = Executive.ignore_error
        (error_handler or self.default_error_handler)(script_error)
    return output
def generate_crash_log(self, stdout, stderr):
    """Locate this process's core dump and return (stderr, formatted crash log).

    Looks for a dump in WEBKIT_CORE_DUMPS_DIRECTORY (matching the pid, or
    the process name when the pid is unknown); otherwise falls back to
    coredumpctl via systemd. When nothing is found, the log contains
    instructions for enabling core dumps.
    """
    pid_representation = str(self.pid or '<unknown>')
    log_directory = os.environ.get("WEBKIT_CORE_DUMPS_DIRECTORY")
    errors = []
    crash_log = ''
    expected_crash_dump_filename = "core-pid_%s.dump" % pid_representation
    proc_name = "%s" % (self.name)

    def match_filename(filesystem, directory, filename):
        # Exact filename when the pid is known; otherwise any dump whose
        # name mentions the process name is a candidate.
        if self.pid:
            return filename == expected_crash_dump_filename
        return filename.find(self.name) > -1

    # Poor man which, ignore any failure.
    for coredumpctl in [['coredumpctl'], ['flatpak-spawn', '--host', 'coredumpctl'], []]:
        try:
            if not self._executive.run_command(coredumpctl, return_exit_code=True):
                break
        except:
            continue

    if log_directory:
        dumps = self._filesystem.files_under(
            log_directory, file_filter=match_filename)
        if dumps:
            # Get the most recent coredump matching the pid and/or process name.
            coredump_path = list(reversed(sorted(dumps)))[0]
            if not self.newer_than or self._filesystem.mtime(coredump_path) > self.newer_than:
                crash_log, errors = self._get_gdb_output(coredump_path)
    elif coredumpctl:
        crash_log, errors = self._get_trace_from_systemd(coredumpctl, pid_representation)

    stderr_lines = errors + string_utils.decode(str(stderr or '<empty>'), errors='ignore').splitlines()
    errors_str = '\n'.join(('STDERR: ' + stderr_line) for stderr_line in stderr_lines)
    # Demangle C++ symbols in the collected stderr for readability.
    # NOTE(review): communicate() is given a str while the pipes were opened
    # in binary mode, and the result may be bytes — presumably handled by
    # the executive wrapper; confirm.
    cppfilt_proc = self._executive.popen(
        ['c++filt'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    errors_str = cppfilt_proc.communicate(errors_str)[0]

    if not crash_log:
        if not log_directory:
            log_directory = "/path/to/coredumps"
        core_pattern = self._filesystem.join(log_directory, "core-pid_%p.dump")
        crash_log = """\
Coredump %(expected_crash_dump_filename)s not found. To enable crash logs:

- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
- enable core dumps: ulimit -c unlimited
- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(log_directory)s

""" % locals()
    return (stderr, """\
crash log for %(proc_name)s (pid %(pid_representation)s):

%(crash_log)s
%(errors_str)s""" % locals())
def read_checksum(filehandle):
    """Return the checksum embedded in a PNG's tEXt comment, or None.

    Only the first 2048 bytes are scanned — the comment is expected to be
    at the beginning of the file.
    """
    header = string_utils.encode(filehandle.read(2048))
    marker = b'tEXtchecksum\x00'
    marker_pos = header.find(marker)
    if marker_pos == -1:
        return
    start = marker_pos + len(marker)
    # The checksum is the 32 characters immediately following the marker.
    return string_utils.decode(header[start:start + 32], target_type=str)
def _read(self):
    """Read one result from the ImageDiff helper's stdout.

    Returns (diff_image_bytes, diff_percent, error_string_or_None).
    The helper optionally emits a Content-Length-prefixed image, then a
    final line starting with 'diff'.
    """
    deadline = time.time() + 2.0
    output = None
    output_image = b''
    while not self._process.timed_out and not self._process.has_crashed():
        output = self._process.read_stdout_line(deadline)
        if self._process.timed_out or self._process.has_crashed(
        ) or not output:
            break
        if output.startswith(
                b'diff'):  # This is the last line ImageDiff prints.
            break
        if output.startswith(b'Content-Length'):
            # A diff image follows: read exactly content_length bytes,
            # then the trailing 'diff' line.
            m = re.match(br'Content-Length: (\d+)', output)
            content_length = int(
                string_utils.decode(m.group(1), target_type=str))
            output_image = self._process.read_stdout(
                deadline, content_length)
            output = self._process.read_stdout_line(deadline)
            break
    stderr = string_utils.decode(self._process.pop_all_buffered_stderr(), target_type=str)
    err_str = ''
    if stderr:
        err_str += "ImageDiff produced stderr output:\n" + stderr
    if self._process.timed_out:
        err_str += "ImageDiff timed out\n"
    if self._process.has_crashed():
        err_str += "ImageDiff crashed\n"
    diff_percent = 0
    if output and output.startswith(b'diff'):
        # NOTE(review): m is not checked for None before m.group(2); a
        # malformed 'diff' line would raise AttributeError here — confirm
        # ImageDiff's output format guarantees a match.
        m = re.match(b'diff: (.+)% (passed|failed)', output)
        if m.group(2) == b'passed':
            return (None, 0, None)
        diff_percent = float(
            string_utils.decode(m.group(1), target_type=str))
    return (output_image, diff_percent, err_str or None)
def execute_command(self, args, stdout=None, stderr=None, env=None, keep_signals=True):
    """Run a subprocess, optionally with signals disabled.

    Returns the command's exit code (0 on success, the CalledProcessError
    code on failure). When verbose, a failure message — including the
    stderr file's contents if one was given — is printed via Console.
    """
    ctx_manager = nullcontext() if keep_signals else disable_signals()
    _log.debug('Running: %s\n' % ' '.join(string_utils.decode(arg) for arg in args))
    result = 0
    with ctx_manager:
        try:
            result = subprocess.check_call(args, stdout=stdout, stderr=stderr, env=env)
        except subprocess.CalledProcessError as err:
            if self.verbose:
                failed_cmd = ' '.join(string_utils.decode(arg) for arg in err.cmd)
                message = "'%s' returned a non-zero exit code." % failed_cmd
                if stderr:
                    with open(stderr.name, 'r') as stderrf:
                        message += " Stderr: %s" % stderrf.read()
                Console.error_message(message)
            return err.returncode
    return result
def test_find_all_log_darwin(self):
    """find_all_logs() must surface all 8 Darwin crash-log fixtures, each
    matching one of the fixture file contents."""
    # Darwin crash-log parsing only runs on macOS; skip elsewhere.
    if not SystemHost().platform.is_mac():
        return
    crash_logs = self.create_crash_logs_darwin()
    all_logs = crash_logs.find_all_logs()
    self.assertEqual(len(all_logs), 8)
    for test, crash_log in all_logs.items():
        self.assertTrue(crash_log in [string_utils.decode(value) for value in self.files.values()])
        if test.split('-')[0] != 'Sandbox':
            # Non-sandbox entries are "Name-<pid>"; the pid must fall in
            # the fixture range (or the log is attributed to "Unknown").
            self.assertTrue(test == "Unknown" or int(test.split("-")[1]) in range(28527, 28531))
def main(argv):
    """Entry point: parse platform/configuration options plus an optional
    URL, then launch the selected port's MiniBrowser.

    Returns the browser's exit status, or 1 on error.
    """
    option_parser = argparse.ArgumentParser(usage="%(prog)s [options] [url]", add_help=False)
    groups = [("Platform options", platform_options()), ("Configuration options", configuration_options())]
    # Convert options to argparse, so that we can use parse_known_args() which is not supported in optparse.
    # FIXME: Globally migrate to argparse. https://bugs.webkit.org/show_bug.cgi?id=213463
    for group_name, group_options in groups:
        option_group = option_parser.add_argument_group(group_name)
        for option in group_options:
            # Skip deprecated option
            if option.get_opt_string() != "--target":
                # optparse uses ("NO", "DEFAULT") as its no-default sentinel.
                default = None
                if option.default != ("NO", "DEFAULT"):
                    default = option.default
                option_group.add_argument(option.get_opt_string(), action=option.action,
                                          dest=option.dest, help=option.help,
                                          const=option.const, default=default)
    option_parser.add_argument('url', metavar='url', type=lambda s: decode(s, 'utf8'), nargs='?', help='Website URL to load')
    options, args = option_parser.parse_known_args(argv)
    if not options.platform:
        options.platform = "mac"
    # Convert unregistered command-line arguments to utf-8 and append parsed
    # URL. convert_arg_line_to_args() returns a list containing a single
    # string, so it needs to be split again.
    browser_args = [decode(s, "utf-8") for s in option_parser.convert_arg_line_to_args(' '.join(args))[0].split()]
    if options.url:
        browser_args.append(options.url)
    try:
        port = factory.PortFactory(Host()).get(options.platform, options=options)
        return port.run_minibrowser(browser_args)
    except BaseException as e:
        # Only report genuine Exceptions; KeyboardInterrupt/SystemExit fall
        # through to the error return silently.
        if isinstance(e, Exception):
            print('\n%s raised: %s' % (e.__class__.__name__, str(e)), file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
        return 1
def _get_trace_from_flatpak(self):
    """Fetch a gdb stack trace for the crashed process by running
    webkit-flatpak on the host through flatpak-spawn.

    Returns (crash_log, errors) — the raw trace and decoded stderr lines.
    """
    # Restrict the trace to dumps newer than the known crash time, when available.
    if self.newer_than:
        trace_flag = "--gdb-stack-trace=@%f" % self.newer_than
    else:
        trace_flag = "--gdb-stack-trace"
    script = self._webkit_finder.path_to_script('webkit-flatpak')
    command = [
        'flatpak-spawn', '--host',
        script,
        '--%s' % self._port_name,
        "--%s" % self._configuration.lower(),
        trace_flag,
    ]
    proc = self._executive.popen(command, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    crash_log, stderr = proc.communicate()
    errors = string_utils.decode(str(stderr or '<empty>'), errors='ignore').splitlines()
    return crash_log, errors
def _parse_current_build_cell(self, builder, cell):
    """Fill in builder['activity'] and builder['pending_builds'] from the
    current-build cell of a buildbot status row."""
    # Convert rendered contents to native string
    rendered = string_utils.decode(cell.renderContents(), target_type=str)
    # BeautifulSoup and bs4 render differently
    separator = '<br/>' if '<br/>' in rendered else '<br />'
    activity_lines = rendered.split(separator)
    builder["activity"] = activity_lines[0]  # normally "building" or "idle"
    # The middle lines document how long left for any current builds.
    match = re.match(r"(?P<pending_builds>\d) pending", activity_lines[-1])
    builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
def _parse_leaks_output(self, leaks_output):
    """Parse valgrind's XML report, collecting new definite-leak errors.

    Returns the set of errors (or notices for already-reported ones) for
    this report, updates self._errors / self._suppressions, and returns
    None when the XML cannot be parsed.
    """
    try:
        parsed_string = parseString(leaks_output)
    except ExpatError as e:
        # NOTE(review): parse_failed is assigned but never read — dead store?
        parse_failed = True
        _log.error("could not parse %s: %s" % (string_utils.decode(leaks_output, target_type=str), e))
        return
    cur_report_errors = set()
    commandline = None
    preamble = parsed_string.getElementsByTagName("preamble")[0]
    for node in preamble.getElementsByTagName("line"):
        if node.localName == "line":
            for x in node.childNodes:
                if x.nodeType == node.TEXT_NODE and "Command" in x.data:
                    commandline = x.data
                    break
    raw_errors = parsed_string.getElementsByTagName("error")
    for raw_error in raw_errors:
        # Ignore "possible" leaks and InvalidRead/Write by default.
        if (get_text_of(raw_error, "kind") != "Leak_PossiblyLost") and \
           (get_text_of(raw_error, "kind") != "Leak_StillReachable") and \
           (get_text_of(raw_error, "kind") != "InvalidWrite") and \
           (get_text_of(raw_error, "kind") != "InvalidRead"):
            error = ValgrindError(self._executive, self._source_dir, raw_error)
            if error not in cur_report_errors:
                # We haven't seen such errors doing this report yet...
                if error in self._errors:
                    # ... but we saw it in earlier reports, e.g. previous UI test
                    cur_report_errors.add("This error was already printed in "
                                          "some other test, see 'hash=#%016X#'" % \
                                          error.error_hash())
                else:
                    # ... and we haven't seen it in other tests as well
                    self._errors.add(error)
                    cur_report_errors.add(error)
    # Tally suppression usage counts from the report footer.
    suppcountlist = parsed_string.getElementsByTagName("suppcounts")
    if len(suppcountlist) > 0:
        suppcountlist = suppcountlist[0]
        for node in suppcountlist.getElementsByTagName("pair"):
            count = get_text_of(node, "count")
            name = get_text_of(node, "name")
            self._suppressions[name] += int(count)
    return cur_report_errors
def _read_header(self, block, line, header_text, header_attr, header_filter=None):
    """Consume `line` if it is the given header and the attribute is unset.

    The header's second whitespace-separated token is decoded, optionally
    passed through header_filter, and stored on `block` as header_attr.
    Returns True when the line was consumed, False otherwise.
    """
    # Skip lines that are not this header, or headers already recorded.
    if not line.startswith(header_text) or getattr(block, header_attr) is not None:
        return False
    value = string_utils.decode(line.split()[1], target_type=str)
    if header_filter:
        value = header_filter(value)
    setattr(block, header_attr, value)
    return True
def _run_command_with_teed_output(self, args, teed_output, **kwargs):
    """Run `args` with stderr merged into stdout, streaming every line to
    `teed_output`; return the child's exit code."""
    child = self.popen(args, stdout=self.PIPE, stderr=self.STDOUT,
                       close_fds=self._should_close_fds(), **kwargs)
    with child:
        # Use our own custom wait loop because Popen ignores a tee'd
        # stderr/stdout.
        # FIXME: This could be improved not to flatten output to stdout.
        while child.poll() is None:
            teed_output.write(string_utils.decode(child.stdout.readline(), target_type=str))
        return child.poll()
def _check_create_bug_response(self, response_html):
    """Parse Bugzilla's create-bug response page.

    Returns the new bug id on success; otherwise raises an Exception
    carrying whatever error message could be scraped from the page body.
    """
    html_text = string_utils.decode(response_html, target_type=str)
    title_match = re.search(r'<title>Bug (?P<bug_id>\d+) Submitted[^<]*</title>', html_text)
    if title_match:
        return title_match.group('bug_id')
    # No success title: scrape the page body for Bugzilla's error text.
    body_match = re.search(
        '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
        html_text,
        re.DOTALL)
    error_message = "FAIL"
    if body_match:
        text_lines = BeautifulSoup(body_match.group('error_message')).findAll(text=True)
        error_message = "\n" + '\n'.join([" " + line.strip() for line in text_lines if line.strip()])
    raise Exception("Bug not created: {}".format(error_message))