def _process_series(self, pw_series) -> None:
    """Ingest one Patchwork series: log it, pick a tree, and dispatch it.

    Series already seen are skipped.  Raises IncompleteSeries when
    patchwork has not yet received every patch of the series.
    """
    if pw_series['id'] in self.seen_series:
        log(f"Already seen {pw_series['id']}", "")
        return

    s = PwSeries(self._pw, pw_series)

    log("Series info",
        f"Series ID {s['id']}\n" +
        f"Series title {s['name']}\n" +
        f"Author {s['submitter']['name']}\n" +
        f"Date {s['date']}")

    log_open_sec('Patches')
    for patch in s['patches']:
        log(patch['name'], "")
    log_end_sec()

    if not s['received_all']:
        raise IncompleteSeries

    comment = self.series_determine_tree(s)

    # A series with a selected tree goes to that tree's worker queue;
    # otherwise record the selection outcome and mark it finished now.
    if getattr(s, 'tree_name', None):
        s.tree_selection_comment = comment
        self._workers[s.tree_name].queue.put(s)
    else:
        core.write_tree_selection_result(self.result_dir, s, comment)
        core.mark_done(self.result_dir, s)

    self.seen_series.add(s['id'])
def _check_tree(self):
    """Verify the work tree has no local modifications.

    Raises TreeNotClean when `git status` reports tracked changes.
    """
    core.log_open_sec("Checking tree " + self.name)
    try:
        status = self.git_status(untracked="no", short=True)
        if status:
            raise TreeNotClean(f"Tree {self.name} is not clean")
    finally:
        core.log_end_sec()
def process_series(self, pw_series) -> None:
    """Process one series inside its own log section (see _process_series)."""
    header = f"Checking series {pw_series['id']} with {pw_series['total']} patches"
    log_open_sec(header)
    try:
        self._process_series(pw_series)
    finally:
        log_end_sec()
def series_determine_tree(self, s: PwSeries) -> str:
    """Select the target tree for *s* inside a log section.

    Returns the human-readable comment produced by _series_determine_tree().
    """
    log_open_sec('Determining the tree')
    try:
        result = self._series_determine_tree(s)
    finally:
        log_end_sec()
    return result
def reset(self, fetch=None):
    """Hard-reset the tree to its base branch.

    fetch=None means "fetch when a remote is configured";
    True / False force fetching on or off.
    """
    core.log_open_sec("Reset tree " + self.name)
    try:
        want_fetch = fetch if fetch is not None else self.remote
        if want_fetch:
            self.git_fetch(self.remote)
        self.git_reset(self.branch, hard=True)
    finally:
        core.log_end_sec()
def cmd_run(cmd, shell=True, include_stderr=False):
    """Run a command.

    Run a command in subprocess and return the stdout;
    optionally return stderr as well as a second value.

    Parameters
    ----------
    cmd : str
        shell command with all its arguments
    shell : bool, optional
        invoke command in a full shell
    include_stderr : bool, optional
        return stderr as a second return value

    Raises
    ------
    CmdError
        If command returned non-zero exit code.

    Returns
    -------
    string
        the stdout, optionally stderr as well as a second string value
    """
    process = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    core.log_open_sec("CMD " + process.args)
    stdout, stderr = process.communicate()
    # Tools occasionally emit bytes that are not valid UTF-8 (e.g. binary
    # fragments in patch output); drop them instead of raising
    # UnicodeDecodeError, matching the other cmd_run variant in this project.
    stdout = stdout.decode("utf-8", "ignore")
    stderr = stderr.decode("utf-8", "ignore")
    process.stdout.close()
    process.stderr.close()

    # Prepend a newline so multi-line stderr starts on its own line in the
    # log, then strip a single trailing newline.
    stderr = "\n" + stderr
    if stderr[-1] == "\n":
        stderr = stderr[:-1]

    core.log("RETCODE", process.returncode)
    core.log("STDOUT", stdout)
    core.log("STDERR", stderr)
    core.log("END", datetime.datetime.now().strftime("%H:%M:%S.%f"))
    core.log_end_sec()

    if process.returncode != 0:
        if stderr and stderr[-1] == "\n":
            stderr = stderr[:-1]
        raise CmdError("Command failed: %s" % (process.args, ),
                       process.returncode, stdout, stderr)

    if not include_stderr:
        return stdout
    return stdout, stderr
def check_already_applied(self, thing):
    """Return True when *thing* is already present in a freshly reset tree."""
    core.log_open_sec("Checking if applied " + thing.title)
    try:
        self.reset()
        applied = self.is_applied(thing)
    finally:
        core.log_end_sec()
    return applied
def load_tests(self, tests_dir, name):
    """Load all tests of one kind ("series" or "patch").

    Each subdirectory of ``tests_dir/name`` is one Test.  Subdirectories
    are visited in sorted order so the test execution order is
    deterministic (os.listdir() order is arbitrary), and the log section
    is closed even if a Test fails to initialize.
    """
    core.log_open_sec(name.capitalize() + " tests")
    try:
        tests_subdir = os.path.join(tests_dir, name)
        tests = [Test(os.path.join(tests_subdir, td), td)
                 for td in sorted(os.listdir(tests_subdir))]
    finally:
        core.log_end_sec()
    return tests
def _tree_name_should_be_local_files(raw_email):
    """Decide from diffstat lines whether the patch targets the local trees.

    Scans lines of the form "path | 5 +-" in *raw_email*.  Returns False
    as soon as a touched file matches none of the known lists; otherwise
    returns True only if at least one non-excluded file is in
    required_files.
    """
    acceptable_files = {
        'CREDITS',
        'MAINTAINERS',
        'Documentation/',
        'include/',
    }
    required_files = {
        'net/',
        'kernel/bpf/',
        'drivers/net/',
        'drivers/net/ethernet/',
        # BUG FIX: 'tools/bpf/' and 'tools/lib/bpf/' were fused into one
        # bogus entry ('tools/bpf/tools/lib/bpf/') by a missing comma,
        # so neither path ever matched.
        'tools/bpf/',
        'tools/lib/bpf/',
        'tools/testing/selftests/net/',
        'tools/testing/selftests/bpf/'
    }
    excluded_files = {
        'drivers/net/wireless/',
    }
    all_files = acceptable_files.union(required_files)

    required_found = False
    lines = raw_email.split('\n')
    regex = re.compile(r'^\s*([-\w/._]+)\s+\|\s+\d+\s*[-+]*\s*$')
    for line in lines:
        match = regex.match(line)
        if not match:
            continue

        found = False
        excluded = False
        file_name = match.group(1)
        log_open_sec(f'Checking file name {file_name}')
        # Truncated diffstat names start with ".../" and need
        # suffix-style matching rather than prefix matching.
        if file_name.startswith('.../'):
            compare = _file_name_match_dotted
        else:
            compare = _file_name_match_start

        for fn in excluded_files:
            excluded = excluded or compare(fn, file_name)
            if excluded:
                log(f'Excluded by {fn}', "")
                break
        for fn in all_files:
            matches = compare(fn, file_name)
            if not matches:
                continue
            log(f'Matched by {fn}', "")
            found = True
            # Excluded files may still match, but never count as required.
            if not excluded:
                required_found = required_found or fn in required_files
        log_end_sec()
        if not found:
            log(f'File name {file_name} was not matched by any list', "")
            return False

    if not required_found:
        log('No required files found', "")
    return required_found
def contains(self, commit):
    """Return True when *commit* is an ancestor of the current HEAD."""
    core.log_open_sec("Checking for commit " + commit)
    found = True
    try:
        self.git_merge_base(commit, 'HEAD', is_ancestor=True)
    except CMD.CmdError:
        # Non-zero exit from merge-base means "not an ancestor".
        found = False
    finally:
        core.log_end_sec()
    return found
def __init__(self, result_dir):
    """Set up the result directory and load the series- and patch-level tests."""
    core.log_open_sec("Tester init")
    self.result_dir = result_dir
    os.makedirs(self.result_dir, exist_ok=True)

    test_root = os.path.abspath(core.CORE_DIR + "../../tests")
    self.series_tests = self.load_tests(test_root, "series")
    self.patch_tests = self.load_tests(test_root, "patch")
    core.log_end_sec()
def check_applies(self, thing):
    """Return True when *thing* applies cleanly on top of a freshly reset tree."""
    core.log_open_sec("Test-applying " + thing.title)
    applies = True
    try:
        self.reset()
        self.apply(thing)
    except PatchApplyError:
        applies = False
    finally:
        core.log_end_sec()
    return applies
def _post(self, req, headers, data):
    """POST *data* to the Patchwork API endpoint *req* and return the response.

    Logs the request and the response.  Like _request(), falls back to
    logging the raw text when the response body is not valid JSON
    instead of letting json.decoder.JSONDecodeError escape to the caller
    and losing the response object.
    """
    url = f'{self._proto}{self.server}/api/1.1/{req}'
    try:
        core.log_open_sec(f"Patchwork {self.server} post: {url}")
        ret = requests.post(url, headers=headers, data=data)
        core.log("Headers", headers)
        core.log("Data", data)
        core.log("Response", ret)
        try:
            core.log("Response data", ret.json())
        except json.decoder.JSONDecodeError:
            core.log("Response data", ret.text)
    finally:
        core.log_end_sec()

    return ret
def _request(self, url):
    """GET *url* from the Patchwork server, logging the request and response."""
    try:
        core.log_open_sec(f"Patchwork {self.server} request: {url}")
        response = requests.get(url)
        core.log("Response", response)
        # Some endpoints can return non-JSON bodies; log the raw text then.
        try:
            body = response.json()
        except json.decoder.JSONDecodeError:
            body = response.text
        core.log("Response data", body)
    finally:
        core.log_end_sec()

    return response
def __init__(self, path, name):
    """Load the test definition at *path*, importing its python module if any."""
    self.path = path
    self.name = name

    core.log_open_sec("Test %s init" % (self.name, ))
    self._info_load()

    # A test may implement its logic as a python function rather than an
    # executable; resolve it dynamically from the info file.
    if "pymod" in self.info:
        test_group = os.path.basename(os.path.dirname(path))
        module_path = "tests.%s.%s.%s" % (test_group, name, self.info["pymod"])
        module = importlib.import_module(module_path)
        self._exec_pyfunc = getattr(module, self.info["pyfunc"])
    core.log_end_sec()
def _apply_patch_safe(self, patch):
    # Apply one patch with "git am", converting any git failure into
    # PatchApplyError and making sure a failed "am" does not leave the
    # tree in a half-applied state.
    try:
        # NOTE(review): assumes patch.write_out() flushes its data so git
        # sees the complete patch through the temp file name -- TODO confirm.
        with tempfile.NamedTemporaryFile() as fp:
            patch.write_out(fp)
            core.log_open_sec("Applying patch " + patch.title)
            try:
                self.git_am(fp.name)
            finally:
                core.log_end_sec()
    except CMD.CmdError as e:
        # Clean up the interrupted "am"; ignore errors since there may be
        # nothing to abort (e.g. the failure happened before am started).
        try:
            self.git("am --abort")
        except CMD.CmdError:
            pass
        raise PatchApplyError(e) from e
def exec(self, tree, thing, result_dir):
    """Run this test against *thing* and record the outcome.

    Returns True when the test passed (or is disabled).
    """
    if self.is_disabled():
        core.log(f"Skipping test {self.name} - disabled", "")
        return True

    core.log_open_sec(f"Running test {self.name}")
    os.makedirs(os.path.join(result_dir, self.name), exist_ok=True)

    retcode, out, err, desc = self._exec(tree, thing, result_dir)
    self.write_result(result_dir, retcode, out, err, desc)
    core.log_end_sec()

    return retcode == 0
def fixup_pull_covers(self):
    # For pull requests posted as series patchwork treats the cover letter
    # as a patch so the cover is null. Try to figure that out but still
    # use first patch for prefix, pulls don't have dependable subjects.
    #
    # all_reply holds: None (nothing seen yet), False (disqualified), or
    # the shared In-Reply-To message id all patches reply to.
    all_reply = None
    log_open_sec("Searching for implicit cover/pull request")
    for p in self.patches:
        lines = p.raw_patch.split('\n')
        r_in_reply = re.compile(r'^In-Reply-To: <(.*)>$')
        reply_to = None
        for line in lines:
            if line == "":
                # end of headers
                if reply_to is None:
                    log("Patch had no reply header", "")
                    all_reply = False
                break
            match = r_in_reply.match(line)
            if not match:
                continue
            reply_to = match.group(1)
            log("Patch reply header", reply_to)
            # Every patch must reply to the same message for it to be an
            # implicit cover; any mismatch disqualifies the series.
            if all_reply is None:
                all_reply = reply_to
            elif all_reply != reply_to:
                all_reply = False
                log("Mismatch in replies", "")
    log("Result", all_reply)
    if all_reply:
        # Look the common parent message up in patchwork; it is only a
        # usable cover if it carries a pull_url.
        covers = self.pw.get_all('patches', filters={'msgid': all_reply},
                                 api='1.2')
        if len(covers) != 1:
            log('Unique cover letter not found', len(covers))
        else:
            cover = covers[0]
            if 'pull_url' in cover and cover['pull_url']:
                self.cover_pull = cover
                log('Attached pull cover', '')
            else:
                log('Pull URL not present in cover', '')
    log_end_sec()
def run(self) -> None:
    # Worker thread main loop: set up per-tree logging and load the tests,
    # then repeatedly synchronize with the poller via the barrier and
    # drain the series queue.
    config = configparser.ConfigParser()
    config.read(['nipa.config', 'pw.config', 'tester.config'])

    # Each worker writes to its own per-tree log file.
    core.log_init(
        config.get('log', 'type', fallback='org'),
        config.get('log', 'file',
                   fallback=os.path.join(core.NIPA_DIR,
                                         f"{self.tree.name}.org")))

    core.log_open_sec("Tester init")
    if not os.path.exists(self.result_dir):
        os.makedirs(self.result_dir)
    tests_dir = os.path.abspath(core.CORE_DIR + "../../tests")
    self.series_tests = load_tests(tests_dir, "series")
    self.patch_tests = load_tests(tests_dir, "patch")
    core.log_end_sec()

    while not self.should_die:
        # First barrier: wait for the poller to release the workers.
        self.barrier.wait()

        while not self.should_die and not self.queue.empty():
            s = self.queue.get()
            if s is None:
                continue
            self.test_series(self.tree, s)
            self.done_queue.put(s)

            # If we're the last worker with work to do - let the poller run
            core.log(
                f"Checking barrier {self.barrier.n_waiting}/{self.barrier.parties} "
            )
            if self.barrier.parties == self.barrier.n_waiting + 1:
                break

        # Second barrier: signal the poller this worker is done/idle.
        self.barrier.wait()
def exec(self, tree, thing, result_dir):
    """Run this test against *thing*, write its result files, and return
    True when the test passed (or is disabled).

    Writes retcode/stdout/stderr/summary files into result_dir/<test name>.
    """
    if self.is_disabled():
        core.log(f"Skipping test {self.name} - disabled", "")
        return True

    core.log_open_sec(f"Running test {self.name}")
    test_dir = os.path.join(result_dir, self.name)
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)

    retcode, out, err = self._exec(tree, thing, result_dir)

    # Write out the results
    with open(os.path.join(test_dir, "retcode"), "w+") as fp:
        fp.write(str(retcode))
    with open(os.path.join(test_dir, "stdout"), "w+") as fp:
        fp.write(out)
    with open(os.path.join(test_dir, "stderr"), "w+") as fp:
        fp.write(err)
    with open(os.path.join(test_dir, "summary"), "w+") as fp:
        fp.write("==========\n")
        if retcode == 0:
            fp.write("%s - OKAY\n" % (self.name, ))
        else:
            fp.write("%s - FAILED\n" % (self.name, ))
        fp.write("\n")
        # Prefer stderr for the summary body, fall back to stdout;
        # either way make sure it ends with exactly one newline.
        if err.strip():
            # BUG FIX: was "if err[:-1] != '\n'", which compares everything
            # BUT the last character to "\n" and is almost always true, so
            # a trailing blank line was added even when the output already
            # ended with a newline.  Test the final character instead.
            if not err.endswith('\n'):
                err += '\n'
            fp.write(err)
        elif out.strip():
            if not out.endswith('\n'):
                out += '\n'
            fp.write(out)
    core.log_end_sec()

    return retcode == 0
def _series_determine_tree(self, s: PwSeries) -> str:
    # Decide which tree the series targets.  Sets s.tree_name (possibly
    # empty), s.tree_mark_expected and s.tree_marked as side effects, and
    # returns a human-readable comment explaining the decision.
    s.tree_name = netdev.series_tree_name_direct(s)
    s.tree_mark_expected = True
    s.tree_marked = bool(s.tree_name)
    if s.tree_name:
        log(f'Series is clearly designated for: {s.tree_name}', "")
        return f"Clearly marked for {s.tree_name}"

    # No explicit designation - decide from touched files whether it even
    # belongs to the trees we handle.
    s.tree_mark_expected, should_test = netdev.series_tree_name_should_be_local(
        s)
    if not should_test:
        log("No tree designation found or guessed", "")
        return "Not a local patch"

    if netdev.series_ignore_missing_tree_name(s):
        s.tree_mark_expected = None
        log('Okay to ignore lack of tree in subject, ignoring series', "")
        return "Series ignored based on subject"

    if s.tree_mark_expected:
        log_open_sec('Series should have had a tree designation')
    else:
        log_open_sec('Series okay without a tree designation')

    # Fixes target net; everything else is guessed by whether it applies
    # cleanly to net-next.
    if netdev.series_is_a_fix_for(s, self._trees["net"]):
        s.tree_name = "net"
    elif self._trees["net-next"].check_applies(s):
        s.tree_name = "net-next"

    if s.tree_name:
        log(f"Target tree - {s.tree_name}", "")
        res = f"Guessed tree name to be {s.tree_name}"
    else:
        log("Target tree not found", "")
        res = "Guessing tree name failed - patch did not apply"
    log_end_sec()
    return res
# Main polling loop (fragment - the try block continues past this chunk).
pw = Patchwork(config)

partial_series = 0
partial_series_id = 0

prev_time = state['last_poll']

# Loop
try:
    while True:
        poll_ival = 120

        prev_time = state['last_poll']
        prev_time_obj = datetime.datetime.fromisoformat(prev_time)
        # Overlap polls by 4 minutes so series that are slow to arrive in
        # patchwork are not missed between polls.
        since = prev_time_obj - datetime.timedelta(minutes=4)
        state['last_poll'] = str(datetime.datetime.utcnow())

        log_open_sec(f"Checking at {state['last_poll']} since {since}")
        json_resp = pw.get_series_all(since=since)
        log(f"Loaded {len(json_resp)} series", "")

        pw_series = {}
        for pw_series in json_resp:
            log_open_sec(f"Checking series {pw_series['id']} " +
                         f"with {pw_series['total']} patches")
            # NOTE(review): assumes series IDs are monotonically increasing,
            # so anything at or below last_id was already processed.
            if pw_series['id'] <= state['last_id']:
                log(f"Already seen {pw_series['id']}", "")
                log_end_sec()
                continue

            s = PwSeries(pw, pw_series)
def pw_upload_results(series_dir, pw, config):
    """Upload the test results for one series to patchwork.

    Thin logging wrapper around _pw_upload_results().
    """
    # The section title used to say 'Upload initial' - a copy/paste from
    # initial_scan(); label it with what this function actually does.
    log_open_sec('Upload results')
    try:
        _pw_upload_results(series_dir, pw, config)
    finally:
        log_end_sec()
def initial_scan(results_dir, pw, config):
    """Run the initial results scan/upload pass inside its own log section.

    Thin logging wrapper around _initial_scan().
    """
    log_open_sec('Upload initial')
    try:
        _initial_scan(results_dir, pw, config)
    finally:
        log_end_sec()
def run(self) -> None:
    # Poller main loop: alternate small (10 min) and big (12 h) patchwork
    # scans, hand completed series to the workers via a barrier, and
    # persist polling state on exit.
    partial_series = {}

    prev_big_scan = datetime.datetime.fromtimestamp(self._state['last_poll'])
    prev_req_time = datetime.datetime.utcnow()

    # We poll every 2 minutes, for series from last 10 minutes
    # Every 3 hours we do a larger check of series of last 12 hours to make sure we didn't miss anything
    # apparently patchwork uses the time from the email headers and people back date their emails, a lot
    # We keep a history of the series we've seen in and since the last big poll to not process twice
    try:
        while True:
            this_poll_seen = set()
            req_time = datetime.datetime.utcnow()

            # Decide if this is a normal 4 minute history poll or big scan of last 12 hours
            if prev_big_scan + datetime.timedelta(hours=3) < req_time:
                big_scan = True
                since = prev_big_scan - datetime.timedelta(hours=9)
                log_open_sec(
                    f"Big scan of last 12 hours at {req_time} since {since}"
                )
            else:
                big_scan = False
                since = prev_req_time - datetime.timedelta(minutes=10)
                log_open_sec(f"Checking at {req_time} since {since}")

            json_resp = self._pw.get_series_all(since=since)
            log(f"Loaded {len(json_resp)} series", "")

            had_partial_series = False
            for pw_series in json_resp:
                try:
                    self.process_series(pw_series)
                    this_poll_seen.add(pw_series['id'])
                except IncompleteSeries:
                    # Retry an incomplete series up to 5 times before
                    # giving up on holding the poll window for it.
                    partial_series.setdefault(pw_series['id'], 0)
                    if partial_series[pw_series['id']] < 5:
                        had_partial_series = True
                    partial_series[pw_series['id']] += 1

            if big_scan:
                prev_req_time = req_time
                prev_big_scan = req_time
                # Shorten the history of series we've seen to just the last 12 hours
                self.seen_series = this_poll_seen
                self.done_series &= self.seen_series
            elif had_partial_series:
                log("Partial series, not moving time forward", "")
            else:
                prev_req_time = req_time

            # Unleash all workers
            log("Activate workers", "")
            self._barrier.wait()
            # Wait for workers to come back
            log("Wait for workers", "")
            self._barrier.wait()
            while not self._done_queue.empty():
                s = self._done_queue.get()
                self.done_series.add(s['id'])
                # (reconstructed: this f-string was split by whitespace
                # mangling in the original text)
                log(f"Testing complete for series {s['id']}", "")

            # Keep the overall cadence at one poll per ~2 minutes.
            secs = 120 - (datetime.datetime.utcnow() - req_time).total_seconds()
            if secs > 0:
                log("Sleep", secs)
                time.sleep(secs)
            log_end_sec()
            # Touch-file based graceful shutdown.
            if os.path.exists('poller.quit'):
                os.remove('poller.quit')
                break
    finally:
        log_open_sec(f"Stopping threads")
        self._barrier.abort()
        for _, worker in self._workers.items():
            worker.should_die = True
            worker.queue.put(None)
        for _, worker in self._workers.items():
            # NOTE(review): log() is called with a single argument here,
            # unlike the two-argument calls elsewhere -- confirm log()
            # has a default for its second parameter.
            log(f"Waiting for worker {worker.tree.name} / {worker.name}")
            worker.join()
        log_end_sec()

        # Persist state so the next run resumes from the last big scan.
        self._state['last_poll'] = prev_big_scan.timestamp()
        self._state['done_series'] = list(self.seen_series)
        # Dump state
        with open('poller.state', 'w') as f:
            json.dump(self._state, f)
# Command line setup and patch loading (script fragment; `parser`,
# `results_dir`, `Series`, `Patch` come from earlier in the file).
parser.add_argument('--tree-name', default='net-next',
                    help='the tree name to expect')
parser.add_argument('--tree-branch', default='master',
                    help='the branch or commit to use as a base for applying patches')
parser.add_argument('--result-dir', default=results_dir,
                    help='the directory where results will be generated')
args = parser.parse_args()

args.mdir = os.path.abspath(args.mdir)
args.tree = os.path.abspath(args.tree)

core.log_open_sec("Loading patches")
try:
    series = Series()
    # A cover letter carries a 0/N subject tag and contains no diff hunks.
    for entry in sorted(os.listdir(args.mdir)):
        with open(os.path.join(args.mdir, entry), 'r') as fp:
            data = fp.read()
        is_cover = re.search(r"\[.* 0+/\d.*\]", data) and \
            not re.search(r"\n@@ -\d", data)
        if is_cover:
            series.set_cover_letter(data)
        else:
            series.add_patch(Patch(data))
finally:
    core.log_end_sec()
def _tree_name_should_be_local_files(raw_email):
    """
    Returns True: patch should have been explicitly designated for local tree
            False: patch has nothing to do with local trees
            None: patch has mixed contents, it touches local code,
                  but also code outside
    """
    acceptable_files = {
        '.../',
        'CREDITS',
        'MAINTAINERS',
        'Documentation/',
        'include/',
    }
    required_files = {
        'Documentation/networking/',
        'include/linux/netdevice.h',
        'include/linux/skbuff.h',
        'include/net/',
        'include/phy/',
        'net/',
        'drivers/atm/',
        'drivers/net/',
        'drivers/dsa/',
        'drivers/nfc/',
        'drivers/phy/',
        'drivers/net/ethernet/',
        'tools/testing/selftests/net/'
    }
    excluded_files = {
        'drivers/net/wireless/',
    }
    all_files = acceptable_files.union(required_files)

    required_found = False
    foreign_found = False
    lines = raw_email.split('\n')
    # Match both diffstat lines ("path | 5 +-") and diff headers ("+++ b/path").
    r_diffstat = re.compile(r'^\s*([-\w/._]+)\s+\|\s+\d+\s*[-+]*\s*$')
    r_header = re.compile(r'\+\+\+ b/([-\w/._]+)$')
    for line in lines:
        match = r_header.match(line)
        if not match:
            match = r_diffstat.match(line)
        if not match:
            continue

        found = False
        excluded = False
        file_name = match.group(1)
        log_open_sec(f'Checking file name {file_name}')
        # Truncated diffstat names start with ".../" and need suffix-style
        # matching rather than prefix matching.
        if file_name.startswith('.../'):
            compare = _file_name_match_dotted
        else:
            compare = _file_name_match_start

        for fn in excluded_files:
            excluded = excluded or compare(fn, file_name)
            if excluded:
                log(f'Excluded by {fn}', "")
                break
        for fn in all_files:
            matches = compare(fn, file_name)
            if not matches:
                continue
            log(f'Matched by {fn}', "")
            found = True
            # Excluded files may still match, but never count as required.
            if not excluded:
                required_found = required_found or fn in required_files
        log_end_sec()
        # Unlike the strict variant, an unmatched file does not abort the
        # scan - it just marks the series as touching foreign code.
        if not found:
            log(f'File name {file_name} was not matched by any list', "")
            foreign_found = True

    log(f'Required found: {required_found}, foreign_found: {foreign_found}', "")
    if not required_found:
        return False
    if foreign_found:
        return None
    return True
def test_series(self, tree, series):
    # Run all series-level and patch-level tests for *series* in *tree*.
    # Returns two lists of booleans: per-series-test results and
    # per-patch lists of per-test results.
    core.log_open_sec("Running tests in tree %s for %s" %
                      (tree.name, series.title))

    series_dir = os.path.join(self.result_dir, str(series.id))
    if not os.path.exists(series_dir):
        os.makedirs(series_dir)

    # Bail out early when the series cannot be applied at all - either it
    # is already in the tree or it simply does not apply.
    if not tree.check_applies(series):
        already_applied = tree.check_already_applied(series)
        if already_applied:
            core.log("Series already applied", "")
            with open(os.path.join(series_dir, "summary"), "w+") as fp:
                fp.write(f"Patch already applied to {tree.name}")
        else:
            core.log("Series does not apply", "")
            with open(os.path.join(series_dir, "summary"), "w+") as fp:
                fp.write(f"Patch does not apply to {tree.name}")
        core.log_end_sec()
        # Single pseudo-result: True if already applied, False otherwise.
        return [already_applied], [already_applied]

    tree.enter()
    try:
        series_ret = []
        patch_ret = []
        tree.reset()
        for test in self.series_tests:
            ret = test.exec(tree, series, series_dir)
            series_ret.append(ret)

        for test in self.patch_tests:
            test.prep()

        # Apply patches one by one, running every patch test on each.
        for patch in series.patches:
            core.log_open_sec("Testing patch " + patch.title)
            current_patch_ret = []
            patch_dir = os.path.join(series_dir, str(patch.id))
            if not os.path.exists(patch_dir):
                os.makedirs(patch_dir)
            try:
                tree.apply(patch)
                for test in self.patch_tests:
                    ret = test.exec(tree, patch, patch_dir)
                    current_patch_ret.append(ret)
            finally:
                core.log_end_sec()
            patch_ret.append(current_patch_ret)
    finally:
        tree.leave()

    core.log_end_sec()
    # Marker file lets other processes know this series is fully tested.
    os.mknod(os.path.join(series_dir, ".tester_done"))
    return series_ret, patch_ret
def cmd_run(cmd, shell=True, include_stderr=False, add_env=None, cwd=None,
            pass_fds=()):
    """Run a command.

    Run a command in subprocess and return the stdout;
    optionally return stderr as well as a second value.

    Parameters
    ----------
    cmd : str
        shell command with all its arguments
    shell : bool, optional
        invoke command in a full shell
    include_stderr : bool, optional
        return stderr as a second return value
    add_env: dict, optional
        additional env variables
    cwd: str
        directory to run the command in
    pass_fds : iterable, optional
        pass extra file descriptors to the command

    Raises
    ------
    CmdError
        If command returned non-zero exit code.

    Returns
    -------
    string
        the stdout, optionally stderr as well as a second string value
    """
    # Extend the inherited environment rather than replacing it.
    env = os.environ.copy()
    if add_env:
        env.update(add_env)

    core.log("START", datetime.datetime.now().strftime("%H:%M:%S.%f"))
    process = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, env=env, cwd=cwd,
                               pass_fds=pass_fds)
    # NOTE(review): "CMD " + process.args assumes cmd is a string (the
    # shell=True default); a list argument would raise TypeError here.
    core.log_open_sec("CMD " + process.args)
    stdout, stderr = process.communicate()
    # Drop invalid UTF-8 bytes rather than raising UnicodeDecodeError.
    stdout = stdout.decode("utf-8", "ignore")
    stderr = stderr.decode("utf-8", "ignore")
    process.stdout.close()
    process.stderr.close()

    # Prepend a newline so multi-line stderr starts on its own line in the
    # log, then strip a single trailing newline.
    stderr = "\n" + stderr
    if stderr[-1] == "\n":
        stderr = stderr[:-1]

    core.log("RETCODE", process.returncode)
    core.log("STDOUT", stdout)
    core.log("STDERR", stderr)
    core.log("END", datetime.datetime.now().strftime("%H:%M:%S.%f"))
    core.log_end_sec()

    if process.returncode != 0:
        if stderr and stderr[-1] == "\n":
            stderr = stderr[:-1]
        raise CmdError("Command failed: %s" % (process.args, ),
                       process.returncode, stdout, stderr)

    if not include_stderr:
        return stdout
    return stdout, stderr
def prep(self):
    """Run this test's prep script, when one is configured and the test is enabled."""
    if "prep" in self.info and not self.is_disabled():
        core.log_open_sec("Preparing for test %s" % (self.name, ))
        CMD.cmd_run(os.path.join(self.path, self.info["prep"]))
        core.log_end_sec()