def _setSliderLabels(refreshClicks):
    """Dash callback: rebuild the commit-slider marks from all tested commits."""
    print(f'\n[CALLBACK] refreshing commit list for {refreshClicks}th time')
    uniqueSHAs = Config.objects().distinct(field='commitSHA')
    print(f'\tFound {len(uniqueSHAs)} unique SHAs')
    rows = []
    for sha in uniqueSHAs:
        c = Config.objects(commitSHA=sha).first()
        rows.append({'SHA': sha,
                     'CommitDate': c.commitDate,
                     'commitMessage': c.commitMessage})
    # DataFrame.append was removed in pandas 2.0; build the frame in one go instead
    options = pd.DataFrame(rows, columns=['SHA', 'CommitDate', 'commitMessage'])
    options = options.sort_values('CommitDate')
    marks = {}
    for i, (_, row) in enumerate(options.iterrows()):
        message = row.commitMessage.replace('\n', ' ')
        marks[i] = f'{row.SHA[0:6]} {message}'
    # Defaults: show the last four commits, newest selected
    return marks, [len(marks) - 4, len(marks) - 1], len(marks) - 1, options.to_json()
def _reloadAvailableCommits(refreshClicks):
    """Dash callback: refresh the commit dropdowns with all tested commits."""
    print(f'\n[CALLBACK] refreshing commit list for {refreshClicks}th time')
    uniqueSHAs = Config.objects().distinct(field='commitSHA')
    print(f'\tFound {len(uniqueSHAs)} unique SHAs')
    options = []
    for sha in uniqueSHAs:
        c = Config.objects(commitSHA=sha).first()
        options.append({'label': f'[{c.commitDate}: {sha[:8]}] {c.commitMessage}',
                        'value': sha})
    # Labels start with the commit date, so sorting by label sorts chronologically
    sorted_options = sorted(options, key=lambda x: x['label'])
    # Defaults: compare the second-newest commit against the newest one
    return (sorted_options, sorted_options[-2]['value'],
            sorted_options, sorted_options[-1]['value'],
            sorted_options, sorted_options[-1]['value'])
def _setSetup(sliderPos, sliderDict):
    """Dash callback: list available setups for the base commit selected on the slider."""
    low_i = sliderPos[0]
    parsedSlider = pd.read_json(sliderDict)
    baseSHA = parsedSlider.iloc[low_i].SHA
    availConfigs = Config.objects(commitSHA=baseSHA)
    possibleSetups = []
    for conf in availConfigs:
        failure = conf.failure is not None
        if conf.setup is None:
            continue
        if failure:
            possibleSetups.append({
                'label': f'{conf.setup.name}: {conf.system} [{conf.failure}]',
                'value': f'{str(conf.id)}',
                'disabled': failure
            })
        else:
            possibleSetups.append({
                'label': f'{conf.setup.name}: {conf.system} Run date [{conf.date}]',
                'value': f'{str(conf.id)}',
                'disabled': failure
            })
    if len(possibleSetups) != 0:
        return possibleSetups, possibleSetups[0]['value']
    return [], []
def generatePlot(self):
    """
    Quick overview plot for commit

    :return:
    """
    try:
        imgur = ImgurUploader()
        confs = Config.objects(commitSHA=self.sha)
        images = []
        # Multiple plots if more than one config was run
        conf: Config
        for conf in confs:
            results = Result.objects(config=conf)
            means = np.array([r.meanTime for r in results])
            mins = np.array([r.minTime for r in results])
            header, all_keys = get_dyn_keys(results)
            header_string = r'$\bf{' + header + '}$'
            labels = generate_label_table(results, all_keys)
            # Sort by minimum time
            sort_keys = np.argsort(mins)[::-1]
            sorted_means = means[sort_keys]
            sorted_mins = mins[sort_keys]
            sorted_labels = labels[sort_keys]
            sorted_labels = np.append(sorted_labels, header_string)
            fig = plt.figure(figsize=(15, len(means) / 4))
            plt.gca().set_title(conf)
            plt.barh(np.arange(len(means)), sorted_means, label='mean')
            plt.barh(np.arange(len(means)), sorted_mins, label='min')
            plt.legend()
            plt.xlabel('nanoseconds')
            plt.xscale('log')
            plt.yticks(np.arange(len(sorted_labels)), sorted_labels)
            plt.grid(which='both', axis='x')
            plt.tight_layout()
            # Upload figure
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            buf.seek(0)
            link, hash = imgur.upload(buf.read())
            conf.perfImgurLink = link
            conf.perfDeleteHash = hash
            conf.save()
        self.updateStatus(1, "PLOTTING", "PLOTTING succeeded\n", link)
    except Exception as e:
        self.updateStatus(-1, "PLOTTING", f"PLOTTING failed\n{e}")
    os.chdir(self.baseDir)
    return True
def updateSetups(sha0, sha1):
    print('\n[CALLBACK] Checking Setups')
    if sha0 is None or sha1 is None:
        print('\tSelection is None')
        return [], []
    print(f'\tComparing {sha0} vs. {sha1}')
    config0_all = Config.objects(commitSHA=sha0).order_by('-date')
    # Listing available setups
    possible_comparisons = []
    for conf0 in config0_all:
        conf1_avail = Config.objects(commitSHA=sha1,
                                     system=conf0.system,
                                     setup=conf0.setup).order_by('-date')
        for conf1 in conf1_avail:
            failure = conf0.failure is not None or conf1.failure is not None
            if failure:
                try:
                    system1 = conf1.system
                except AttributeError:
                    system1 = 'no system'
                try:
                    name1 = conf1.setup.name
                except AttributeError:
                    name1 = 'no name'
                possible_comparisons.append(
                    {'label': f'{name1}: {system1} [{conf0.failure}] [{conf1.failure}]',
                     'value': f'{str(conf0.id)} # {str(conf1.id)}',
                     'disabled': failure})
            else:
                possible_comparisons.append(
                    {'label': f'{conf1.setup.name}: {conf1.system} Run dates [{conf0.date} vs. {conf1.date}]',
                     'value': f'{str(conf0.id)} # {str(conf1.id)}',
                     'disabled': failure})
    if len(possible_comparisons) != 0:
        # Pre-select the first comparison that did not fail
        for comp in possible_comparisons:
            if not comp['disabled']:
                return possible_comparisons, comp['value']
        return possible_comparisons, []
    else:
        return [], []
def _checkCommits(self, url):
    """
    Gets list of pull request commits and runs checks

    :param url: url to receive commits from
    :return: if worker is needed
    """
    self.baseSHA = self._getBranchHead(f'origin/{self.base}')
    compareSHAs = {'0_BaseSHA': self.baseSHA}
    # Adding fork point if available
    try:
        forkPoint = self._getForkPoint(baseBranch=f'origin/{self.base}',
                                       branchRef=f'origin/{self.branch}')
        compareSHAs['1_ForkPoint'] = forkPoint
    except ValueError:
        print(f'No fork point found for {self.branch} on {self.base}')
    # Adding last common commit if available
    try:
        lastCommon = self._getLastCommonRef(baseRef=f'origin/{self.base}',
                                            branchRef=f'origin/{self.branch}')
        compareSHAs['2_LastCommon'] = lastCommon
    except ValueError:
        print(f'No common ancestor between {self.base} and {self.branch}')
    needWorker = False  # if nothing is added to the queue, no worker needs to be spawned
    prSHAs = self._getCommitSHAsFromURL(url)
    allSHAs = list(compareSHAs.values()) + prSHAs
    for sha in allSHAs:
        # Check if already tested, ordered by newest
        shaConfigs = Config.objects(commitSHA=sha).order_by('-id')
        if shaConfigs.count() == 0:
            print("NEW COMMIT", sha)
            queue = QueueObject()
            queue.commitSHA = sha
            queue.installID = self.auth.install_id
            try:
                queue.save()
            except me.NotUniqueError:
                print('SHA is already queued')
                continue
            queue.runUrl = self._createCheckRun(sha, "Performance Run")
            if sha in prSHAs:
                queue.compareOptions = compareSHAs
                queue.compareUrl = self._createCheckRun(sha, "Performance Comparison")
            queue.running = False
            queue.save()
            needWorker = True  # switch on worker spawn
        else:
            print("Available Tests for SHA", shaConfigs.count())
            print("COMMIT ALREADY TESTED", sha)
            continue
    return needWorker
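# _createCheckRun is not shown in this section; it presumably POSTs to the
# GitHub Checks API and returns the check run's URL, which is later PATCHed
# with results. A minimal sketch of that call using the documented
# "create a check run" endpoint; OWNER/REPO and the returned field are
# assumptions, not taken from the source:
import requests

OWNER, REPO = 'someorg', 'somerepo'  # placeholders

def _createCheckRunSketch(self, sha, name):
    url = f'https://api.github.com/repos/{OWNER}/{REPO}/check-runs'
    r = requests.post(url,
                      headers=self.auth.getTokenHeader(),
                      json={'name': name, 'head_sha': sha, 'status': 'queued'})
    return r.json()['url']  # stored as queue.runUrl / queue.compareUrl above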
def singleResults(setup):
    print('\n[CALLBACK] Single Results')
    # Retrieve data
    start = time.time()
    conf = Config.objects().get(id=setup)
    results = Result.objects(config=conf).hint('config_hashed')
    df = aggregate_results(results)
    print(f'\tAggregated singles in {(time.time() - start)} seconds')
    return [df.to_json(), f'{conf.commitSHA[0:8]}: {conf.commitMessage}']
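# The .hint('config_hashed') above only works if Result declares an index with
# that name. A minimal sketch of how the model might declare it in mongoengine,
# where a '#' prefix marks a hashed index; the exact model definition is an
# assumption, it is not shown in the source:
class ResultSketch(me.Document):
    config = me.ReferenceField(Config)
    meta = {
        'indexes': [
            {'fields': ['#config'], 'name': 'config_hashed'},
        ]
    }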
def getOptions(keyword, setup):
    print(f'\n[CALLBACK] Checking Types for {keyword}')
    print('\tSETUPS', setup)
    conf = Config.objects().get(id=setup)
    opts = Result.objects(config=conf).distinct(f'dynamic_{keyword}')
    checkboxes = []
    selected = []
    for val in opts:
        checkboxes.append({'label': val, 'value': val})
        selected.append(val)
    return sorted(checkboxes, key=lambda c: c['label']), sorted(selected)
def _aggregateResults(config, sliderDict, sliderPos):
    print('[CALLBACK] Getting Results')
    if config is None or config == []:
        return None, None
    start = time.time()
    parsedSlider = pd.read_json(sliderDict)
    baseConf = Config.objects().get(id=config)
    baseRes = Result.objects(config=baseConf)
    base_df = aggregate_results(baseRes)
    compData = []
    for i in range(sliderPos[0] + 1, sliderPos[1] + 1):
        # Get matching config for the other SHA
        sha = parsedSlider.iloc[i].SHA
        try:
            conf = Config.objects().get(commitSHA=sha, setup=baseConf.setup)
        except me.MultipleObjectsReturned:
            conf = Config.objects(commitSHA=sha,
                                  setup=baseConf.setup).order_by('-id').first()
        except me.DoesNotExist:
            continue
        # Get results
        df = aggregate_results(Result.objects(config=conf))
        print(f'\t{len(df)}')
        if len(df) != 0:
            compData.append(df.to_json())
    print(f'\tAggregated all results: {time.time() - start} seconds')
    return [base_df.to_json(), compData], [sliderPos, config]
def FillFailureList(timer):
    failures = Config.objects(failure__exists=True)
    fList = [html.Tr(children=[html.Th('SHA'), html.Th('Failure')])]
    for q in failures:
        row = html.Tr(children=[
            html.Td(q.commitSHA),
            html.Td(q.failure),
        ])
        fList.append(row)
    return fList
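# FillFailureList receives a timer tick, which suggests it is driven by a
# dcc.Interval. A minimal wiring sketch; the component ids
# ('failure-table', 'refresh-interval') and the app object are assumptions,
# none of them appear in the source:
from dash import Dash, Input, Output, dcc, html

app = Dash(__name__)
app.layout = html.Div([
    dcc.Interval(id='refresh-interval', interval=60 * 1000),  # tick every minute
    html.Table(id='failure-table'),
])

@app.callback(Output('failure-table', 'children'),
              Input('refresh-interval', 'n_intervals'))
def _refreshFailures(timer):
    return FillFailureList(timer)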
def updateSetups(sha):
    print('\n[CALLBACK] Checking Setups for Single')
    if sha is None:
        print('\tSelection is None')
        return [], []
    print(f'\tChecking {sha}')
    config_all = Config.objects(commitSHA=sha).order_by('-date')
    setups = []
    # Listing available setups
    for conf in config_all:
        failure = conf.failure is not None
        try:
            system = conf.system
        except AttributeError:
            system = 'no system'
        try:
            name = conf.setup.name
        except AttributeError:
            name = 'no name'
        if failure:
            setups.append({'label': f'{name}: {system} [{conf.failure}]',
                           'value': f'{str(conf.id)}',
                           'disabled': failure})
        else:
            setups.append({'label': f'{name}: {system} Run dates [{conf.date}]',
                           'value': f'{str(conf.id)}',
                           'disabled': failure})
    if len(setups) != 0:
        # Pre-select the first setup that did not fail
        for setup in setups:
            if not setup['disabled']:
                return setups, setup['value']
        return setups, []
    else:
        return [], []
def getConfigs(string):
    # Comparison dropdown values are encoded as '<id0> # <id1>' (see updateSetups)
    ids = re.findall(r'(\S+) # (\S+)', string)
    conf0 = Config.objects().get(id=ids[0][0])
    conf1 = Config.objects().get(id=ids[0][1])
    return conf0, conf1
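# getConfigs undoes the '<id0> # <id1>' encoding that updateSetups uses for
# comparison dropdown values. A tiny round-trip sketch, assuming two saved
# Config documents c0 and c1:
encoded = f'{str(c0.id)} # {str(c1.id)}'
base, test = getConfigs(encoded)
assert (base.id, test.id) == (c0.id, c1.id)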
def save_failed_config(self, failure: str):
    """
    Saving failed configs to not re-run them again

    :param failure: Failure Mode
    :return:
    """
    db_entry = Config()
    db_entry.name = 'Performance Testing Failed'  # TODO: Keep name field?
    db_entry.date = datetime.utcnow()
    db_entry.commitSHA = self.sha
    db_entry.commitMessage = self.repo.commit(self.sha).message
    db_entry.commitDate = self.repo.commit(self.sha).authored_datetime
    # Saving setup used in perf script
    if self.perfSetup is not None:
        db_entry.setup = self.perfSetup
    db_entry.failure = failure
    db_entry.save()
def parse_and_upload(self):
    print("uploading", self.mdFlexDir)
    try:
        cpu = get_cpu_info()["brand"]
    except Exception as e:
        print(f"Couldn't determine CPU brand: {e}")
        cpu = "N/A"
    run_timestamp = datetime.utcnow()
    coarse_pattern = re.compile(r'Collected times for\s+{(.*)}\s:\s\[(.*)\]')
    config_pattern = re.compile(r'([^,]+): ([^,]+)')
    times_pattern = re.compile(r'(\d+)')
    config_runs = coarse_pattern.findall(self.measure_output.stdout.decode('utf-8'))
    db_entry = Config()
    db_entry.name = 'performance via single tuning phase'  # TODO: Keep name field?
    db_entry.date = run_timestamp
    db_entry.commitSHA = self.sha
    db_entry.commitMessage = self.repo.commit(self.sha).message
    db_entry.commitDate = self.repo.commit(self.sha).authored_datetime
    db_entry.mergedBaseSHA = self.baseSHA
    # Assumes tests were run on this system
    db_entry.system = cpu
    # Saving setup used in perf script
    db_entry.setup = self.perfSetup
    # TODO: Decide if uniqueness is enforced (change sparse in models to False)
    # db_entry.unique = db_entry.name + db_entry.commitSHA + db_entry.system + str(db_entry.date)
    # try:
    #     db_entry.save()
    # except NotUniqueError:
    #     print("Exact Configuration for system and commit + date already saved!")
    #     continue
    try:
        db_entry.save()
    except Exception as e:
        self.updateStatus(-1, "UPLOAD", str(e))
        return False, f'Upload of config to DB failed {e}'
    print(db_entry)
    for run in config_runs:
        results = Result()
        results.config = db_entry
        # Filter all config parameters
        config = config_pattern.findall(run[0])
        # Parsing output
        try:
            # Parsing config keys and values
            for pair in config:
                key = pair[0].replace(' ', '')  # strip spaces
                key = 'dynamic_' + key  # prefix clearly shows dynamic field creation in DB
                quantity = pair[1].replace(' ', '')  # strip spaces
                try:  # try converting to float if appropriate
                    quantity = float(quantity)
                except ValueError:
                    pass
                print(key, quantity)
                results[key] = quantity
            # Parsing times
            times = times_pattern.findall(run[1])
            times = [float(t) for t in times]
            results.measurements = times
            results.meanTime = np.mean(times)  # mean running time
            results.minTime = np.min(times)  # min running time
        except Exception as e:
            print(f'Parsing of measurement failed {e}')
            self.updateStatus(-1, "PARSING", str(e))
            return False, f'Parsing failed with {e}'
        try:
            results.save()
        except Exception as e:
            self.updateStatus(-1, "UPLOAD", str(e))
            return False, f'Upload of Result failed with {e}'
        print(results)
    os.chdir(self.baseDir)
    self.updateStatus(1, "UPLOAD", "RESULT UPLOAD succeeded\n")
    return True, 'Upload succeeded'
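# A fabricated sample of the measurement output the three regexes in
# parse_and_upload are built for; the option names and numbers are
# illustrative only, not taken from real md-flexible output:
import re

coarse_pattern = re.compile(r'Collected times for\s+{(.*)}\s:\s\[(.*)\]')
config_pattern = re.compile(r'([^,]+): ([^,]+)')
times_pattern = re.compile(r'(\d+)')

sample = 'Collected times for {Container: LinkedCells, Traversal: c08} : [1200, 1180, 1210]'
run = coarse_pattern.findall(sample)[0]
print(config_pattern.findall(run[0]))  # [('Container', ' LinkedCells'), (' Traversal', ' c08')]
print(times_pattern.findall(run[1]))   # ['1200', '1180', '1210']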
def comparePerformance(self, q: QueueObject):
    """
    Compares performance between different commits in the repo.
    Works on a check run already created via the GitHub Checks API at compareUrl.
    Comparison options include:
    1a) Merge current master head into the branch and compare performance between merged and un-merged master
    1b) Merge failed -> compare between the non-merged feature branch and master
    2) To be implemented (perf already available): compare against the fork point
    3) To be implemented (perf already available): compare against the last common commit between master and feature branch

    Args:
        q: QueueObject containing all necessary information to run the comparison

    Returns:
    """
    commitSHA = q.commitSHA
    print(f"Comparing Performance for {commitSHA} {q.compareUrl}")
    # Update status to in progress
    r = requests.patch(url=q.compareUrl,
                       headers=self.auth.getTokenHeader(),
                       json=initialStatus())
    pretty_request(r)
    codes, headers, messages, images = [], [], [], []
    try:
        # Get pull requests associated with sha
        # commitPR_url = f'{compareUrl.split("/check-runs/")[0]}/commits/{str(sha)}/pulls'
        # r = requests.get(url=commitPR_url, headers=self.auth.getTokenHeader())
        baseSHA = q.compareOptions['0_BaseSHA']
        baseConfigs = Config.objects(commitSHA=baseSHA).order_by('-date')
        if baseConfigs.first() is None:
            raise RuntimeError(f'<b>No performance runs for the PR base {baseSHA} were found</b>')
        # Check all configs, aka setups
        for base in baseConfigs:
            # Get the freshest matching config
            test = Config.objects(commitSHA=commitSHA,
                                  system=base.system,
                                  setup=base.setup).order_by('-date').first()
            if test is None:
                raise RuntimeError(
                    f'<b>No matching configs between this commit {commitSHA} and PR base {baseSHA} could be found.</b>')
            # Case 1) Merge worked out
            if test.mergedBaseSHA is not None:
                headers.append('Merged Master into Feature Branch Comparison')
            else:
                headers.append('Feature vs. Master Comparison (no-merge)')
            # TODO: Add comparison for last common commit and fork point, perf tests are already running
            fig, minSpeeds, meanSpeeds, missing = self._compareConfigs(base, test)
            # Upload figure
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            buf.seek(0)
            imgur = ImgurUploader()
            link, hash = imgur.upload(buf.read())
            images.append(link)
            test.compImgurLink = link
            test.compDeleteHash = hash
            test.save()
            messages.append(
                f'<b>Perf Results:</b>\n\n'
                f'<b>Setup:</b> {test.setup.name}\n'
                f'<b>Comparing this commit</b> {commitSHA} with base {baseSHA}\n'
                f'<b>Threshold to pass:</b> speedup >= {CheckFlow.PERF_THRESHOLD}\n'
                f'<b>Minimum Time Speedup Average:</b> {np.mean(minSpeeds)}\n'
                f'<b>Mean Time Speedup Average:</b> {np.mean(meanSpeeds)}\n\n'
                f'<b>Not available configs to compare:</b> {missing}')
            # Set up params for message
            codes.append(1 if np.mean(minSpeeds) >= CheckFlow.PERF_THRESHOLD else -1)
        params = codeStatus(codes, headers, messages, images)
    except RuntimeError as v:
        code = [0]
        header = ['COMPARISON']
        message = [str(v)[-500:]]
        params = codeStatus(code, header, message)
    except Exception as e:
        code = [-1]
        header = ['COMPARISON']
        message = [str(e)[-500:]]
        params = codeStatus(code, header, message)
    # Patch the check run
    r = requests.patch(url=q.compareUrl,
                       headers=self.auth.getTokenHeader(),
                       json=params)
    pretty_request(r)
me.connect('performancedb',
           host=os.environ['MONGOHOST'],
           username=os.environ['USERNAME'],
           password=os.environ['PASSWORD'])

# New setups with more tuning samples and changed rebuild frequency
homoID = '5f44050def458403b65f97fa'
inhomoID = '5f44050def458403b65f97f9'
sha = '20382287f7f3d1ff2aa8414891ea657245670c80'
homo = Setup.objects().get(id=homoID)
inhomo = Setup.objects().get(id=inhomoID)

for s_name, setup in zip(['homo', 'inhomo'], [homo, inhomo]):
    configs = Config.objects(setup=setup, commitSHA=sha)
    # TODO: Remove limit here
    for conf in configs:
        results = list(Result.objects(config=conf))  # [:10]
        data = []
        labels = []
        for res in results:
            res: Result
            resDict = res.__dict__
            keys = [k for k in resDict.keys()
                    if 'dynamic' in k and '_dynamic_lock' not in k]
            labels.append(''.join([f'{str(resDict[k])} ' for k in keys]))
            data.append(res.measurements)
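# The script above stops once `data` (raw measurement lists) and `labels`
# (one config string per result) are collected. A minimal sketch of a
# per-config visualization that could follow at the end of the inner loop;
# the boxplot choice and output filename are assumptions, not part of the
# source:
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(15, len(data) / 4))
plt.boxplot(data, vert=False, labels=labels)  # one box per tested configuration
plt.xlabel('nanoseconds')
plt.xscale('log')
plt.title(f'{s_name}: {sha[:8]}')
plt.tight_layout()
fig.savefig(f'{s_name}_{str(conf.id)}.png')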