def resolveTopdownMetrics(self, pmc):
  """
  Resolves pmu events for the given topdown metrics.

  The method resolves the list of pmu events needed for one or more nodes in the topdown hierarchy.

  :param pmc: PMU events to be enabled for the current profile session

  """
  import copy
  from xpedite.pmu.event import TopdownMetrics
  from xpedite.pmu.event import Event, TopdownNode, Metric
  pmc = copy.copy(pmc)
  topdownNodes = [i for i, counter in enumerate(pmc) if isinstance(counter, (TopdownNode, Metric))]
  if topdownNodes:
    topdown = self.topdownCache.get(self.cpuInfo.cpuId)
    self.topdownMetrics = TopdownMetrics()
    for index in topdownNodes:
      node = pmc[index]
      topdownNode = self.topdownMetrics.add(topdown, node)
      pmc[index] = [Event(event.name.title().replace('_', '').replace(' ', ''), event.name)
        for event in topdownNode.events]
  return pmc
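# A minimal usage sketch (not part of the original module) for resolveTopdownMetrics.
# It assumes `profiler` is an instance of the enclosing class with cpuInfo and
# topdownCache already populated; the node and counter names below are illustrative.
def _exampleResolveTopdown(profiler):
  from xpedite.pmu.event import Event, TopdownNode
  pmc = [
    TopdownNode('Retiring'),                    # topdown node - resolved to a list of pmu events
    Event('instructions', 'INST_RETIRED.ANY'),  # plain event - passed through unchanged
  ]
  return profiler.resolveTopdownMetrics(pmc)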
def report(self, reportName=None, benchmarkPaths=None, classifier=DefaultClassifier(), txnFilter=None,
    reportThreshold=3000, resultOrder=ResultOrder.WorstToBest):
  """
  Ends active profile session and generates reports.

  This method executes the following steps
    1. Ends samples collection and disconnects tcp connection to target
    2. Gathers sample files for the current profile session and loads elapsed time and pmu counters
    3. Groups related counters to build transactions and timelines
    4. Generates html report and stores results

  :param reportName: Name of the profile report (Default value = None)
  :type reportName: str
  :param benchmarkPaths: List of stored reports from previous runs, for benchmarking (Default value = None)
  :param classifier: Predicate to classify transactions into different categories (Default value = DefaultClassifier())
  :type classifier: xpedite.txn.classifier.ProbeDataClassifier
  :param txnFilter: Lambda to filter transactions prior to report generation
  :type txnFilter: callable that accepts a txn instance and returns a bool
  :param reportThreshold: Threshold for the number of transactions rendered in html reports (Default value = 3000)
  :type reportThreshold: int
  :param resultOrder: Default sort order of transactions in latency constituent reports
  :type resultOrder: xpedite.pmu.ResultOrder

  """
  from xpedite.profiler.reportgenerator import ReportGenerator
  from xpedite.txn.repo import TxnRepoFactory
  from xpedite.pmu.event import Event
  try:
    if not self.app.dryRun:
      try:
        self.app.endProfile()
      except Exception as ex:
        LOGGER.warn('Detected unclean profile termination - %s', ex)
      if self.eventSet:
        self.app.disablePMU()

    repoFactory = TxnRepoFactory()
    pmc = [Event(req.name, req.uarchName) for req in self.eventSet.requests()] if self.eventSet else []
    repo = repoFactory.buildTxnRepo(
      self.app, self.cpuInfo, self.probes, self.topdownCache, self.topdownMetrics, pmc,
      self.benchmarkProbes, benchmarkPaths
    )
    reportName = reportName if reportName else self.app.name
    reportGenerator = ReportGenerator(reportName)
    return reportGenerator.generateReport(
      self.app, repo, classifier, resultOrder, reportThreshold, txnFilter, benchmarkPaths
    )
  except Exception as ex:
    LOGGER.exception('failed to generate report')
    raise ex
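# A minimal usage sketch (not part of the original module) for report(): generates a
# report named 'latency-regression', keeps every transaction (the no-op filter is a
# placeholder - real filters inspect txn attributes) and caps html rendering at 1000
# transactions; the default result order (worst to best) is left unchanged.
def _exampleReport(profiler):
  return profiler.report(
    reportName='latency-regression',
    txnFilter=lambda txn: True,   # placeholder predicate; accepts a txn, returns a bool
    reportThreshold=1000,
  )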
def test_request_sorter():
  eventsFile = os.path.join(os.path.dirname(__file__), 'test_events.json')
  eventsDb = EventsLoader().loadJson(eventsFile)
  assert eventsDb
  events = [
    Event('EVENT_3', 'EVENT_3'),
    Event('EVENT_2', 'EVENT_2'),
    Event('EVENT_1', 'EVENT_1'),
    Event('EVENT_0', 'EVENT_0'),
  ]

  # resolving events preserves the order in which requests were supplied
  eventState = PMUCtrl.resolveEvents(eventsDb, [0], events)
  assert len(eventState) == len(events)
  assert len(eventState.genericRequests) == len(events)
  for i, event in enumerate(events):
    assert event.uarchName == eventState.genericRequests[i].uarchName

  # allocating events sorts the requests, reversing the descending insertion order used above
  PMUCtrl.allocateEvents(eventState)
  assert len(eventState.genericRequests) == len(events)
  print(eventState.genericRequests)
  for i, event in enumerate(events):
    assert event.uarchName == eventState.genericRequests[len(events) - i - 1].uarchName
def loadBenchmarkInfo(path):
  """
  Loads info about a benchmark from the file system

  :param path: path of the directory containing the benchmark info file

  """
  configParser = ConfigParser.RawConfigParser()
  fileName = os.path.join(path, BENCHMARK_FILE_NAME)
  if os.path.exists(fileName):
    configParser.read(fileName)
    benchmarkName = configParser.get(BENCHMARK_SECTION, BENCHMARK_NAME_KEY)
    legend = configParser.get(BENCHMARK_SECTION, BENCHMARK_LEGEND_KEY)
    if not configParser.has_section(BENCHMARK_CPU_INFO_SECTION):
      LOGGER.warn('failed to load benchmark %s - cpu info missing', benchmarkName)
      return None
    cpuId = configParser.get(BENCHMARK_CPU_INFO_SECTION, BENCHMARK_CPU_ID_KEY)
    cpuFrequency = configParser.get(BENCHMARK_CPU_INFO_SECTION, BENCHMARK_CPU_FREQUENCY_KEY)
    cpuInfo = CpuInfo(cpuId, int(cpuFrequency))
    events = None
    if configParser.has_option(BENCHMARK_PMC_SECTION, BENCHMARK_PMC_COUNTER_COUNT):
      counterCount = configParser.getint(BENCHMARK_PMC_SECTION, BENCHMARK_PMC_COUNTER_COUNT)
      events = []
      for i in range(counterCount):
        eventStr = configParser.get(BENCHMARK_PMC_SECTION, BENCHMARK_PMC_COUNTER.format(i))
        eventFields = eventStr.split(',')
        events.append(Event(eventFields[0], eventFields[1], bool(eventFields[2]), bool(eventFields[3])))
    return Benchmark(benchmarkName, cpuInfo, path, legend, events)
  return None
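# A minimal usage sketch (not part of the original module) for loadBenchmarkInfo. The
# benchmark directory path is illustrative; None is returned when the info file is
# absent or its cpu info section is missing.
def _exampleLoadBenchmark():
  benchmark = loadBenchmarkInfo('/var/tmp/xpedite/benchmarks/baseline')
  return benchmark is not None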