Example #1
    def buildFromRecords(self, records):
        """
    Parses and loads list of instrumented probes from probe records

    :param records: records from the appInfo file

    """
        probes = {}
        for record in records:
            fields = {}
            for field in record.split(self.FIELD_DELIMITER):
                index = field.find(self.KEY_VALUE_DELIMITER)
                if index == -1 or len(field) < (index + 1):
                    raise InvariantViloation(
                        'detected invalid probe record in app info file - {}'.
                        format(record))
                fields.update({field[:index]: field[index + 1:]})
            if fields:
                try:
                    probes.update({
                        fields[self.FIELD_CALL_SITE]:
                        AnchoredProbe(
                            fields[self.FIELD_NAME], fields[self.FIELD_FILE],
                            fields[self.FIELD_LINE],
                            fields[self.FIELD_ATTRIBUTES],
                            fields[self.FIELD_STATUS] ==
                            self.PROBE_STATUS_ENABLED, fields[self.FIELD_NAME])
                    })
                except KeyError as error:
                    raise InvariantViloation(
                        'detected record missing field {} - \n{}\n{}'.format(
                            error, record, fields))
        return probes
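
A minimal standalone sketch of the per-field parsing performed above, using hypothetical delimiter values ('|' between fields, '=' between key and value) in place of FIELD_DELIMITER and KEY_VALUE_DELIMITER:

def parseRecord(record, fieldDelimiter='|', keyValueDelimiter='='):
    """Splits a probe record into a dict of key/value fields"""
    fields = {}
    for field in record.split(fieldDelimiter):
        index = field.find(keyValueDelimiter)
        if index == -1:
            raise ValueError('invalid field {} in record {}'.format(field, record))
        fields[field[:index]] = field[index + 1:]
    return fields

# parseRecord('Name=txnBegin|File=app.C|Line=42')
#   -> {'Name': 'txnBegin', 'File': 'app.C', 'Line': '42'}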
Example #2
  def buildTimelineTable(self, timelineStats, probes, resultOrder, threshold, uid,
      logAbsoluteValues=False, logTimeline=False, logData=False):
    """
    Builds a html table for timelines with common category and route

    :param timelineStats: A collection of timelines to be reported
    :param probes: List of probes in the route taken by the transaction collection
    :param resultOrder: Sort order for a collection of timelines
    :param threshold: Threshold for the number of transactions rendered in html reports
    :param uid: Unique identifier for the report being generated
    :param logAbsoluteValues: Flag to enable reporting of absolute tsc values
    :param logTimeline: Flag to enable reporting of timeline details
    :param logData: Flag to enable logging of data associated with transactions

    """
    begin = time.time()
    tableContainer = HTML().div(klass=TABLE_REPORT_CONTAINER)
    table = tableContainer.table(border='1', klass=TABLE_REPORT)
    self.buildBreakupTableHeader(table, probes, logAbsoluteValues, logTimeline, logData)
    tbody = table.tbody

    timelineCollection = self.reorderTimelineRecords(timelineStats.timelineCollection, resultOrder)

    # write table rows
    for i, timeline in enumerate(timelineCollection, 1):
      row = tbody.tr
      row.td('{0:,}'.format(i), klass=TD_KEY)
      row.td('{:,}'.format(timeline.txnId), klass=TD_KEY)
      if logData:
        row.td('{}'.format(timeline.data), klass=TD_KEY)
      row.td('{:,}'.format(timeline.inception), klass=TD_KEY)

      j = None
      for j, timepoint in enumerate(timeline):
        if logTimeline:
          row.td(DURATION_FORMAT.format(timepoint.point))
        if j < len(timeline) - 1: # skip the duration for the last time point, since it's always 0
          self.buildTimepointCell(row, uid, i, j, timepoint)
      self.buildTimepointCell(row, uid, i, j, timeline.endpoint, klass=TD_END)

      if logAbsoluteValues:
        for j, probe in enumerate(probes):
          counter = timeline.txn[j]
          if probe != counter.probe:
            from xpedite.types import InvariantViloation
            raise InvariantViloation(
              'transaction {} does not match route {}'.format(timeline.txn, probes)
            )
          tsc = counter.tsc if counter else '---'
          row.td('{}'.format(tsc), klass=TD_DEBUG)
      if i >= threshold:
        break
      elapsed = time.time() - begin
      if elapsed >= 5:
        LOGGER.completed('\tprocessed %d out of %d transactions | %0.2f%% complete',
          i, threshold, float(100 * float(i)/float(threshold))
        )
        begin = time.time()
    return tableContainer
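
The loop above throttles progress logging to at most one message every five seconds; the same pattern isolated as a sketch (processWithProgress and the plain logging setup are assumptions, not xpedite APIs):

import logging
import time

LOGGER = logging.getLogger(__name__)

def processWithProgress(items, process, interval=5):
  """Applies process() to each item, logging progress at most once per interval"""
  begin = time.time()
  for i, item in enumerate(items, 1):
    process(item)
    if time.time() - begin >= interval:
      LOGGER.info('processed %d out of %d items | %0.2f%% complete',
        i, len(items), 100.0 * i / len(items))
      begin = time.time()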
Example #3
    def __enter__(self):
        """Instantiates a tcp client and connects to the target application"""
        if self.client:
            raise InvariantViloation('environment already in use')

        self.loadAppInfo()
        if not self.dryRun:
            self.client = DatagramClient(self.ip, self.port)
            self.client.connect()
        return self
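
Since __enter__ returns self, the environment is meant to be used in a with statement; a hypothetical usage sketch (the constructor arguments and the admin command shown are assumptions):

# hypothetical usage - the client stays connected for the duration of the block
with Environment(ip='localhost', port=9000, dryRun=False) as env:
    env.admin('probes query')
# a matching __exit__ (not shown above) would disconnect the client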
Example #4
  def __eq__(self, other):
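    """Compares anchored probes by file name, line number, and path suffix"""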
    if not other.isAnchored():
      from xpedite.types import InvariantViloation
      raise InvariantViloation('only anchored probes can be compared')

    if self.lineNo != other.lineNo or self.fileName != other.fileName:
      return False

    if self.path and other.path:
      longPath, shortPath = (self.path, other.path) if len(self.path) >= len(other.path) else (other.path, self.path)
      return longPath.find(shortPath, len(longPath) - len(shortPath)) != -1
    return True
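
The path comparison above treats two probes as equal when the shorter path is a suffix of the longer one; the same check in isolation:

def isPathSuffix(longPath, shortPath):
  """Returns True if shortPath matches the trailing end of longPath"""
  return longPath.find(shortPath, len(longPath) - len(shortPath)) != -1

# isPathSuffix('/home/dev/src/app/main.C', 'app/main.C') -> True
# isPathSuffix('/home/dev/src/app/main.C', 'lib/main.C') -> False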
Example #5
    def admin(self, cmd, timeout=10):
        """
    Sends command to enable/disable/query probe status

    :param cmd: Command to execute in target application
    :param timeout: Maximum time to await a response from app (Default value = 10 seconds)

    """
        self.client.send(cmd)
        pdu = self.client.readFrame(timeout)
        if len(pdu) < 5 or pdu[4] != '|':
            raise InvariantViloation(
                'Invalid response - pdu not in expected format \n{}\n'.format(
                    pdu))
        status = pdu[3]
        if not str.isdigit(status):
            raise InvariantViloation(
                'Invalid response - status code not in expected format \n{}\n'.
                format(pdu))
        result = pdu[5:] if len(pdu) > 5 else ''
        if int(status):
            raise Exception('Failed to execute request - {}'.format(result))
        return result
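
From the checks above, a response frame carries a status digit at offset 3, a '|' separator at offset 4, and the result payload from offset 5 onwards; hypothetical frames illustrating the inferred layout (the 'xpd' header bytes are an assumption):

okPdu = 'xpd0|probe enabled'     # status '0' -> admin() returns 'probe enabled'
errPdu = 'xpd1|no such probe'    # non-zero status -> Exception raised with the payload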
Example #6
    def addBenchmark(self, transCollection):
        """
    Adds transaction collection for a benchmark to this repository

    :param transCollection: Transaction collection for a benchmark profile

    """
        if self._currentCollection and self._currentCollection is transCollection:
            from xpedite.types import InvariantViloation
            raise InvariantViloation(
                'attempting to add current transaction collection {} as benchmark'
                .format(transCollection))
        transCollection.repo = self
        self._benchmarkCollections.update(
            {transCollection.name: transCollection})
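
A hypothetical usage sketch (the repository constructor and collection objects are assumptions):

repo = TxnRepo()                            # hypothetical constructor
repo.addBenchmark(benchmarkCollection)      # stored under benchmarkCollection.name
repo.addBenchmark(repo._currentCollection)  # raises InvariantViloation if a current collection is set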
Example #7
def buildInitCell(nb, numOfCategories, d3Flots, appName, runId):
  """
  Method to build the init cell which contains the intro,
   serialized transactions object and metadata for generating reports

  """
  from xpedite.jupyter.templates import loadInitCell
  initCode = loadInitCell()
  try:
    envLink = buildReportLink('envReport', Action.Load)
    initCode = initCode.format(
      envLink=envLink, appName=appName, categoryCount=numOfCategories + 1, runId=runId
    )
  except TypeError:
    typeErr = 'Number of placeholders in init code string do not match the number of args supplied'
    LOGGER.exception(typeErr)
    raise InvariantViloation(typeErr)

  nb['cells'] = [nbf.new_code_cell(
    source=initCode,
    metadata={
      'init_cell': True, 'isInit': '0xFFFFFFFFA5A55A5DUL',
      'hide_input': True, 'editable': False, 'deletable': False,
      'd3Flots': d3Flots
    })] + nb['cells']
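
A minimal sketch of the template formatting guarded above, with a hypothetical template in place of the one returned by loadInitCell():

# hypothetical template - the real one is loaded from xpedite.jupyter.templates
template = 'Profile report for {appName} | run {runId} | {categoryCount} categories | env: {envLink}'
initCode = template.format(envLink='envReport.html', appName='demo-app', categoryCount=3, runId=1625097600)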
Example #8
def buildReportCells(nb, result, dataFilePath, profiles):
    """
  Method to build the report cells. Populates the
   metadata to be stored in init cell and preloads
   source code for creating flots and html links
   Returns the total num of categories in a run.

  """
    from xpedite.jupyter.snippetsBuilder import buildSnippets
    from xpedite.jupyter.xpediteData import XpediteDataFactory
    from xpedite.jupyter.templates import loadCategoryMarkup

    nb['cells'] = []
    d3Flots = []
    flotCode = loadCategoryMarkup()
    reportCount = 0

    xpdf = XpediteDataFactory(dataFilePath)
    xpdf.appendRecord('envReport', 'environment report',
                      result.envReport.zContent)
    xpdProfiles = copy.deepcopy(profiles)
    xpdProfiles.transactionRepo = None
    xpdf.appendRecord('profiles', 'xpedite profiles', xpdProfiles)

    # create and compress snippets
    snippetData = buildSnippets(xpdProfiles)
    zSnippetData = zlib.compress(snippetData)
    zSnippetData = base64.b64encode(zSnippetData)
    xpdf.appendRecord('snippets', 'snippets', zSnippetData)

    cellNum = None
    for cellNum, cell in enumerate(result.reportCells):
        linksCode = ''
        d3Flot = buildD3Flot(cell)

        # create html links for reports
        reportNum = None
        for reportNum, report in enumerate(cell.htmlReport):
            reportCount += 1
            xpdKey = 'report{}'.format(reportCount)
            linksCode += '<li><a href={} target="_blank">{}</a></li>'.format(
                buildReportLink(xpdKey, Action.Load), report.name)
            xpdf.appendRecord(xpdKey, 'htmlReport', report.zContent)

        # populate init cell metadata
        d3Flots.append(d3Flot.toDict())

        # fill notebook cells with flot + report links code
        try:
            cellCode = flotCode.format(name=cell.flot.title,
                                       description=cell.flot.description,
                                       cellNum=cellNum,
                                       reportNum=reportNum + 1,
                                       linksCode=linksCode)
        except TypeError:
            typeErr = 'Number of placeholders in cell code string do not match the number of args supplied'
            LOGGER.exception(typeErr)
            raise InvariantViloation(typeErr)

        nb['cells'].append(
            nbf.new_code_cell(source=cellCode,
                              metadata={
                                  'init_cell': True,
                                  'hide_input': True,
                                  'editable': False,
                                  'deletable': True
                              }))

    xpdf.commit()
    return cellNum, d3Flots
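
The snippet payload above is deflate-compressed and then base64-encoded before being stored; a standalone sketch of that round trip:

import base64
import zlib

def encodeSnippets(snippetData):
    """Compresses and base64-encodes snippet source for storage in a data file"""
    return base64.b64encode(zlib.compress(snippetData.encode()))

def decodeSnippets(zSnippetData):
    """Reverses encodeSnippets"""
    return zlib.decompress(base64.b64decode(zSnippetData)).decode()

assert decodeSnippets(encodeSnippets('def foo(): pass')) == 'def foo(): pass'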
Example #9
def buildTimelineStats(category, route, probes, txnSubCollection): # pylint: disable=too-many-locals
  """
  Builds timeline statistics from a subcollection of transactions

  :param probes: List of probes enabled for a profiling session
  :param txnSubCollection: A subcollection of transactions

  """
  from xpedite.types import InvariantViloation
  begin = time.time()
  cpuInfo = txnSubCollection.cpuInfo
  topdownMetrics = txnSubCollection.topdownMetrics
  timelineCollection = []
  topdownKeys = topdownMetrics.topdownKeys() if topdownMetrics else []
  deltaSeriesRepo = DeltaSeriesRepo(txnSubCollection.events, topdownKeys, probes)
  pmcNames = deltaSeriesRepo.pmcNames
  eventsMap = deltaSeriesRepo.buildEventsMap()
  timelineStats = TimelineStats(
    txnSubCollection.name, cpuInfo, category, route,
    probes, timelineCollection, deltaSeriesRepo
  )
  tscDeltaSeriesCollection = deltaSeriesRepo.getTscDeltaSeriesCollection()

  pmcCount = len(txnSubCollection.events) if txnSubCollection.events else 0
  inceptionTsc = None
  defaultIndices = range(len(route))

  totalTxnCount = len(txnSubCollection)
  for txnCount, txn in enumerate(txnSubCollection):
    timeline = Timeline(txn)
    indices = conflateRoutes(txn.route, route) if len(txn) > len(route) else defaultIndices
    firstCounter = prevCounter = None
    maxTsc = 0
    i = -1
    endpoint = TimePoint('end', 0, deltaPmcs=([0] * pmcCount if pmcCount > 0 else None))
    for j in indices:
      i += 1
      probe = probes[i]
      counter = txn[j]
      if not compareProbes(probe, counter.probe):
        raise InvariantViloation('category [{}] has mismatch of probes '
          '"{}" vs "{}" in \n\ttransaction {}\n\troute {}'.format(
            category, probe, counter.probe, txn.txnId, probes
          )
        )

      if counter:
        tsc = counter.tsc
        maxTsc = max(maxTsc, tsc)
        if not firstCounter:
          firstCounter = prevCounter = counter
        elif tsc:
          duration = cpuInfo.convertCyclesToTime(tsc - prevCounter.tsc)
          point = cpuInfo.convertCyclesToTime(prevCounter.tsc - firstCounter.tsc)
          timePoint = TimePoint(probes[i-1].name, point, duration, data=prevCounter.data)

          if len(counter.pmcs) < pmcCount:
            raise InvariantViloation(
              'category [{}] has transaction {} with counter {} '
              'missing pmc samples {}/{}'.format(
                category, txn.txnId, counter, len(counter.pmcs), pmcCount
              )
            )
          if pmcCount != 0:
            timePoint.pmcNames = pmcNames
            timePoint.deltaPmcs = []
            for k in range(pmcCount):
              deltaPmc = counter.pmcs[k] - prevCounter.pmcs[k] if counter.threadId == prevCounter.threadId else NAN
              endpoint.deltaPmcs[k] += (deltaPmc if counter.threadId == prevCounter.threadId else 0)
              timePoint.deltaPmcs.append(deltaPmc)
              deltaSeriesRepo[pmcNames[k]][i-1].addDelta(deltaPmc)
            if topdownMetrics:
              counterMap = CounterMap(eventsMap, timePoint.deltaPmcs)
              timePoint.topdownValues = topdownMetrics.compute(counterMap)
              for td in timePoint.topdownValues:
                deltaSeriesRepo[td.name][i-1].addDelta(td.value)
          timeline.addTimePoint(timePoint)
          tscDeltaSeriesCollection[i-1].addDelta(duration)
          prevCounter = counter
        else:
          raise InvariantViloation(
            'category [{}] has transaction {} with missing tsc for probe {}/counter {}'.format(
              category, txn.txnId, probe, counter
            )
          )
      else:
        raise InvariantViloation(
          'category [{}] has transaction {} with probe {} missing counter data'.format(
            category, txn.txnId, probe
          )
        )

    if prevCounter:
      point = cpuInfo.convertCyclesToTime(prevCounter.tsc - firstCounter.tsc)
      timeline.addTimePoint(TimePoint(probes[-1].name, point, 0, data=prevCounter.data))

    endpoint.duration = cpuInfo.convertCyclesToTime(maxTsc - firstCounter.tsc)
    if pmcCount != 0:
      endpoint.pmcNames = pmcNames
      for k, deltaPmc in enumerate(endpoint.deltaPmcs):
        deltaSeriesRepo[pmcNames[k]][-1].addDelta(deltaPmc)
      if topdownMetrics:
        counterMap = CounterMap(eventsMap, endpoint.deltaPmcs)
        endpoint.topdownValues = topdownMetrics.compute(counterMap)
        for td in endpoint.topdownValues:
          deltaSeriesRepo[td.name][-1].addDelta(td.value)
    timeline.endpoint = endpoint

    timelineCollection.append(timeline)
    tscDeltaSeriesCollection[-1].addDelta(endpoint.duration)

    elapsed = time.time() - begin
    if elapsed >= 5:
      LOGGER.completed(
        '\n\tprocessed %d out of %d transactions | %0.2f%% complete |',
        txnCount, totalTxnCount, float(100 * float(txnCount)/float(totalTxnCount))
      )
      begin = time.time()

    if not inceptionTsc:
      inceptionTsc = firstCounter.tsc
      timeline.inception = 0
    else:
      timeline.inception = int(cpuInfo.convertCyclesToTime(firstCounter.tsc - inceptionTsc) / 1000)

  return timelineStats
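
The durations above come from converting TSC tick deltas to time units; a minimal sketch of such a conversion, assuming a hypothetical CpuInfo variant exposing a fixed core frequency (xpedite's real CpuInfo API may differ):

class CpuInfoSketch(object):
  """Converts TSC tick deltas to wall time for a fixed core frequency"""

  def __init__(self, frequencyGhz):
    self.cyclesPerUsec = frequencyGhz * 1000.0

  def convertCyclesToTime(self, cycles):
    """Returns the duration of a tick delta in microseconds"""
    return cycles / self.cyclesPerUsec

# 3,000 ticks on a 3 GHz core span exactly one microsecond
assert CpuInfoSketch(frequencyGhz=3.0).convertCyclesToTime(3000) == 1.0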
Example #10
    def generateHistograms(self, repo, classifier, runId):
        """
    Generates latency distribuion histograms for each category/route combination

    :param repo: Repository of transaction collection
    :type repo: xpedite.txn.repo.TxnRepo
    :param classifier: Classifier to categorize transactions into various types
    :param runId: Epoch time stamp to uniquely identify a profiling session

    """
        histograms = {}
        txnCollections = [repo.getCurrent()] + list(
            repo.getBenchmarks().values())
        if not txnCollections[0].isCurrent() or txnCollections[0].name != CURRENT_RUN:
            from xpedite.types import InvariantViloation
            raise InvariantViloation(
                'expecting transactions for the current run at index 0 in the repository. '
                'instead found {}'.format(txnCollections[0].name))

        elapsedTimeBundles = self.analytics.buildElapsedTimeBundles(
            txnCollections, classifier)

        for category, elapsedTimeBundle in elapsedTimeBundles.items():
            buckets = buildBuckets(elapsedTimeBundle[0], 35)
            if not buckets:
                LOGGER.debug(
                    'category %s does not have enough data points to generate a histogram',
                    category)
                continue

            LOGGER.debug('Buckets:\n%s', buckets)

            yaxis = []
            conflatedCounts = []
            LOGGER.debug('Bucket values:')
            for i, elapsedTimeList in enumerate(elapsedTimeBundle):
                bucketValues, conflatedCountersCount = timeAction(
                    'building counter distribution',
                    lambda bkts=buckets, etl=elapsedTimeList:
                    buildDistribution(bkts, etl))
                conflatedCounts.append(conflatedCountersCount)
                LOGGER.debug('%s', bucketValues)
                title = txnCollections[i].name
                legend = formatLegend(title, min(elapsedTimeList),
                                      max(elapsedTimeList),
                                      numpy.mean(elapsedTimeList),
                                      numpy.median(elapsedTimeList),
                                      numpy.percentile(elapsedTimeList, 95),
                                      numpy.percentile(elapsedTimeList, 99))
                yaxis.append((legend, bucketValues))

            benchmarkConflatedCounts = sum(conflatedCounts[1:])
            if conflatedCounts[0] + benchmarkConflatedCounts > 0:
                LOGGER.debug(
                    'conflation - due to narrow bucket range [%s to %s] - (%d) in current run and (%d) in all '
                    'benchmark counter values are conflated', buckets[0],
                    buckets[-1], conflatedCounts[0],
                    benchmarkConflatedCounts)

            buckets = formatBuckets(buckets)
            options, data = buildHistograms(buckets, yaxis, False)
            title = '{} - latency distribution benchmark'.format(category)
            description = 'Latency distribution (current run ID #{} vs chosen benchmarks)'.format(
                runId)
            histograms.update(
                {category: Histogram(title, description, data, options)})
        return histograms
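
A standalone sketch of the bucketing performed by buildDistribution above (the return shape - per-bucket counts plus a conflated-sample count - is inferred from the call site, not from xpedite's implementation):

import bisect

def buildDistributionSketch(buckets, elapsedTimeList):
    """Counts samples per bucket; samples beyond the last bucket are conflated"""
    bucketValues = [0] * len(buckets)
    conflatedCount = 0
    for value in elapsedTimeList:
        index = bisect.bisect_left(buckets, value)
        if index < len(buckets):
            bucketValues[index] += 1
        else:
            conflatedCount += 1  # value exceeds the largest bucket boundary
    return bucketValues, conflatedCount

# buildDistributionSketch([10, 20, 30], [5, 12, 25, 99]) -> ([1, 1, 1], 1)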