Example 1
0
  def testInferHeuristicRules(self):
    """Builds a synthetic heap and checks heuristic rule inference on it."""
    heap = native_heap.NativeHeap()
    for idx, (alloc_size, source_path) in enumerate(
        _HEURISTIC_TEST_STACK_TRACES):
      # The frame address is irrelevant for the classifier; it only has to be
      # distinct for each entry.
      addr = (idx + 1) * 4
      frame = stacktrace.Frame(addr)
      frame.SetSymbolInfo(symbol.Symbol(str(addr), source_path))
      trace = stacktrace.Stacktrace()
      for _ in xrange(10):  # The same stack frame, repeated 10 times.
        trace.Add(frame)
      heap.Add(native_heap.Allocation(
          size=alloc_size, count=1, stack_trace=trace))

    rule_tree = native_heap_classifier.InferHeuristicRulesFromHeap(
        heap, threshold=0.05)
    result = native_heap_classifier.Classify(heap, rule_tree)
    self._CheckResult(result.total, '', _HEURISTIC_EXPECTED_RESULTS)
Example 2
0
def _CreateProfile(args, req_vars):  # pylint: disable=W0613
    """Creates (and caches) a profile from a set of dumps.

    Expected POST arguments (in |req_vars|):
      type: 'mmap' or 'nheap', the kind of dump to profile.
      source: 'archive' or 'cache', where the dumps come from.
      ruleset: 'heuristic' or the file name of a classification rule-set.
      archive, snapshots: read when source == 'archive'.
      id: cache key, read when source == 'cache'.

    Returns:
      An (http_code, headers, body) tuple. On success the body carries the
      cached profile id, its snapshot times and metric names.

    The profiling data can be retrieved afterwards using the
    /profile/{PROFILE_ID} endpoints (below).
    """
    classifier = None  # A classifier module (/classification/*_classifier.py).
    dumps = {}  # dump-time -> obj. to classify (e.g., |memory_map.Map|).
    for arg in 'type', 'source', 'ruleset':
        assert (arg in req_vars), 'Expecting %s argument in POST data' % arg

    # Step 1: collect the memory dumps, according to what the client specified in
    # the 'type' and 'source' POST arguments.

    # Case 1a: The client requests to load data from an archive.
    if req_vars['source'] == 'archive':
        archive = _persistent_storage.OpenArchive(req_vars['archive'])
        if not archive:
            return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars[
                'archive']
        first_timestamp = None
        for timestamp_str in req_vars['snapshots']:
            timestamp = file_storage.Archive.StrToTimestamp(timestamp_str)
            # Dump times are keyed relative to the first requested snapshot,
            # so the first one always lands at time_delta == 0.
            first_timestamp = first_timestamp or timestamp
            time_delta = int((timestamp - first_timestamp).total_seconds())
            if req_vars['type'] == 'mmap':
                dumps[time_delta] = archive.LoadMemMaps(timestamp)
            elif req_vars['type'] == 'nheap':
                dumps[time_delta] = archive.LoadNativeHeap(timestamp)

    # Case 1b: Use a dump recently cached (only mmap, via _DumpMmapsForProcess).
    elif req_vars['source'] == 'cache':
        assert (req_vars['type'] == 'mmap'
                ), 'Only cached mmap dumps are supported.'
        # Cached dumps are a single snapshot, keyed at time 0.
        dumps[0] = _GetCacheObject(req_vars['id'])

    if not dumps:
        return _HTTP_GONE, [], 'No memory dumps could be retrieved'

    # Initialize the classifier (mmap or nheap) and prepare symbols for nheap.
    if req_vars['type'] == 'mmap':
        classifier = mmap_classifier
    elif req_vars['type'] == 'nheap':
        classifier = native_heap_classifier
        # |archive| is always bound here: the 'cache' branch asserts
        # type == 'mmap', and any other source leaves |dumps| empty, which
        # returns early above. Hence nheap implies source == 'archive'.
        if not archive.HasSymbols():
            return _HTTP_GONE, [], 'No symbols in archive %s' % req_vars[
                'archive']
        symbols = archive.LoadSymbols()
        for nheap in dumps.itervalues():
            nheap.SymbolizeUsingSymbolDB(symbols)

    if not classifier:
        return _HTTP_GONE, [], 'Classifier %s not supported.' % req_vars['type']

    # Step 2: Load the rule-set specified by the client in the 'ruleset' POST arg.
    if req_vars['ruleset'] == 'heuristic':
        assert (req_vars['type'] == 'nheap'), (
            'heuristic rules are supported only for nheap')
        # Infer the rules from the first (t == 0) heap dump.
        rules = native_heap_classifier.InferHeuristicRulesFromHeap(dumps[0])
    else:
        rules_path = os.path.join(constants.CLASSIFICATION_RULES_PATH,
                                  req_vars['ruleset'])
        if not os.path.isfile(rules_path):
            return _HTTP_GONE, [], 'Cannot find the rule-set %s' % rules_path
        with open(rules_path) as f:
            rules = classifier.LoadRules(f.read())

    # Step 3: Aggregate the dump data using the classifier and generate the
    # profile data (which will be kept cached here in the server).
    # The resulting profile will consist of 1+ snapshots (depending on the number
    # dumps the client has requested to process) and a number of 1+ metrics
    # (depending on the buckets' keys returned by the classifier).

    # Converts the {time: dump_obj} dict into a {time: |AggregatedResult|} dict.
    # using the classifier.
    snapshots = collections.OrderedDict(
        (time, classifier.Classify(dump, rules))
        for time, dump in sorted(dumps.iteritems()))

    # Add the profile to the cache (and eventually discard old items).
    # |profile_id| is the key that the client will use in subsequent requests
    # (to the /ajax/profile/{ID}/ endpoints) to refer to this particular profile.
    profile_id = _CacheObject(snapshots)

    # |snapshots| was built in sorted time order, so this is the earliest one.
    first_snapshot = next(snapshots.itervalues())
    # NOTE(review): |keys| (no call parentheses) is presumably an attribute of
    # the classifier's AggregatedResult listing the metric names — confirm.
    return _HTTP_OK, [], {
        'id': profile_id,
        'times': snapshots.keys(),
        'metrics': first_snapshot.keys,
        'rootBucket': first_snapshot.total.name + '/'
    }