Example #1
  def runTest(self):
    rule_tree = mmap_classifier.LoadRules(_TEST_RULES)
    mmap = memory_map.Map()
    for m in _TEST_MMAPS:
      mmap.Add(memory_map.MapEntry(
          m[0], m[1], m[2], m[3], 0, m[4], m[5], m[6], m[7]))

    res = mmap_classifier.Classify(mmap, rule_tree)

    def CheckResult(node, prefix):
      node_name = prefix + node.name
      self.assertIn(node_name, _EXPECTED_RESULTS)
      subtotal = node.values[0]
      values = node.values[1:]

      # First check that the subtotal matches clean + dirty + shared + priv.
      self.assertEqual(subtotal, values[0] + values[1] + values[2] + values[3])

      # Then check that the single values match the expectations.
      self.assertEqual(values, _EXPECTED_RESULTS[node_name])

      for child in node.children:
        CheckResult(child, node_name + '::')

    CheckResult(res.total, '')
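
The constants _TEST_RULES, _TEST_MMAPS and _EXPECTED_RESULTS are module-level fixtures defined elsewhere in the test file. Below is a minimal sketch of the shapes the test implies, with purely illustrative values (the field order of the tuples and the 'Total' root name are assumptions inferred from how the snippet unpacks and walks them, not the real fixtures):

# Illustrative fixture shapes only; NOT the real test data.
# Each _TEST_MMAPS tuple m is unpacked as
#   MapEntry(m[0], m[1], m[2], m[3], 0, m[4], m[5], m[6], m[7]),
# i.e. (start, end, prot, file) plus four byte counters (offset forced to 0).
_TEST_MMAPS = [
    (0x10000, 0x11FFF, 'rw-p', '/lib/libfoo.so', 4096, 0, 8192, 0),
]

# _EXPECTED_RESULTS maps 'Parent::Child' paths (built by CheckResult's
# prefix + '::' recursion) to the four per-node byte counters, whose sum
# must equal the node's subtotal (values[0]).
_EXPECTED_RESULTS = {
    'Total': [4096, 0, 8192, 0],
}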
Example #2
def _ListProcessClassifiedMmaps(process, mmap_rule):
    """Prints process classified memory maps
  """
    maps = process.DumpMemoryMaps()
    if not os.path.exists(mmap_rule):
        print 'File', mmap_rule, 'not found'
        return
    with open(mmap_rule) as f:
        rules = mmap_classifier.LoadRules(f.read())
    classified_results_tree = mmap_classifier.Classify(maps, rules)
    print json.dumps(classified_results_tree, cls=serialization.Encoder)
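
For reference, the file passed as mmap_rule contains a rule tree that LoadRules parses from its string content. A hypothetical sketch of such a file follows; the key names ('name', 'mmap_file', 'mmap_prot', 'children') and the patterns are assumptions about the rule format, not something this snippet confirms:

# Hypothetical rules-file contents (all names and patterns are illustrative).
[
  {
    'name': 'native-libs',
    'mmap_file': r'\.so$',  # assumed key: regex matched against the mapped file
    'children': [
      {'name': 'read-only', 'mmap_prot': 'r--'},  # assumed key: protection flags
    ],
  },
]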
Example #3
def _CreateProfile(args, req_vars):  # pylint: disable=W0613
    """Creates (and caches) a profile from a set of dumps.

  The profiling data can be retrieved afterwards using the /profile/{PROFILE_ID}
  endpoints (below).
  """
    classifier = None  # A classifier module (/classification/*_classifier.py).
    dumps = {}  # dump-time -> obj. to classify (e.g., |memory_map.Map|).
    for arg in 'type', 'source', 'ruleset':
        assert (arg in req_vars), 'Expecting %s argument in POST data' % arg
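    # Illustrative shape of the expected POST data (hypothetical values, not
    # taken from the original source):
    #   req_vars = {'type': 'mmap', 'source': 'archive', 'archive': '<name>',
    #               'snapshots': ['2014-06-01T10:00:00', ...],
    #               'ruleset': '<file under classification_rules/>'}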

    # Step 1: collect the memory dumps, according to what the client specified in
    # the 'type' and 'source' POST arguments.

    # Case 1: Generate a profile from a set of mmap dumps.
    if req_vars['type'] == 'mmap':
        classifier = mmap_classifier
        # Case 1a: Use a cached mmap dump.
        if req_vars['source'] == 'cache':
            dumps[0] = _GetCacheObject(req_vars['id'])
        # Case 1b: Load mem dumps from an archive.
        elif req_vars['source'] == 'archive':
            archive = _persistent_storage.OpenArchive(req_vars['archive'])
            if not archive:
                return _HTTP_GONE, [], ('Cannot open archive %s' %
                                        req_vars['archive'])
            first_timestamp = None
            for timestamp_str in req_vars['snapshots']:
                timestamp = dateutil.parser.parse(timestamp_str)
                if first_timestamp is None:
                    first_timestamp = timestamp
                time_delta = int((timestamp - first_timestamp).total_seconds())
                dumps[time_delta] = archive.LoadMemMaps(timestamp)

    # TODO(primiano): Add support for native_heap types.

    # Step 2: Load the rule-set specified by the client in the 'ruleset' POST arg.
    # Also, perform some basic sanity checking.
    rules_path = os.path.join(memory_inspector.ROOT_DIR,
                              'classification_rules', req_vars['ruleset'])
    if not classifier:
        return _HTTP_GONE, [], 'Classifier %s not supported.' % req_vars['type']
    if not dumps:
        return _HTTP_GONE, [], 'No memory dumps could be retrieved'
    if not os.path.isfile(rules_path):
        return _HTTP_GONE, [], 'Cannot find the rule-set %s' % rules_path
    with open(rules_path) as f:
        rules = mmap_classifier.LoadRules(f.read())

    # Step 3: Aggregate the data using the desired classifier and generate the
    # profile dictionary (which will be kept cached here in the server).
    # The resulting profile will consist of 1+ snapshots (depending on the
    # number of dumps the client has requested to process) and 1+ metrics
    # (depending on the buckets' keys returned by the classifier).

    # Converts the {time: dump_obj} dict into a {time: |AggregatedResult|} dict
    # using the classifier.
    snapshots = collections.OrderedDict(
        (time, classifier.Classify(dump, rules))
        for time, dump in sorted(dumps.iteritems()))

    # Add the profile to the cache (and eventually discard old items).
    # |profile_id| is the key that the client will use in subsequent requests
    # (to the /ajax/profile/{ID}/ endpoints) to refer to this particular profile.
    profile_id = _CacheObject(snapshots)

    first_snapshot = next(snapshots.itervalues())

    # |metrics| is the key set of any of the aggregated results.
    return _HTTP_OK, [], {
        'id': profile_id,
        'times': snapshots.keys(),
        'metrics': first_snapshot.keys,
        'rootBucket': first_snapshot.total.name + '/'
    }