Example #1
def SaveDeltaSizeInfo(delta_size_info, path, file_obj=None):
    """Saves |delta_size_info| to |path|."""

    changed_symbols = delta_size_info.raw_symbols \
        .WhereDiffStatusIs(models.DIFF_STATUS_UNCHANGED).Inverted()
    before_symbols = models.SymbolGroup(
        [sym.before_symbol for sym in changed_symbols if sym.before_symbol])
    after_symbols = models.SymbolGroup(
        [sym.after_symbol for sym in changed_symbols if sym.after_symbol])

    # TODO(huangs): Use io.BytesIO for Python 3.
    before_size_file = cStringIO.StringIO()
    after_size_file = cStringIO.StringIO()

    after_promise = parallel.CallOnThread(SaveSizeInfo,
                                          delta_size_info.after,
                                          '',
                                          file_obj=after_size_file,
                                          include_padding=True,
                                          sparse_symbols=after_symbols)
    SaveSizeInfo(delta_size_info.before,
                 '',
                 file_obj=before_size_file,
                 include_padding=True,
                 sparse_symbols=before_symbols)

    with file_obj or open(path, 'wb') as output_file:
        w = _Writer(output_file)

        # |_SIZEDIFF_HEADER| is multi-line and ends with a newline, so use
        # WriteString() instead of WriteLine().
        w.WriteString(_SIZEDIFF_HEADER)

        # JSON metadata
        headers = {
            'version': 1,
            'before_length': before_size_file.tell(),
        }
        metadata_str = json.dumps(headers, indent=2, sort_keys=True)
        # TODO(huangs): Remove .replace() after transitioning to Python 3.
        # Strip space at end of each line, injected by Python 2 json.dumps().
        metadata_str = metadata_str.replace(' \n', '\n')
        w.WriteLine(str(len(metadata_str)))
        w.WriteLine(metadata_str)

        before_size_file.seek(0)
        shutil.copyfileobj(before_size_file, output_file)

        after_promise.get()
        after_size_file.seek(0)
        shutil.copyfileobj(after_size_file, output_file)
Example #2
def SaveDeltaSizeInfo(delta_size_info, path, file_obj=None):
  """Saves |delta_size_info| to |path|."""

  changed_symbols = delta_size_info.raw_symbols \
      .WhereDiffStatusIs(models.DIFF_STATUS_UNCHANGED).Inverted()
  before_symbols = models.SymbolGroup(
      [sym.before_symbol for sym in changed_symbols if sym.before_symbol])
  after_symbols = models.SymbolGroup(
      [sym.after_symbol for sym in changed_symbols if sym.after_symbol])

  before_size_file = io.BytesIO()
  after_size_file = io.BytesIO()

  after_promise = parallel.CallOnThread(
      SaveSizeInfo,
      delta_size_info.after,
      '',
      file_obj=after_size_file,
      include_padding=True,
      sparse_symbols=after_symbols)
  SaveSizeInfo(
      delta_size_info.before,
      '',
      file_obj=before_size_file,
      include_padding=True,
      sparse_symbols=before_symbols)

  with file_obj or open(path, 'wb') as output_file:
    w = _Writer(output_file)

    # |_SIZEDIFF_HEADER| is multi-line and ends with a newline, so use
    # WriteString() instead of WriteLine().
    w.WriteString(_SIZEDIFF_HEADER)

    # JSON header fields
    fields = {
        'version': 1,
        'before_length': before_size_file.tell(),
    }
    fields_str = json.dumps(fields, indent=2, sort_keys=True)
    w.WriteLine(str(len(fields_str)))
    w.WriteLine(fields_str)

    before_size_file.seek(0)
    shutil.copyfileobj(before_size_file, output_file)

    after_promise.get()
    after_size_file.seek(0)
    shutil.copyfileobj(after_size_file, output_file)
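
The layout written above is small: the multi-line _SIZEDIFF_HEADER, one line holding the byte length of the JSON fields, the JSON itself (followed by the newline that WriteLine() appends), and then the two embedded .size payloads back to back, with 'before_length' recording where the first one ends. Below is a minimal sketch of reading that layout back, assuming _SIZEDIFF_HEADER is a bytes constant; the helper name is hypothetical and the tool's real loader lives elsewhere.

import json

def _LoadDeltaSizeInfoSketch(path):
    # Hypothetical helper, for illustration only; mirrors the writer above.
    with open(path, 'rb') as f:
        f.read(len(_SIZEDIFF_HEADER))            # Skip the multi-line header.
        fields_len = int(f.readline())           # Byte length of the JSON fields.
        fields = json.loads(f.read(fields_len))
        f.readline()                             # Newline appended by WriteLine().
        before_data = f.read(fields['before_length'])
        after_data = f.read()                    # Everything that remains.
    return fields['version'], before_data, after_data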
Example #3
def SaveDeltaSizeInfo(delta_size_info, path, file_obj=None):
    """Saves |delta_size_info| to |path|."""

    if not file_obj:
        with open(path, 'wb') as f:
            return SaveDeltaSizeInfo(delta_size_info, path, f)

    changed_symbols = delta_size_info.raw_symbols \
        .WhereDiffStatusIs(models.DIFF_STATUS_UNCHANGED).Inverted()
    before_symbols = models.SymbolGroup(
        [sym.before_symbol for sym in changed_symbols if sym.before_symbol])
    after_symbols = models.SymbolGroup(
        [sym.after_symbol for sym in changed_symbols if sym.after_symbol])

    before_size_file = io.BytesIO()
    after_size_file = io.BytesIO()

    after_promise = parallel.CallOnThread(SaveSizeInfo,
                                          delta_size_info.after,
                                          '',
                                          file_obj=after_size_file,
                                          include_padding=True,
                                          sparse_symbols=after_symbols)
    SaveSizeInfo(delta_size_info.before,
                 '',
                 file_obj=before_size_file,
                 include_padding=True,
                 sparse_symbols=before_symbols)

    w = _Writer(file_obj)
    w.WriteBytes(_COMMON_HEADER + _SIZEDIFF_HEADER)
    # JSON header fields
    fields = {
        'version': _SIZEDIFF_VERSION,
        'before_length': before_size_file.tell(),
    }
    fields_str = json.dumps(fields, indent=2, sort_keys=True)

    w.WriteLine(str(len(fields_str)))
    w.WriteLine(fields_str)

    w.WriteBytes(before_size_file.getvalue())
    after_promise.get()
    w.WriteBytes(after_size_file.getvalue())

    return None
Example #4
def SaveDeltaSizeInfo(delta_size_info, path, file_obj=None):
    """Saves |delta_size_info| to |path|."""

    changed_symbols = delta_size_info.raw_symbols \
        .WhereDiffStatusIs(models.DIFF_STATUS_UNCHANGED).Inverted()
    before_symbols = models.SymbolGroup(
        [sym.before_symbol for sym in changed_symbols if sym.before_symbol])
    after_symbols = models.SymbolGroup(
        [sym.after_symbol for sym in changed_symbols if sym.after_symbol])

    before_size_file = cStringIO.StringIO()
    after_size_file = cStringIO.StringIO()

    after_promise = concurrent.CallOnThread(SaveSizeInfo,
                                            delta_size_info.after,
                                            '',
                                            file_obj=after_size_file,
                                            include_padding=True,
                                            sparse_symbols=after_symbols)
    SaveSizeInfo(delta_size_info.before,
                 '',
                 file_obj=before_size_file,
                 include_padding=True,
                 sparse_symbols=before_symbols)

    with file_obj or open(path, 'wb') as output_file:
        output_file.write(_SIZEDIFF_HEADER)
        # JSON metadata
        headers = {
            'version': 1,
            'before_length': before_size_file.tell(),
        }
        metadata_str = json.dumps(headers, indent=2, sort_keys=True)
        output_file.write('%d\n' % len(metadata_str))
        output_file.write(metadata_str)
        output_file.write('\n')

        before_size_file.seek(0)
        shutil.copyfileobj(before_size_file, output_file)

        after_promise.get()
        after_size_file.seek(0)
        shutil.copyfileobj(after_size_file, output_file)
Example #5
def Analyze(path, lazy_paths=None):
    """Returns a SizeInfo for the given |path|.

  Args:
    path: Can be a .size file, or a .map(.gz). If the latter, then lazy_paths
        must be provided as well.
  """
    if path.endswith('.size'):
        logging.debug('Loading results from: %s', path)
        size_info = file_format.LoadSizeInfo(path)
        # Recompute derived values (padding and function names).
        logging.info('Calculating padding')
        _RemoveDuplicatesAndCalculatePadding(size_info.symbols)
        logging.info('Deriving signatures')
        # Re-parse out function parameters.
        _NormalizeNames(size_info.symbols)
        return size_info
    elif not path.endswith('.map') and not path.endswith('.map.gz'):
        raise Exception('Expected input to be a .map or a .size')
    else:
        # output_directory needed for source file information.
        lazy_paths.VerifyOutputDirectory()
        # tool_prefix needed for c++filt.
        lazy_paths.VerifyToolPrefix()

        with _OpenMaybeGz(path) as map_file:
            section_sizes, symbols = linker_map_parser.MapFileParser().Parse(
                map_file)
        size_info = models.SizeInfo(section_sizes, models.SymbolGroup(symbols))

        # Map file for some reason doesn't unmangle all names.
        logging.info('Calculating padding')
        _RemoveDuplicatesAndCalculatePadding(size_info.symbols)
        # Unmangle prints its own log statement.
        _UnmangleRemainingSymbols(size_info.symbols, lazy_paths.tool_prefix)
        logging.info('Extracting source paths from .ninja files')
        all_found = _ExtractSourcePaths(size_info.symbols,
                                        lazy_paths.output_directory)
        assert all_found, (
            'One or more source file paths could not be found. Likely caused by '
            '.ninja files being generated at a different time than the .map file.'
        )
        # Resolve paths prints its own log statement.
        logging.info('Normalizing names')
        _NormalizeNames(size_info.symbols)
        logging.info('Normalizing paths')
        _NormalizeObjectPaths(size_info.symbols)

    if logging.getLogger().isEnabledFor(logging.INFO):
        for line in describe.DescribeSizeInfoCoverage(size_info):
            logging.info(line)
    logging.info('Finished analyzing %d symbols', len(size_info.symbols))
    return size_info
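
As the docstring says, Analyze accepts either a pre-computed .size file or a raw linker .map(.gz); only the latter needs |lazy_paths| so the output directory (for .ninja lookups) and tool prefix (for c++filt) can be verified. A hedged usage sketch with illustrative file names follows; constructing lazy_paths is left to the tool's own helpers.

# A saved .size file needs no build context.
size_info = Analyze('chrome.size')

# A raw linker map additionally needs lazy_paths for output_directory / tool_prefix.
size_info = Analyze('libchrome.so.map.gz', lazy_paths=lazy_paths)
logging.info('Loaded %d symbols', len(size_info.symbols))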
Example #6
def _RemoveDuplicatesAndCalculatePadding(symbol_group):
    """Removes symbols at the same address and calculates the |padding| field.

  Symbols must already be sorted by |address|.
  """
    to_remove = []
    seen_sections = []
    for i, symbol in enumerate(symbol_group[1:]):
        prev_symbol = symbol_group[i]
        if prev_symbol.section_name != symbol.section_name:
            assert symbol.section_name not in seen_sections, (
                'Input symbols must be sorted by section, then address.')
            seen_sections.append(symbol.section_name)
            continue
        if symbol.address <= 0 or prev_symbol.address <= 0:
            continue
        # Fold symbols that are at the same address (happens in nm output).
        prev_is_padding_only = prev_symbol.size_without_padding == 0
        if symbol.address == prev_symbol.address and not prev_is_padding_only:
            symbol.size = max(prev_symbol.size, symbol.size)
            to_remove.append(symbol)
            continue
        # Even with symbols at the same address removed, overlaps can still
        # happen. In this case, padding will be negative (and this is fine).
        padding = symbol.address - prev_symbol.end_address
        # These thresholds were found by manually auditing arm32 Chrome.
        # E.g.: Set them to 0 and see what warnings get logged.
        # TODO(agrieve): See if these thresholds make sense for architectures
        #     other than arm32.
        if not symbol.name.startswith('*') and (
                symbol.section in 'rd' and padding >= 256
                or symbol.section in 't' and padding >= 64):
            # For nm data, this is caused by data that has no associated symbol.
            # The linker map file lists them with no name, but with a file.
            # Example:
            #   .data 0x02d42764 0x120 .../V8SharedWorkerGlobalScope.o
            # Whereas most look like:
            #   .data.MANGLED_NAME...
            logging.debug('Large padding of %d between:\n  A) %r\n  B) %r' %
                          (padding, prev_symbol, symbol))
            continue
        symbol.padding = padding
        symbol.size += padding
        assert symbol.size >= 0, (
            'Symbol has negative size (likely not sorted properly): '
            '%r\nprev symbol: %r' % (symbol, prev_symbol))
    # Map files have no overlaps, so worth special-casing the no-op case.
    if to_remove:
        logging.info('Removing %d overlapping symbols', len(to_remove))
        symbol_group -= models.SymbolGroup(to_remove)
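
The padding computed above is just the gap between the previous symbol's end address and the current symbol's start address within the same section, and that gap is then folded into the current symbol's size. A toy illustration of the arithmetic with stand-in objects follows (models.Symbol is not constructed here, and end_address is assumed to be address plus size):

import collections

# Stand-in carrying only the fields the padding arithmetic touches.
FakeSym = collections.namedtuple('FakeSym', 'address size')

prev_sym = FakeSym(address=0x1000, size=0x20)   # Ends at 0x1020.
cur_sym = FakeSym(address=0x1028, size=0x10)

padding = cur_sym.address - (prev_sym.address + prev_sym.size)
new_size = cur_sym.size + padding               # Padding is charged to the later symbol.
assert (padding, new_size) == (0x8, 0x18)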
Example #7
    def Finalize(self, remaining):
        self.groups.sort(key=lambda s: (s.name.startswith('Other'), -s.pss))
        if remaining:
            stars = remaining.Filter(lambda s: s.name.startswith('*'))
            if stars:
                remaining = stars.Inverted()
                stars = stars.Sorted()
                stars.SetName('** Merged Symbols')
                self.groups.append(stars)
            remaining.SetName('Other')
            self.groups.append(remaining)

        logging.debug('Finalized')
        return models.SymbolGroup(self.groups, is_sorted=True)
Example #8
  def Finalize(self, remaining):
    self.groups.sort(key=lambda s: (s.name.startswith('Other'), -abs(s.pss)))
    if remaining:
      stars = remaining.Filter(lambda s: s.name.startswith('*'))
      if stars:
        remaining = stars.Inverted()
        stars = stars.Sorted()
        stars.SetName('** Merged Symbols')
        self.groups.append(stars)

      others_by_path = remaining.GroupedByPath(depth=1).Sorted()
      for subgroup in others_by_path:
        subgroup.SetName('Other //' + subgroup.name)
      self.groups.extend(others_by_path)

    logging.debug('Finalized')
    return models.SymbolGroup(self.groups)
Example #9
def Analyze(path, output_directory=None, tool_prefix=''):
    if path.endswith('.size'):
        logging.debug('Loading results from: %s', path)
        size_info = file_format.LoadSizeInfo(path)
        # Recompute derived values (padding and function names).
        logging.info('Calculating padding')
        _RemoveDuplicatesAndCalculatePadding(size_info.symbols)
        logging.info('Deriving signatures')
        # Re-parse out function parameters.
        _NormalizeNames(size_info.symbols)
        return size_info
    elif not path.endswith('.map') and not path.endswith('.map.gz'):
        raise Exception('Expected input to be a .map or a .size')
    else:
        # Verify tool_prefix early.
        output_directory, tool_prefix = (_DetectToolPrefix(
            tool_prefix, path, output_directory))

        with _OpenMaybeGz(path) as map_file:
            section_sizes, symbols = linker_map_parser.MapFileParser().Parse(
                map_file)
        timestamp = datetime.datetime.utcfromtimestamp(os.path.getmtime(path))
        size_info = models.SizeInfo(section_sizes,
                                    models.SymbolGroup(symbols),
                                    timestamp=timestamp)

        # Map file for some reason doesn't unmangle all names.
        logging.info('Calculating padding')
        _RemoveDuplicatesAndCalculatePadding(size_info.symbols)
        # Unmangle prints its own log statement.
        _UnmangleRemainingSymbols(size_info.symbols, tool_prefix)
        logging.info('Extracting source paths from .ninja files')
        _ExtractSourcePaths(size_info.symbols, output_directory)
        # Resolve paths prints its own log statement.
        logging.info('Normalizing names')
        _NormalizeNames(size_info.symbols)
        logging.info('Normalizing paths')
        _NormalizeObjectPaths(size_info.symbols)

    if logging.getLogger().isEnabledFor(logging.INFO):
        for line in describe.DescribeSizeInfoCoverage(size_info):
            logging.info(line)
    logging.info('Finished analyzing %d symbols', len(size_info.symbols))
    return size_info
Example #10
def _ExpandSparseSymbols(sparse_symbols):
    """Expands a symbol list with all aliases of all symbols in the list.

  Args:
    sparse_symbols: A list or SymbolGroup to expand.
  """
    representative_symbols = set()
    raw_symbols = []
    logging.debug('Expanding sparse_symbols with aliases of included symbols')
    for sym in sparse_symbols:
        if sym.aliases:
            num_syms = len(representative_symbols)
            representative_symbols.add(sym.aliases[0])
            if num_syms < len(representative_symbols):
                raw_symbols.extend(sym.aliases)
        else:
            raw_symbols.append(sym)
    logging.debug('Done expanding sparse_symbols')
    return models.SymbolGroup(raw_symbols)
Example #11
def _ExpandSparseSymbols(sparse_symbols):
  """Expands a symbol list with all aliases of all symbols in the list.

  Args:
    sparse_symbols: A list or SymbolGroup to expand.
  """
  representative_symbols = set()
  raw_symbols = set()
  logging.debug('Expanding sparse_symbols with aliases of included symbols')
  for sym in sparse_symbols:
    if sym.aliases:
      representative_symbols.add(sym.aliases[0])
    else:
      raw_symbols.add(sym)
  for sym in representative_symbols:
    raw_symbols.update(set(sym.aliases))
  raw_symbols = list(raw_symbols)
  SortSymbols(raw_symbols)
  logging.debug('Done expanding sparse_symbols')
  return models.SymbolGroup(raw_symbols)
Example #12
def _LoadSizeInfoFromFile(file_obj):
  """Loads a size_info from the given file."""
  lines = iter(file_obj)
  next(lines)  # Comment line.
  actual_version = next(lines)[:-1]
  assert actual_version == _SERIALIZATION_VERSION, (
      'Version mismatch. Need to write some upgrade code.')
  json_len = int(next(lines))
  json_str = file_obj.read(json_len)
  headers = json.loads(json_str)
  section_sizes = headers['section_sizes']
  metadata = headers.get('metadata')
  lines = iter(file_obj)
  next(lines)  # newline after closing } of json.

  num_path_tuples = int(next(lines))
  path_tuples = [None] * num_path_tuples
  for i in xrange(num_path_tuples):
    path_tuples[i] = next(lines)[:-1].split('\t')

  section_names = next(lines)[:-1].split('\t')
  section_counts = [int(c) for c in next(lines)[:-1].split('\t')]

  def read_numeric(delta=False):
    ret = []
    delta_multiplier = int(delta)
    for _ in section_counts:
      value = 0
      fields = next(lines).split(' ')
      for i, f in enumerate(fields):
        value = value * delta_multiplier + int(f)
        fields[i] = value
      ret.append(fields)
    return ret

  addresses = read_numeric(delta=True)
  sizes = read_numeric(delta=False)
  path_indices = read_numeric(delta=True)

  symbol_list = [None] * sum(section_counts)
  symbol_idx = 0
  for section_index, cur_section_name in enumerate(section_names):
    for i in xrange(section_counts[section_index]):
      line = next(lines)[:-1]
      is_anonymous = line.endswith('\t1')
      name = line[:-2] if is_anonymous else line

      new_sym = models.Symbol.__new__(models.Symbol)
      new_sym.section_name = cur_section_name
      new_sym.address = addresses[section_index][i]
      new_sym.size = sizes[section_index][i]
      new_sym.name = name
      paths = path_tuples[path_indices[section_index][i]]
      new_sym.object_path = paths[0]
      new_sym.source_path = paths[1]
      new_sym.is_anonymous = is_anonymous
      new_sym.padding = 0  # Derived
      new_sym.full_name = None  # Derived
      symbol_list[symbol_idx] = new_sym
      symbol_idx += 1

  symbols = models.SymbolGroup(symbol_list)
  return models.SizeInfo(section_sizes, symbols, metadata=metadata)
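
read_numeric above doubles as a plain parser and a delta decoder: with delta=True the multiplier is 1, so each stored field is added to a running total (a cumulative sum), while with delta=False the multiplier is 0 and each field stands alone. A standalone restatement of that inner loop, with a made-up row of values:

def _decode_row(line, delta=False):
    # Illustrative restatement of read_numeric's inner loop.
    value = 0
    delta_multiplier = int(delta)
    fields = []
    for field in line.split(' '):
        value = value * delta_multiplier + int(field)
        fields.append(value)
    return fields

assert _decode_row('4096 8 24 16', delta=True) == [4096, 4104, 4128, 4144]
assert _decode_row('4096 8 24 16', delta=False) == [4096, 8, 24, 16]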
Example #13
def CreateSizeInfo(map_path,
                   elf_path,
                   tool_prefix,
                   output_directory,
                   raw_only=False):
    """Creates a SizeInfo.

  Args:
    map_path: Path to the linker .map(.gz) file to parse.
    elf_path: Path to the corresponding unstripped ELF file. Used to find symbol
        aliases and inlined functions. Can be None.
    tool_prefix: Prefix for c++filt & nm (required).
    output_directory: Build output directory. If None, source_paths and symbol
        alias information will not be recorded.
    raw_only: Fill in just the information required for creating a .size file.
  """
    source_mapper = None
    if output_directory:
        # Start by finding the elf_object_paths, so that nm can run on them while
        # the linker .map is being parsed.
        logging.info('Parsing ninja files.')
        source_mapper, elf_object_paths = ninja_parser.Parse(
            output_directory, elf_path)
        assert not elf_path or elf_object_paths, (
            'Failed to find link command in ninja files for ' +
            os.path.relpath(elf_path, output_directory))

    if elf_path:
        # Run nm on the elf file to retrieve the list of symbol names per-address.
        # This list is required because the .map file contains only a single name
        # for each address, yet multiple symbols are often coalesced when they are
        # identical. This coalescing happens mainly for small symbols and for C++
        # templates. Such symbols make up ~500kb of libchrome.so on Android.
        elf_nm_result = nm.CollectAliasesByAddressAsync(elf_path, tool_prefix)

        # Run nm on all .o/.a files to retrieve the symbol names within them.
        # The list is used to detect when multiple .o files contain the same symbol
        # (e.g. inline functions), and to update the object_path / source_path
        # fields accordingly.
        # Looking in object files is required because the .map file chooses a
        # single path for these symbols.
        # Rather than record all paths for each symbol, set the paths to be the
        # common ancestor of all paths.
        if output_directory:
            bulk_analyzer = nm.BulkObjectFileAnalyzer(tool_prefix,
                                                      output_directory)
            bulk_analyzer.AnalyzePaths(elf_object_paths)

    logging.info('Parsing Linker Map')
    with _OpenMaybeGz(map_path) as map_file:
        section_sizes, raw_symbols = (
            linker_map_parser.MapFileParser().Parse(map_file))

    if elf_path:
        logging.debug('Validating section sizes')
        elf_section_sizes = _SectionSizesFromElf(elf_path, tool_prefix)
        for k, v in elf_section_sizes.iteritems():
            if v != section_sizes.get(k):
                logging.error(
                    'ELF file and .map file do not agree on section sizes.')
                logging.error('.map file: %r', section_sizes)
                logging.error('readelf: %r', elf_section_sizes)
                sys.exit(1)

    if elf_path and output_directory:
        missed_object_paths = _DiscoverMissedObjectPaths(
            raw_symbols, elf_object_paths)
        bulk_analyzer.AnalyzePaths(missed_object_paths)
        bulk_analyzer.Close()

    if source_mapper:
        logging.info('Looking up source paths from ninja files')
        _ExtractSourcePaths(raw_symbols, source_mapper)
        assert source_mapper.unmatched_paths_count == 0, (
            'One or more source file paths could not be found. Likely caused by '
            '.ninja files being generated at a different time than the .map file.'
        )

    logging.info('Stripping linker prefixes from symbol names')
    _StripLinkerAddedSymbolPrefixes(raw_symbols)
    # Map file for some reason doesn't unmangle all names.
    # Unmangle prints its own log statement.
    _UnmangleRemainingSymbols(raw_symbols, tool_prefix)

    if elf_path:
        logging.info('Adding aliased symbols, as reported by nm')
        # This normally does not block (it's finished by this time).
        aliases_by_address = elf_nm_result.get()
        _AddSymbolAliases(raw_symbols, aliases_by_address)

        if output_directory:
            # For aliases, this provides path information where there wasn't any.
            logging.info('Computing ancestor paths for inline functions and '
                         'normalizing object paths')

            object_paths_by_name = bulk_analyzer.Get()
            logging.debug(
                'Fetched path information for %d symbols from %d files',
                len(object_paths_by_name),
                len(elf_object_paths) + len(missed_object_paths))
            _ComputeAnscestorPathsAndNormalizeObjectPaths(
                raw_symbols, object_paths_by_name, source_mapper)

    if not elf_path or not output_directory:
        logging.info('Normalizing object paths.')
        for symbol in raw_symbols:
            symbol.object_path = _NormalizeObjectPath(symbol.object_path)

    size_info = models.SizeInfo(section_sizes, models.SymbolGroup(raw_symbols))

    # Name normalization not strictly required, but makes for smaller files.
    if raw_only:
        logging.info('Normalizing symbol names')
        _NormalizeNames(size_info.symbols)
    else:
        _PostProcessSizeInfo(size_info)

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        # Padding is reported in size coverage logs.
        if raw_only:
            _CalculatePadding(size_info.symbols)
        for line in describe.DescribeSizeInfoCoverage(size_info):
            logging.info(line)
    logging.info('Recorded info for %d symbols', len(size_info.symbols))
    return size_info
Example #14
def _LoadSizeInfoFromFile(file_obj):
    """Loads a size_info from the given file."""
    lines = iter(file_obj)
    next(lines)  # Comment line.
    actual_version = next(lines)[:-1]
    assert actual_version == _SERIALIZATION_VERSION, (
        'Version mismatch. Need to write some upgrade code.')
    json_len = int(next(lines))
    json_str = file_obj.read(json_len)
    headers = json.loads(json_str)
    section_sizes = headers['section_sizes']
    metadata = headers.get('metadata')
    lines = iter(file_obj)
    next(lines)  # newline after closing } of json.

    num_path_tuples = int(next(lines))
    path_tuples = [None] * num_path_tuples
    for i in xrange(num_path_tuples):
        path_tuples[i] = next(lines)[:-1].split('\t')

    section_names = next(lines)[:-1].split('\t')
    section_counts = [int(c) for c in next(lines)[:-1].split('\t')]

    def read_numeric(delta=False):
        ret = []
        delta_multiplier = int(delta)
        for _ in section_counts:
            value = 0
            fields = next(lines).split(' ')
            for i, f in enumerate(fields):
                value = value * delta_multiplier + int(f)
                fields[i] = value
            ret.append(fields)
        return ret

    addresses = read_numeric(delta=True)
    sizes = read_numeric(delta=False)
    path_indices = read_numeric(delta=True)

    raw_symbols = [None] * sum(section_counts)
    symbol_idx = 0
    for section_index, cur_section_name in enumerate(section_names):
        alias_counter = 0
        for i in xrange(section_counts[section_index]):
            parts = next(lines)[:-1].split('\t')
            flags_part = None
            aliases_part = None

            if len(parts) == 3:
                aliases_part = parts[1]
                flags_part = parts[2]
            elif len(parts) == 2:
                if parts[1][0] == '0':
                    aliases_part = parts[1]
                else:
                    flags_part = parts[1]

            name = parts[0]
            flags = int(flags_part, 16) if flags_part else 0
            num_aliases = int(aliases_part, 16) if aliases_part else 0

            new_sym = models.Symbol.__new__(models.Symbol)
            new_sym.section_name = cur_section_name
            new_sym.address = addresses[section_index][i]
            new_sym.size = sizes[section_index][i]
            new_sym.name = name
            paths = path_tuples[path_indices[section_index][i]]
            new_sym.object_path = paths[0]
            new_sym.source_path = paths[1]
            new_sym.flags = flags
            new_sym.padding = 0  # Derived
            new_sym.full_name = None  # Derived

            if num_aliases:
                assert alias_counter == 0
                new_sym.aliases = [new_sym]
                alias_counter = num_aliases - 1
            elif alias_counter > 0:
                new_sym.aliases = raw_symbols[symbol_idx - 1].aliases
                new_sym.aliases.append(new_sym)
                alias_counter -= 1
            else:
                new_sym.aliases = None

            raw_symbols[symbol_idx] = new_sym
            symbol_idx += 1

    return models.SizeInfo(section_sizes,
                           models.SymbolGroup(raw_symbols),
                           metadata=metadata)
Example #15
def _SaveSizeInfoToFile(size_info, file_obj):
    file_obj.write('# Created by //tools/binary_size\n')
    file_obj.write('%s\n' % _SERIALIZATION_VERSION)
    headers = {
        'metadata': size_info.metadata,
        'section_sizes': size_info.section_sizes,
    }
    metadata_str = json.dumps(headers, indent=2, sort_keys=True)
    file_obj.write('%d\n' % len(metadata_str))
    file_obj.write(metadata_str)
    file_obj.write('\n')
    _LogSize(file_obj, 'header')  # For libchrome: 570 bytes.

    # Store a single copy of all paths and have them referenced by index.
    # Using an OrderedDict makes the indices more repetitive (better compression).
    path_tuples = collections.OrderedDict.fromkeys(
        (s.object_path, s.source_path) for s in size_info.symbols)
    for i, key in enumerate(path_tuples):
        path_tuples[key] = i
    file_obj.write('%d\n' % len(path_tuples))
    file_obj.writelines('%s\t%s\n' % pair for pair in path_tuples)
    _LogSize(file_obj, 'paths')  # For libchrome, adds 200kb.

    # Symbol counts by section.
    by_section = models.SymbolGroup(size_info.symbols)
    by_section = by_section.GroupBySectionName().SortedByName()
    file_obj.write('%s\n' % '\t'.join(g.name for g in by_section))
    file_obj.write('%s\n' % '\t'.join(str(len(g)) for g in by_section))

    def write_numeric(func, delta=False):
        for group in by_section:
            prev_value = 0
            last_sym = group[-1]
            for symbol in group:
                value = func(symbol)
                if delta:
                    value, prev_value = value - prev_value, value
                file_obj.write(str(value))
                if symbol is not last_sym:
                    file_obj.write(' ')
            file_obj.write('\n')

    write_numeric(lambda s: s.address, delta=True)
    _LogSize(file_obj, 'addresses')  # For libchrome, adds 300kb.
    # Do not write padding, it will be recalculated from addresses on load.
    write_numeric(lambda s: s.size_without_padding)
    _LogSize(file_obj, 'sizes')  # For libchrome, adds 300kb
    write_numeric(lambda s: path_tuples[(s.object_path, s.source_path)],
                  delta=True)
    _LogSize(file_obj, 'path indices')  # For libchrome: adds 125kb.

    prev_aliases = None
    for group in by_section:
        for symbol in group:
            # Do not write name when full_name exists. It will be derived on load.
            file_obj.write(symbol.full_name or symbol.name)
            if symbol.aliases and symbol.aliases is not prev_aliases:
                file_obj.write('\t0%x' % symbol.num_aliases)
            prev_aliases = symbol.aliases
            if symbol.flags:
                file_obj.write('\t%x' % symbol.flags)
            file_obj.write('\n')
    _LogSize(file_obj, 'names (final)')  # For libchrome: adds 3.5mb.
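
write_numeric's delta mode is the inverse of read_numeric in the loader: each field written is the difference from the previous absolute value, which keeps monotonically increasing addresses small and compressible. The path table relies on a similar space-saving idea, with OrderedDict.fromkeys de-duplicating (object_path, source_path) pairs in first-seen order before each pair is assigned its index. Two small sketches with made-up values:

import collections

def _encode_row(values):
    # Illustrative restatement of write_numeric(..., delta=True).
    prev_value = 0
    fields = []
    for value in values:
        fields.append(str(value - prev_value))
        prev_value = value
    return ' '.join(fields)

assert _encode_row([4096, 4104, 4128, 4144]) == '4096 8 24 16'

# De-duplicate path pairs and assign first-seen indices, as the saver does.
pairs = [('obj/a.o', 'a.cc'), ('obj/b.o', 'b.cc'), ('obj/a.o', 'a.cc')]
path_tuples = collections.OrderedDict.fromkeys(pairs)
for i, key in enumerate(path_tuples):
    path_tuples[key] = i
assert path_tuples[('obj/a.o', 'a.cc')] == 0
assert path_tuples[('obj/b.o', 'b.cc')] == 1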