Example #1
def is_open_sourced(path, skip_directory_contents_check=False):
    """Returns if the given path is open sourced."""
    if is_open_source_repo():
        return True
    if path in ['', '.']:
        return False
    global _cached_is_open_sourced
    if path in _cached_is_open_sourced:
        return _cached_is_open_sourced[path]
    paths_metadata_file = os.path.join(path, METADATA_FILE)
    if not skip_directory_contents_check and os.path.exists(
            paths_metadata_file):
        # Check if this is the first call and the path is a directory.  If so,
        # we consider the whole directory open sourced if either it is listed
        # in the parent (which is checked later) or if it has an OPEN_SOURCE
        # file with only '*' in it.
        rules = file_util.read_metadata_file(paths_metadata_file)
        if len(rules) == 1 and rules[0] == '*':
            return _cache_open_sourced(path, True)
    parent = os.path.dirname(path)
    parent_metadata_file = os.path.join(parent, METADATA_FILE)
    if os.path.exists(parent_metadata_file):
        open_source_rules = file_util.read_metadata_file(parent_metadata_file)
        return _cache_open_sourced(
            path,
            is_basename_open_sourced(os.path.basename(path),
                                     open_source_rules))
    return _cache_open_sourced(
        path, is_open_sourced(parent, skip_directory_contents_check=True))
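
The caching helper used in this example is referenced but not shown. A minimal
sketch, assuming _cached_is_open_sourced is the module-level dict consulted
above (the body is an assumption, not the source's implementation):

_cached_is_open_sourced = {}  # module-level cache (assumed)

def _cache_open_sourced(path, result):
    # Hypothetical sketch: memoize the verdict and echo it back, so callers
    # can write 'return _cache_open_sourced(path, ...)' in one step.
    _cached_is_open_sourced[path] = result
    return result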
Example #2
def _read_ignore_rule(path):
    """Reads the mapping of paths to lint checks to ignore from a file.

  The ignore file is expected to define a simple mapping between file paths
  and the lint rules to ignore (the <List Class>.NAME attributes). Hash
  characters ('#') can be used for comments, as well as blank lines for
  readability.

  A typical filter in the file should look like:

    # Exclude src/xyzzy.cpp from the checks "gnusto" and "rezrov"
    "src/xyzzy.cpp": ["gnusto", "rezrov"]
  """
    if not path:
        return {}

    result = json.loads('\n'.join(file_util.read_metadata_file(path)))

    # Quick verification.
    # Make sure everything exists in the non-open source repo.  (We do
    # not run this check on the open source repo since not all files are
    # currently open sourced.)
    if not open_source.is_open_source_repo():
        unknown_path_list = [key for key in result if not os.path.exists(key)]
        assert not unknown_path_list, (
            'The ignore file \'%s\' contains unknown paths: %s' %
            (path, unknown_path_list))
    return result
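
Since the surviving lines are passed straight to json.loads, the ignore file
must form a valid JSON object once read_metadata_file has (presumably)
stripped comments and blank lines. A hypothetical file and call, purely for
illustration:

    # Exclude src/xyzzy.cpp from the checks "gnusto" and "rezrov"
    {
      "src/xyzzy.cpp": ["gnusto", "rezrov"]
    }

ignore_map = _read_ignore_rule('src/build/lint_ignore.json')  # made-up path
skipped_checks = ignore_map.get('src/xyzzy.cpp', [])  # ['gnusto', 'rezrov']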
Example #3
def _find_sync_set(src, src_submodules_paths):
  # sync_set is the set of relative paths of directories and files that need
  # to be synchronized/copied.
  sync_set = set()
  for src_dir, subdirs, filenames in os.walk(src):
    src_dir = os.path.normpath(src_dir)
    rel_src_dir = os.path.relpath(src_dir, src)
    # Prune all submodules; we assume they will all be open sourced, not by
    # copying files but by checking out the same revision.
    subdirs[:] = [s for s in subdirs
                  if os.path.join(src_dir, s) not in src_submodules_paths]
    # Prune all subdirectories which are symbolic links. If we walk into these
    # and ask git whether or not the files inside are ignorable, git will error
    # out complaining that the path includes a symbolic link.
    subdirs[:] = [s for s in subdirs
                  if not os.path.islink(os.path.join(src_dir, s))]
    # Prune any subdirectory matching gitignores, like out.
    subdirs[:] = [s for s in subdirs
                  if not _is_ignorable(os.path.join(src_dir, s), True, cwd=src)]
    basenames = subdirs + filenames
    if open_source.METADATA_FILE not in filenames:
      # The default (without a new OPEN_SOURCE metadata file) open sourcing
      # of a directory is the status of the open sourcing of its parent
      # directory.
      all_included = src_dir in sync_set
      _add_directory_sync_set(src, rel_src_dir, basenames,
                              lambda x, y: all_included,
                              None, sync_set)
    else:
      new_open_source_rules = file_util.read_metadata_file(
          os.path.join(src_dir, open_source.METADATA_FILE))
      _add_directory_sync_set(src, rel_src_dir, basenames,
                              open_source.is_basename_open_sourced,
                              new_open_source_rules,
                              sync_set)

  return sync_set
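
_is_ignorable is not defined in this example. Consistent with the comment
above about asking git, a plausible sketch (the signature is taken from the
call sites; everything else is an assumption) shells out to git check-ignore,
which exits 0 when a path matches a gitignore rule:

import os
import subprocess

def _is_ignorable(path, is_directory, cwd=None):
    # Hypothetical sketch. is_directory is unused here because git's
    # .gitignore matching already distinguishes directory patterns.
    return subprocess.call(
        ['git', 'check-ignore', '-q', os.path.relpath(path, cwd)],
        cwd=cwd) == 0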
Example #4
def _read_ignore_rule(path):
    """Reads the mapping of paths to lint checks to ignore from a file.

  The ignore file is expected to define a simple mapping between file paths
  and the lint rules to ignore (the <List Class>.NAME attributes). Hash
  characters ('#') can be used for comments, as well as blank lines for
  readability.

  A typical filter in the file should look like:

    # Exclude src/xyzzy.cpp from the checks "gnusto" and "rezrov"
    "src/xyzzy.cpp": ["gnusto", "rezrov"]
  """
    if not path:
        return {}

    result = json.loads("\n".join(file_util.read_metadata_file(path)))

    # Quick verification.
    # Make sure everything exists in the non-open source repo.  (We do
    # not run this check on the open source repo since not all files are
    # currently open sourced.)
    if not open_source.is_open_source_repo():
        unknown_path_list = [key for key in result if not os.path.exists(key)]
        assert not unknown_path_list, "The key in '%s' contains unknown files: %s" % (path, unknown_path_list)
    return result
Example #5
def get_all_tests():
  """Returns the list of all unittest names."""
  test_info_files = file_util.read_metadata_file(
      build_common.get_all_unittest_info_path())
  tests = set()
  for test_info_file in test_info_files:
    # The basename of |test_info_file| is something like bionic_test.1.json.
    m = re.match(r'(.+)\.[0-9]+\.json', os.path.basename(test_info_file))
    if not m:
      continue
    tests.add(m.group(1))
  return sorted(tests)
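
An illustrative check of the pattern above (not from the source): sharded
info files such as bionic_test.1.json and bionic_test.2.json both collapse to
the single test name bionic_test.

import os
import re

m = re.match(r'(.+)\.[0-9]+\.json',
             os.path.basename('out/bionic_test.2.json'))
assert m and m.group(1) == 'bionic_test'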
Example #6
 def run(self, path):
     dirname = os.path.dirname(path)
     for line in file_util.read_metadata_file(path):
         # Checks that each line matches at least one file in the directory.
         if line.startswith("!"):
             pattern = os.path.join(dirname, line[1:])
         else:
             pattern = os.path.join(dirname, line)
         if not glob.glob(pattern):
             logging.error("'%s' in %s does not match any file in %s/." % (line, path, dirname))
             return False
     return True
Example #7
 def run(self, path):
     dirname = os.path.dirname(path)
     for line in file_util.read_metadata_file(path):
         # Checks that each line matches at least one file in the directory.
         if line.startswith('!'):
             pattern = os.path.join(dirname, line[1:])
         else:
             pattern = os.path.join(dirname, line)
         if not glob.glob(pattern):
             logging.error('\'%s\' in %s does not match any file in %s/.' %
                           (line, path, dirname))
             return False
     return True
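
For illustration, suppose this metadata file sits in a directory containing
foo.cpp and foo.h plus a non-empty tests/ subdirectory; hypothetical contents
like the following would pass the check above, since each line, after
stripping a leading '!', glob-matches at least one entry:

foo.*
!tests/*

A line such as bar.cpp would instead log an error and make run() return False.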
Example #8
def is_open_sourced(path, skip_directory_contents_check=False):
    """Returns if the given path is open sourced."""
    if is_open_source_repo():
        return True
    if path in ["", "."]:
        return False
    global _cached_is_open_sourced
    if path in _cached_is_open_sourced:
        return _cached_is_open_sourced[path]
    paths_metadata_file = os.path.join(path, METADATA_FILE)
    if not skip_directory_contents_check and os.path.exists(paths_metadata_file):
        # Check if this is the first call and the path is a directory.  If so,
        # we consider the whole directory open sourced if either it is listed
        # in the parent (which is checked later) or if it has an OPEN_SOURCE
        # file with only '*' in it.
        rules = file_util.read_metadata_file(paths_metadata_file)
        if len(rules) == 1 and rules[0] == "*":
            return _cache_open_sourced(path, True)
    parent = os.path.dirname(path)
    parent_metadata_file = os.path.join(parent, METADATA_FILE)
    if os.path.exists(parent_metadata_file):
        open_source_rules = file_util.read_metadata_file(parent_metadata_file)
        return _cache_open_sourced(path, is_basename_open_sourced(os.path.basename(path), open_source_rules))
    return _cache_open_sourced(path, is_open_sourced(parent, skip_directory_contents_check=True))
Example #9
    def __init__(self,
                 deps_file_path,
                 unpacked_final_path,
                 url=None,
                 link_subdir=None,
                 download_method=None,
                 unpack_method=None,
                 cache_base_path=None,
                 cache_history_size=None):
        """Sets up the basic configuration for this package.

    |deps_file_path| is the relative path to the DEPS.XXXX file to use for this
    package.
    |unpacked_final_path| is the path the unpacked package should appear at.
    |url| is the URL to use to retrieve the download. If not specified (the
    typical case), the URL is taken from the first line of the DEPS file.
    |link_subdir| is the subdirectory of the unpacked package from the cache
    that should appear at the final location. This is useful if the archive
    unpacks to a subdirectory.
    |download_method| is a function to call taking a pair of arguments, (URL,
    archive_path), which should retrieve the package given its URL, and write
    the contents as a file to archive_path.
    |unpack_method| is a function to call taking a pair of arguments,
    (archive_path, destination_path), to extract the archive file to the
    indicated destination.
    |cache_base_path| allows a derived class to choose the cache path
    explicitly, but is really only meant for the unittest.
    |cache_history_size| allows a derived class to choose the cache history
    size, but it is really only meant for the unittest.
    """
        if cache_base_path:
            cache_base_path = os.path.abspath(cache_base_path)

        self._name = os.path.basename(unpacked_final_path)
        self._cache_base_path = cache_base_path or _DEFAULT_CACHE_BASE_PATH
        self._cache_history_size = cache_history_size or _DEFAULT_CACHE_HISTORY_SIZE
        self._deps_file_path = os.path.join(build_common.get_arc_root(),
                                            deps_file_path)
        self._unpacked_final_path = os.path.join(build_common.get_arc_root(),
                                                 unpacked_final_path)
        self._link_subdir = link_subdir or '.'
        self._download_method = download_method or default_download_url()
        self._unpack_method = unpack_method or unpack_zip_archive()
        self._deps_file_lines = file_util.read_metadata_file(deps_file_path)
        self._url = url or self._deps_file_lines[0]
        self._unpacked_cache_path = (self._get_cache_entry_path(
            self._deps_file_lines))
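
A hypothetical instantiation, assuming the enclosing class is named
BasicCachedPackage (the name and all paths below are made up):

package = BasicCachedPackage(
    'src/build/DEPS.example',   # relative DEPS file; its first line is the URL
    'third_party/example',      # where the unpacked tree should appear
    link_subdir='example-1.0')  # the archive unpacks into this subdirectory

With no url, download_method, or unpack_method given, the URL is read from the
first DEPS line and the default_download_url() / unpack_zip_archive() factory
defaults are used.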
Example #10
  def __init__(self, deps_file_path, unpacked_final_path, url=None,
               link_subdir=None, download_method=None, unpack_method=None,
               cache_base_path=None, cache_history_size=None):
    """Sets up the basic configuration for this package.

    |deps_file_path| is the relative path to the DEPS.XXXX file to use for this
    package.
    |unpacked_final_path| is the path the unpacked package should appear at.
    |url| is the URL to use to retrieve the download. If not specified (the
    typical case), the URL is taken from the first line of the DEPS file.
    |link_subdir| is the subdirectory of the unpacked package from the cache
    that should appear at the final location. This is useful if the archive
    unpacks to a subdirectory.
    |download_method| is a function to call taking a pair of arguments, (URL,
    archive_path), which should retrieve the package given its URL, and write
    the contents as a file to archive_path.
    |unpack_method| is a function to call taking a pair of arguments,
    (archive_path, destination_path), to extract the archive file to the
    indicated destination.
    |cache_base_path| allows a derived class to choose the cache path
    explicitly, but is really only meant for the unittest.
    |cache_history_size| allows a derived class to choose the cache history
    size, but it is really only meant for the unittest.
    """
    if cache_base_path:
      cache_base_path = os.path.abspath(cache_base_path)

    self._name = os.path.basename(unpacked_final_path)
    self._cache_base_path = cache_base_path or _DEFAULT_CACHE_BASE_PATH
    self._cache_history_size = cache_history_size or _DEFAULT_CACHE_HISTORY_SIZE
    self._deps_file_path = os.path.join(
        build_common.get_arc_root(), deps_file_path)
    self._unpacked_final_path = os.path.join(
        build_common.get_arc_root(), unpacked_final_path)
    self._link_subdir = link_subdir or '.'
    self._download_method = download_method or default_download_url()
    self._unpack_method = unpack_method or unpack_zip_archive()
    self._deps_file_lines = file_util.read_metadata_file(deps_file_path)
    self._url = url or self._deps_file_lines[0]
    self._unpacked_cache_path = (
        self._get_cache_entry_path(self._deps_file_lines))
Example #11
  def _populate_cache_from_non_symlinked_files(self, history):
    final_url_path = os.path.join(self._unpacked_final_path, 'URL')
    # See if there is an existing URL file
    if not os.path.isfile(final_url_path):
      return

    # Read the content of the URL file in the subdirectory to figure out
    # how to move it into the cache (the DEPS hash may not match!)
    url_file_content = file_util.read_metadata_file(final_url_path)
    cache_path = self._get_cache_entry_path(url_file_content)
    cache_link = os.path.abspath(os.path.join(cache_path, self._link_subdir))

    # Ensure that this cache path is in our history as the most recent entry.
    history.ensure_recent(cache_path)

    # If there appears to be something already cached, then we do not need to do
    # anything.
    if os.path.isdir(cache_path):
      return

    # Move the existing unpacked download into the cache directory
    file_util.makedirs_safely(os.path.dirname(cache_link))
    os.rename(self._unpacked_final_path, cache_link)
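
_get_cache_entry_path is used here and in __init__ but is not shown. A
plausible sketch, keying the cache entry on a digest of the URL/DEPS file
lines so that different revisions get distinct entries (the digest scheme is
an assumption):

import hashlib
import os

def _get_cache_entry_path(self, deps_file_lines):
    # Hypothetical sketch, in line with the 'the DEPS hash may not match!'
    # comment above: hash the file contents to name the cache entry.
    digest = hashlib.sha1(
        '\n'.join(deps_file_lines).encode('utf-8')).hexdigest()
    return os.path.join(self._cache_base_path, self._name, digest)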
Example #12
def _find_sync_set(src, src_submodules_paths):
    # sync_set is the set of relative paths of directories and files that need
    # to be synchronized/copied.
    sync_set = set()
    for src_dir, subdirs, filenames in os.walk(src):
        src_dir = os.path.normpath(src_dir)
        rel_src_dir = os.path.relpath(src_dir, src)
        # Prune all submodules; we assume they will all be open sourced, not by
        # copying files but by checking out the same revision.
        subdirs[:] = [
            s for s in subdirs
            if os.path.join(src_dir, s) not in src_submodules_paths
        ]
        # Prune all subdirectories which are symbolic links. If we walk into these
        # and ask git whether or not the files inside are ignorable, git will error
        # out complaining that the path includes a symbolic link.
        subdirs[:] = [
            s for s in subdirs
            if not os.path.islink(os.path.join(src_dir, s))
        ]
        # Prune any subdirectory matching gitignores, like out.
        subdirs[:] = [
            s for s in subdirs
            if not _is_ignorable(os.path.join(src_dir, s), True, cwd=src)
        ]
        basenames = subdirs + filenames
        if open_source.METADATA_FILE not in filenames:
            # The default (without a new OPEN_SOURCE metadata file) open
            # sourcing of a directory is the status of the open sourcing of
            # its parent directory.
            all_included = src_dir in sync_set
            _add_directory_sync_set(src, rel_src_dir, basenames,
                                    lambda x, y: all_included, None, sync_set)
        else:
            new_open_source_rules = file_util.read_metadata_file(
                os.path.join(src_dir, open_source.METADATA_FILE))
            _add_directory_sync_set(src, rel_src_dir, basenames,
                                    open_source.is_basename_open_sourced,
                                    new_open_source_rules, sync_set)

    return sync_set
Example #13
    def _populate_cache_from_non_symlinked_files(self, history):
        final_url_path = os.path.join(self._unpacked_final_path, 'URL')
        # See if there is an existing URL file
        if not os.path.isfile(final_url_path):
            return

        # Read the content of the URL file in the subdirectory to figure out
        # how to move it into the cache (the DEPS hash may not match!)
        url_file_content = file_util.read_metadata_file(final_url_path)
        cache_path = self._get_cache_entry_path(url_file_content)
        cache_link = os.path.abspath(
            os.path.join(cache_path, self._link_subdir))

        # Ensure that this cache path is in our history as the most recent entry.
        history.ensure_recent(cache_path)

        # If there appears to be something already cached, then we do not need to do
        # anything.
        if os.path.isdir(cache_path):
            return

        # Move the existing unpacked download into the cache directory
        file_util.makedirs_safely(os.path.dirname(cache_link))
        os.rename(self._unpacked_final_path, cache_link)