Example #1
  def load(cls, data, isolated_basedir):  # pylint: disable=W0221
    """Special case loading to disallow different OS.

    It is not possible to load a .isolated.state file from a different OS; this
    file is saved in an OS-specific format.
    """
    out = super(SavedState, cls).load(data, isolated_basedir)
    if data.get('OS') != sys.platform:
      raise isolated_format.IsolatedError('Unexpected OS %s' % data.get('OS'))

    # Converts human readable form back into the proper class type.
    algo = data.get('algo')
    if algo not in isolated_format.SUPPORTED_ALGOS:
      raise isolated_format.IsolatedError('Unknown algo \'%s\'' % out.algo)
    out.algo = isolated_format.SUPPORTED_ALGOS[algo]

    # Refuse to load a non-exact version, even for a minor difference. This is
    # unlike isolateserver.load_isolated(), because .isolated.state could have
    # changed significantly even across a minor version difference.
    if out.version != cls.EXPECTED_VERSION:
      raise isolated_format.IsolatedError(
          'Unsupported version \'%s\'' % out.version)

    # The .isolate file must be valid. If it is not present anymore, zap the
    # value as if it was not set, so .isolate_file can safely be overridden
    # later.
    if out.isolate_file and not fs.isfile(out.isolate_filepath):
      out.isolate_file = None
    if out.isolate_file:
      # It could be absolute on Windows if the drive containing the .isolate
      # and the drive containing the .isolated files differ, e.g. .isolate is
      # on C:\\ and .isolated is on D:\\.
      assert not os.path.isabs(out.isolate_file) or sys.platform == 'win32'
      assert fs.isfile(out.isolate_filepath), out.isolate_filepath
    return out
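
The algo round trip above leans on isolated_format.SUPPORTED_ALGOS to turn the human-readable name stored in the state file back into a hash class. A minimal, self-contained sketch of that idea, with a hypothetical mapping that only mirrors the real one:

import hashlib

# Hypothetical name -> constructor mapping; isolated_format.SUPPORTED_ALGOS
# plays this role in the snippet above.
SUPPORTED_ALGOS = {'sha-1': hashlib.sha1, 'sha-256': hashlib.sha256}

def algo_from_name(name):
  """Converts the human readable form back into the proper class type."""
  if name not in SUPPORTED_ALGOS:
    raise ValueError('Unknown algo %r' % name)
  return SUPPORTED_ALGOS[name]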
Example #2
def link_file(outfile, infile, action):
  """Links a file. The type of link depends on |action|.

  Returns:
    True if the action was carried out, False if fallback was used.
  """
  if action not in (HARDLINK, HARDLINK_WITH_FALLBACK, SYMLINK, COPY):
    raise ValueError('Unknown mapping action %s' % action)
  if not fs.isfile(infile):
    raise OSError('%s is missing' % infile)
  if fs.isfile(outfile):
    raise OSError(
        '%s already exists; insize:%d; outsize:%d' %
        (outfile, fs.stat(infile).st_size, fs.stat(outfile).st_size))

  if action == COPY:
    readable_copy(outfile, infile)
  elif action == SYMLINK and sys.platform != 'win32':
    # On Windows, symlinks are converted to hardlinks and fall back to copy.
    fs.symlink(infile, outfile)  # pylint: disable=E1101
  else:
    # HARDLINK or HARDLINK_WITH_FALLBACK.
    try:
      hardlink(infile, outfile)
    except OSError as e:
      if action == HARDLINK:
        raise OSError('Failed to hardlink %s to %s: %s' % (infile, outfile, e))
      # Probably a different file system.
      logging.warning(
          'Failed to hardlink, falling back to copy %s to %s' % (
            infile, outfile))
      readable_copy(outfile, infile)
      # Signal caller that fallback copy was used.
      return False
  return True
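
A hypothetical caller of link_file(), assuming the luci-py client tree is on sys.path so the module above is importable as file_path; the False return value is the only signal that the hardlink fallback copied the file instead:

import logging

from utils import file_path  # assumed import path for the module quoted above

def place_file(outfile, infile):
  # Prefer a hardlink, silently degrading to a copy across file systems.
  if not file_path.link_file(outfile, infile, file_path.HARDLINK_WITH_FALLBACK):
    logging.info('%s was copied rather than hardlinked', outfile)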
Example #3
    def test_main_naked_with_cipd_client_no_packages(self):
        cipd_cache = os.path.join(self.tempdir, 'cipd_cache')
        cmd = [
            '--no-log',
            '--cache',
            os.path.join(self.tempdir, 'isolated_cache'),
            '--cipd-enabled',
            '--cipd-client-version',
            'git:wowza',
            '--cipd-server',
            self.cipd_server.url,
            '--cipd-cache',
            cipd_cache,
            '--named-cache-root',
            os.path.join(self.tempdir, 'named_cache'),
            '--raw-cmd',
            '--relative-cwd',
            'a',
            '--',
            'bin/echo${EXECUTABLE_SUFFIX}',
            'hello',
            'world',
        ]

        self.capture_popen_env = True
        ret = run_isolated.main(cmd)
        self.assertEqual(0, ret)

        # The CIPD client was bootstrapped and hardlinked (or copied on Win).
        client_binary_file = unicode(
            os.path.join(cipd_cache, 'clients',
                         'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
        self.assertTrue(fs.isfile(client_binary_file))
        client_binary_link = unicode(
            os.path.join(cipd_cache, 'bin', 'cipd' + cipd.EXECUTABLE_SUFFIX))
        self.assertTrue(fs.isfile(client_binary_link))

        # 'cipd ensure' was NOT called (only 'echo hello world' was).
        env = self.popen_calls[0][1].pop('env')
        self.assertEqual([
            ([self.ir_dir(u'a', 'bin', 'echo'), u'hello', u'world'], {
                'cwd': self.ir_dir('a'),
                'detached': True
            }),
        ], self.popen_calls)

        # Directory with cipd client is in front of PATH.
        path = env['PATH'].split(os.pathsep)
        self.assertEqual(os.path.join(cipd_cache, 'bin'), path[0])

        # CIPD_CACHE_DIR is set.
        self.assertEqual(os.path.join(cipd_cache, 'cache'),
                         env['CIPD_CACHE_DIR'])
Example #4
def link_file(outfile, infile, action):
  """Links a file. The type of link depends on |action|.

  Returns:
    True if the action was carried out, False if fallback was used.
  """
  if action < 1 or action > COPY:
    raise ValueError('Unknown mapping action %s' % action)
  # TODO(maruel): Skip these checks.
  if not fs.isfile(infile):
    raise OSError('%s is missing' % infile)
  if fs.isfile(outfile):
    raise OSError(
        '%s already exists; insize:%d; outsize:%d' %
        (outfile, fs.stat(infile).st_size, fs.stat(outfile).st_size))

  if action == COPY:
    readable_copy(outfile, infile)
    return True

  if action in (SYMLINK, SYMLINK_WITH_FALLBACK):
    try:
      fs.symlink(infile, outfile)  # pylint: disable=E1101
      return True
    except OSError:
      if action == SYMLINK:
        raise
      logging.warning(
          'Failed to symlink, falling back to copy %s to %s' % (
            infile, outfile))
      # Signal caller that fallback copy was used.
      readable_copy(outfile, infile)
      return False

  # HARDLINK or HARDLINK_WITH_FALLBACK.
  try:
    hardlink(infile, outfile)
    return True
  except OSError as e:
    if action == HARDLINK:
      raise OSError('Failed to hardlink %s to %s: %s' % (infile, outfile, e))

  # Probably a different file system.
  logging.warning(
      'Failed to hardlink, falling back to copy %s to %s' % (
        infile, outfile))
  readable_copy(outfile, infile)
  # Signal caller that fallback copy was used.
  return False
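
The same fall-back pattern can be written with nothing but the standard library; this is an illustration of the idea, not the project's API:

import logging
import os
import shutil

def hardlink_or_copy(infile, outfile):
  """Returns True if a hardlink was made, False if it fell back to a copy."""
  try:
    os.link(infile, outfile)
    return True
  except OSError:
    # Typically a cross-device link; degrade to a plain copy.
    logging.warning('Failed to hardlink, falling back to copy %s to %s',
                    infile, outfile)
    shutil.copy2(infile, outfile)
    return False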
Example #5
    def _load(self, trim, time_fn):
        """Loads state of the cache from json file.

    If cache_dir does not exist on disk, it is created.
    """
        self._lock.assert_locked()

        if not fs.isfile(self.state_file):
            if not fs.isdir(self.cache_dir):
                fs.makedirs(self.cache_dir)
        else:
            # Load state of the cache.
            try:
                self._lru = lru.LRUDict.load(self.state_file)
            except ValueError as err:
                logging.error('Failed to load cache state: %s' % (err, ))
                # Don't want to keep broken state file.
                file_path.try_remove(self.state_file)
        if time_fn:
            self._lru.time_fn = time_fn
        if trim:
            self._trim()
        # We want the initial cache size after trimming, i.e. what is readily
        # available.
        self._initial_number_items = len(self._lru)
        self._initial_size = sum(self._lru.itervalues())
        if self._evicted:
            logging.info('Trimming evicted items with the following sizes: %s',
                         sorted(self._evicted))
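
The load path above tolerates a corrupt state file by logging the error and deleting the file rather than failing. A stdlib-only sketch of that pattern, using json in place of lru.LRUDict (illustration only):

import json
import logging
import os

def load_state(state_file):
  """Returns the parsed state, or {} if the file is missing or corrupt."""
  if not os.path.isfile(state_file):
    return {}
  try:
    with open(state_file) as f:
      return json.load(f)
  except ValueError as err:
    logging.error('Failed to load cache state: %s', err)
    # Don't keep a broken state file around.
    os.remove(state_file)
    return {}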
Example #6
    def __init__(self, cache_dir, policies, time_fn=None):
        """Initializes NamedCaches.

    Arguments:
    - cache_dir is a directory for persistent cache storage.
    - policies is a CachePolicies instance.
    - time_fn is a function that returns timestamp (float) and used to take
      timestamps when new caches are requested. Used in unit tests.
    """
        super(NamedCache, self).__init__(cache_dir)
        self._policies = policies
        # LRU {cache_name -> tuple(cache_location, size)}
        self.state_file = os.path.join(cache_dir, self.STATE_FILE)
        self._lru = lru.LRUDict()
        if not fs.isdir(self.cache_dir):
            fs.makedirs(self.cache_dir)
        elif fs.isfile(self.state_file):
            try:
                self._lru = lru.LRUDict.load(self.state_file)
            except ValueError:
                logging.exception(
                    'NamedCache: failed to load named cache state file; obliterating'
                )
                file_path.rmtree(self.cache_dir)
            with self._lock:
                self._try_upgrade()
        if time_fn:
            self._lru.time_fn = time_fn
Example #7
 def _save(self):
     """Saves the LRU ordering."""
     self._lock.assert_locked()
     if sys.platform != 'win32':
         d = os.path.dirname(self.state_file)
         if fs.isdir(d):
             # Necessary otherwise the file can't be created.
             file_path.set_read_only(d, False)
     if fs.isfile(self.state_file):
         file_path.set_read_only(self.state_file, False)
     self._lru.save(self.state_file)
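
The chmod dance above exists because both the cache directory and the state file may be marked read-only, and must be made writable before the state can be rewritten. A stdlib equivalent of the file_path.set_read_only(path, False) calls, for illustration:

import os
import stat

def clear_read_only(path):
  # Add the owner-write bit so the file or directory can be modified again.
  os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)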
Example #8
def is_valid_file(path, size):
    """Returns if the given files appears valid.

  Currently it just checks the file exists and its size matches the expectation.
  """
    if size == UNKNOWN_FILE_SIZE:
        return fs.isfile(path)
    try:
        actual_size = fs.stat(path).st_size
    except OSError as e:
        logging.warning('Can\'t read item %s, assuming it\'s invalid: %s',
                        os.path.basename(path), e)
        return False
    if size != actual_size:
        logging.warning('Found invalid item %s; %d != %d',
                        os.path.basename(path), actual_size, size)
        return False
    return True
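
A hypothetical caller of is_valid_file(): passing UNKNOWN_FILE_SIZE degenerates to a bare fs.isfile() check, while any other size must match exactly. The names are assumed to come from the module quoted above (it varies by luci-py revision, so no import is shown):

def verify_download(path, expected_size):
  # is_valid_file() and UNKNOWN_FILE_SIZE are defined in the module above.
  if not is_valid_file(path, expected_size):
    raise IOError('%s is missing or has an unexpected size' % path)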
Example #9
    def test_rmtree_win(self):
      # Mock our sleep for faster test case execution.
      sleeps = []
      self.mock(time, 'sleep', sleeps.append)
      self.mock(sys, 'stderr', StringIO.StringIO())

      # Open a child process, so the file is locked.
      subdir = os.path.join(self.tempdir, 'to_be_deleted')
      fs.mkdir(subdir)
      script = 'import time; open(\'a\', \'w\'); time.sleep(60)'
      proc = subprocess.Popen([sys.executable, '-c', script], cwd=subdir)
      try:
        # Wait until the file exists.
        while not fs.isfile(os.path.join(subdir, 'a')):
          self.assertEqual(None, proc.poll())
        file_path.rmtree(subdir)
        self.assertEqual([2, 4, 2], sleeps)
        # sys.stderr.getvalue() would return a fair amount of output but it is
        # not completely deterministic so we're not testing it here.
      finally:
        proc.wait()
Example #10
    def _load(self, trim, time_fn):
        """Loads state of the cache from json file.

    If cache_dir does not exist on disk, it is created.
    """
        self._lock.assert_locked()

        if not fs.isfile(self.state_file):
            if not fs.isdir(self.cache_dir):
                fs.makedirs(self.cache_dir)
        else:
            # Load state of the cache.
            try:
                self._lru = lru.LRUDict.load(self.state_file)
            except ValueError as err:
                logging.error('Failed to load cache state: %s' % (err, ))
                # Don't want to keep broken state file.
                file_path.try_remove(self.state_file)
        if time_fn:
            self._lru.time_fn = time_fn
        if trim:
            self._trim()
Example #11
def copy_recursively(src, dst):
  """Efficiently copies a file or directory from src_dir to dst_dir.

  `item` may be a file, directory, or a symlink to a file or directory.
  All symlinks are replaced with their targets, so the resulting
  directory structure in dst_dir will never have any symlinks.

  To increase speed, copy_recursively hardlinks individual files into the
  (newly created) directory structure if possible, unlike Python's
  shutil.copytree().
  """
  orig_src = src
  try:
    # Replace symlinks with their final target.
    while fs.islink(src):
      res = fs.readlink(src)
      src = os.path.join(os.path.dirname(src), res)
    # TODO(sadafm): Explicitly handle cyclic symlinks.

    # Note that fs.isfile (which is a wrapper around os.path.isfile) throws
    # an exception if src does not exist. A warning will be logged in that case.
    if fs.isfile(src):
      file_path.link_file(dst, src, file_path.HARDLINK_WITH_FALLBACK)
      return

    if not fs.exists(dst):
      os.makedirs(dst)

    for child in fs.listdir(src):
      copy_recursively(os.path.join(src, child), os.path.join(dst, child))

  except OSError as e:
    if e.errno == errno.ENOENT:
      logging.warning('Path %s does not exist or %s is a broken symlink',
                      src, orig_src)
    else:
      logging.info("Couldn't collect output file %s: %s", src, e)
Example #12
def get_client(service_url,
               package_template,
               version,
               cache_dir,
               timeout=None):
    """Returns a context manager that yields a CipdClient. A blocking call.

  Upon exit from the context manager, the client binary may be deleted
  (if the internal cache is full).

  Args:
    service_url (str): URL of the CIPD backend.
    package_template (str): package name template of the CIPD client.
    version (str): version of CIPD client package.
    cache_dir: directory to store instance cache, version cache
      and a hardlink to the client binary.
    timeout (int): if not None, timeout in seconds for this function.

  Yields:
    CipdClient.

  Raises:
    Error if CIPD client version cannot be resolved or client cannot be fetched.
  """
    timeoutfn = tools.sliding_timeout(timeout)

    # Package names are always lower case.
    # TODO(maruel): Assert instead?
    package_name = package_template.lower().replace('${platform}',
                                                    get_platform())

    # Resolve version to instance id.
    # Is it an instance id already? They look like HEX SHA1.
    if isolated_format.is_valid_hash(version, hashlib.sha1):
        instance_id = version
    elif ':' in version:  # it's an immutable tag, cache the resolved version
        # version_cache is {hash(package_name, tag) -> instance id} mapping.
        # It does not take a lot of disk space.
        version_cache = isolateserver.DiskCache(
            unicode(os.path.join(cache_dir, 'versions')),
            isolateserver.CachePolicies(0, 0, 300),
            hashlib.sha1,
            trim=True)
        with version_cache:
            version_cache.cleanup()
            # Convert (package_name, version) to a string that may be used as a
            # filename in disk cache by hashing it.
            version_digest = hashlib.sha1('%s\n%s' %
                                          (package_name, version)).hexdigest()
            try:
                with version_cache.getfileobj(version_digest) as f:
                    instance_id = f.read()
            except isolateserver.CacheMiss:
                instance_id = resolve_version(service_url,
                                              package_name,
                                              version,
                                              timeout=timeoutfn())
                version_cache.write(version_digest, instance_id)
    else:  # it's a ref, hit the backend
        instance_id = resolve_version(service_url,
                                      package_name,
                                      version,
                                      timeout=timeoutfn())

    # instance_cache is {instance_id -> client binary} mapping.
    # It is bounded by 5 client versions.
    instance_cache = isolateserver.DiskCache(
        unicode(os.path.join(cache_dir, 'clients')),
        isolateserver.CachePolicies(0, 0, 5),
        hashlib.sha1,
        trim=True)
    with instance_cache:
        instance_cache.cleanup()
        if instance_id not in instance_cache:
            logging.info('Fetching CIPD client %s:%s', package_name,
                         instance_id)
            fetch_url = get_client_fetch_url(service_url,
                                             package_name,
                                             instance_id,
                                             timeout=timeoutfn())
            _fetch_cipd_client(instance_cache, instance_id, fetch_url,
                               timeoutfn)

        # A single host can run multiple swarming bots, but at the moment they
        # do not share the same root bot directory. Thus, it is safe to use the
        # same name for the binary.
        cipd_bin_dir = unicode(os.path.join(cache_dir, 'bin'))
        binary_path = os.path.join(cipd_bin_dir, 'cipd' + EXECUTABLE_SUFFIX)
        if fs.isfile(binary_path):
            file_path.remove(binary_path)
        else:
            file_path.ensure_tree(cipd_bin_dir)

        with instance_cache.getfileobj(instance_id) as f:
            isolateserver.putfile(f, binary_path, 0511)  # -r-x--x--x

        _ensure_batfile(binary_path)

        yield CipdClient(binary_path,
                         package_name=package_name,
                         instance_id=instance_id,
                         service_url=service_url)
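
Hypothetical usage, relying only on what the docstring promises: get_client() is a blocking context manager that yields a CipdClient, and the client binary may disappear once the block exits. All argument values below are placeholders:

with get_client('https://example.appspot.com',    # CIPD backend URL
                'infra/tools/cipd/${platform}',    # package name template
                'git_revision:deadbeef',           # version to resolve
                '/tmp/cipd_cache') as client:
  # The fetched binary is only guaranteed to exist inside this block.
  run_cipd_commands(client)  # hypothetical helper standing in for real work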
Example #13
def expand_directory_and_symlink(indir, relfile, blacklist, follow_symlinks):
    """Expands a single input. It can result in multiple outputs.

  This function is recursive when relfile is a directory.

  Note: this code doesn't properly handle recursive symlink like one created
  with:
    ln -s .. foo

  Yields:
    tuple(Relative path, bool is_symlink) to files and symlinks inside |indir|.
  """
    if os.path.isabs(relfile):
        raise MappingError(u'Can\'t map absolute path %s' % relfile)

    infile = file_path.normpath(os.path.join(indir, relfile))
    if not infile.startswith(indir):
        raise MappingError(u'Can\'t map file %s outside %s' % (infile, indir))

    filepath = os.path.join(indir, relfile)
    native_filepath = file_path.get_native_path_case(filepath)
    if filepath != native_filepath:
        # Special case './'.
        if filepath != native_filepath + u'.' + os.path.sep:
            # While it'd be nice to enforce path casing on Windows, it's impractical.
            # Also give up enforcing strict path case on OSX. Really, it's that sad.
            # The case where it happens is very specific and hard to reproduce:
            # get_native_path_case(
            #    u'Foo.framework/Versions/A/Resources/Something.nib') will return
            # u'Foo.framework/Versions/A/resources/Something.nib', e.g. lowercase 'r'.
            #
            # Note that this is really something deep in OSX because running
            # ls Foo.framework/Versions/A
            # will print out 'Resources', while file_path.get_native_path_case()
            # returns a lower case 'r'.
            #
            # So *something* is happening under the hood resulting in the command 'ls'
            # and Carbon.File.FSPathMakeRef('path').FSRefMakePath() to disagree.  We
            # have no idea why.
            if sys.platform not in ('darwin', 'win32'):
                raise MappingError(
                    u'File path doesn\'t equal native file path\n%s != %s' %
                    (filepath, native_filepath))

    symlinks = []
    if follow_symlinks:
        try:
            relfile, symlinks = _expand_symlinks(indir, relfile)
        except OSError:
            # The file doesn't exist, it will throw below.
            pass

    # The symlinks need to be mapped in.
    for s in symlinks:
        yield s, True

    if relfile.endswith(os.path.sep):
        if not fs.isdir(infile):
            raise MappingError(u'%s is not a directory but ends with "%s"' %
                               (infile, os.path.sep))

        # Special case './'.
        if relfile.startswith(u'.' + os.path.sep):
            relfile = relfile[2:]
        try:
            for filename in fs.listdir(infile):
                inner_relfile = os.path.join(relfile, filename)
                if blacklist and blacklist(inner_relfile):
                    continue
                if fs.isdir(os.path.join(indir, inner_relfile)):
                    inner_relfile += os.path.sep
                # Apply recursively.
                for i, is_symlink in expand_directory_and_symlink(
                        indir, inner_relfile, blacklist, follow_symlinks):
                    yield i, is_symlink
        except OSError as e:
            raise MappingError(u'Unable to iterate over directory %s.\n%s' %
                               (infile, e))
    else:
        # Always add individual files even if they were blacklisted.
        if fs.isdir(infile):
            raise MappingError(
                u'Input directory %s must have a trailing slash' % infile)

        if not fs.isfile(infile):
            raise MappingError(u'Input file %s doesn\'t exist' % infile)

        yield relfile, False
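
A hypothetical walk over an input directory using the generator above; note that a directory must be passed with a trailing path separator, and every yielded path is relative to indir:

import os

def list_mapped_files(indir):
  # expand_directory_and_symlink() is the generator quoted above.
  for relpath, is_symlink in expand_directory_and_symlink(
      indir, u'.' + os.path.sep, blacklist=None, follow_symlinks=True):
    print('%s%s' % (relpath, ' (symlink)' if is_symlink else ''))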
Example #14
def get_client(service_url,
               package_template,
               version,
               cache_dir,
               timeout=None):
    """Returns a context manager that yields a CipdClient. A blocking call.

  Upon exit from the context manager, the client binary may be deleted
  (if the internal cache is full).

  Args:
    service_url (str): URL of the CIPD backend.
    package_template (str): package name template of the CIPD client.
    version (str): version of CIPD client package.
    cache_dir: directory to store instance cache, version cache
      and a hardlink to the client binary.
    timeout (int): if not None, timeout in seconds for this function.

  Yields:
    CipdClient.

  Raises:
    Error if CIPD client version cannot be resolved or client cannot be fetched.
  """
    timeoutfn = tools.sliding_timeout(timeout)

    # Package names are always lower case.
    # TODO(maruel): Assert instead?
    package_name = package_template.lower().replace('${platform}',
                                                    get_platform())

    # Resolve version to instance id.
    # Is it an instance id already? They look like HEX SHA1.
    if isolated_format.is_valid_hash(version, hashlib.sha1):
        instance_id = version
    elif ':' in version:  # it's an immutable tag, cache the resolved version
        # version_cache is {hash(package_name, tag) -> instance id} mapping.
        # It does not take a lot of disk space.
        version_cache = local_caching.DiskContentAddressedCache(
            six.text_type(os.path.join(cache_dir, 'versions')),
            local_caching.CachePolicies(
                # 1GiB.
                max_cache_size=1024 * 1024 * 1024,
                min_free_space=0,
                max_items=300,
                # 3 weeks.
                max_age_secs=21 * 24 * 60 * 60),
            trim=True)
        # Convert (package_name, version) to a string that may be used as a
        # filename in disk cache by hashing it.
        version_digest = hashlib.sha1('%s\n%s' %
                                      (package_name, version)).hexdigest()
        try:
            with version_cache.getfileobj(version_digest) as f:
                instance_id = f.read()
        except local_caching.CacheMiss:
            instance_id = resolve_version(service_url,
                                          package_name,
                                          version,
                                          timeout=timeoutfn())
            version_cache.write(version_digest, instance_id)
        version_cache.trim()
    else:  # it's a ref, hit the backend
        instance_id = resolve_version(service_url,
                                      package_name,
                                      version,
                                      timeout=timeoutfn())

    # instance_cache is {instance_id -> client binary} mapping.
    # It is bounded by 10 client versions (see max_items below).
    instance_cache = local_caching.DiskContentAddressedCache(
        six.text_type(os.path.join(cache_dir, 'clients')),
        local_caching.CachePolicies(
            # 1GiB.
            max_cache_size=1024 * 1024 * 1024,
            min_free_space=0,
            max_items=10,
            # 3 weeks.
            max_age_secs=21 * 24 * 60 * 60),
        trim=True)
    if instance_id not in instance_cache:
        logging.info('Fetching CIPD client %s:%s', package_name, instance_id)
        fetch_url = get_client_fetch_url(service_url,
                                         package_name,
                                         instance_id,
                                         timeout=timeoutfn())
        _fetch_cipd_client(instance_cache, instance_id, fetch_url, timeoutfn)

    # A single host can run multiple swarming bots, but they cannot share the
    # same root bot directory. Thus, it is safe to use the same name for the
    # binary.
    cipd_bin_dir = six.text_type(os.path.join(cache_dir, 'bin'))
    binary_path = os.path.join(cipd_bin_dir, 'cipd' + EXECUTABLE_SUFFIX)
    if fs.isfile(binary_path):
        # TODO(maruel): Do not unconditionally remove the binary.
        try:
            file_path.remove(binary_path)
        except WindowsError:  # pylint: disable=undefined-variable
            # See whether cipd.exe is running for crbug.com/1028781
            ret = subprocess42.call(['tasklist.exe'])
            if ret:
                logging.error('tasklist returns non-zero: %d', ret)
            raise
    else:
        file_path.ensure_tree(cipd_bin_dir)

    with instance_cache.getfileobj(instance_id) as f:
        isolateserver.putfile(f, binary_path, 0o511)  # -r-x--x--x

    _ensure_batfile(binary_path)

    yield CipdClient(binary_path,
                     package_name=package_name,
                     instance_id=instance_id,
                     service_url=service_url)
    instance_cache.trim()
Example #15
    def test_main_naked_with_packages(self):
        self.mock(cipd, 'get_platform', lambda: 'linux-amd64')

        pins = {
            '': [
                ('infra/data/x', 'badc0fee' * 5),
                ('infra/data/y', 'cafebabe' * 5),
            ],
            'bin': [
                ('infra/tools/echo/linux-amd64', 'deadbeef' * 5),
            ],
        }

        def fake_ensure(args, **_kwargs):
            if (args[0].endswith('/cipd') and args[1] == 'ensure'
                    and '-json-output' in args):
                idx = args.index('-json-output')
                with open(args[idx + 1], 'w') as json_out:
                    json.dump(
                        {
                            'result': {
                                subdir: [{
                                    'package': pkg,
                                    'instance_id': ver
                                } for pkg, ver in packages]
                                for subdir, packages in pins.iteritems()
                            }
                        }, json_out)
                return 0

        self.popen_mocks.append(fake_ensure)
        cipd_cache = os.path.join(self.tempdir, 'cipd_cache')
        cmd = [
            '--no-log',
            '--cache',
            os.path.join(self.tempdir, 'isolated_cache'),
            '--cipd-client-version',
            'git:wowza',
            '--cipd-package',
            'bin:infra/tools/echo/${platform}:latest',
            '--cipd-package',
            '.:infra/data/x:latest',
            '--cipd-package',
            '.:infra/data/y:canary',
            '--cipd-server',
            self.cipd_server.url,
            '--cipd-cache',
            cipd_cache,
            '--named-cache-root',
            os.path.join(self.tempdir, 'named_cache'),
            '--raw-cmd',
            '--',
            'bin/echo${EXECUTABLE_SUFFIX}',
            'hello',
            'world',
        ]
        ret = run_isolated.main(cmd)
        self.assertEqual(0, ret)

        self.assertEqual(2, len(self.popen_calls))

        # Test cipd-ensure command for installing packages.
        cipd_ensure_cmd, _ = self.popen_calls[0]
        self.assertEqual(cipd_ensure_cmd[:2], [
            os.path.join(cipd_cache, 'bin', 'cipd' + cipd.EXECUTABLE_SUFFIX),
            'ensure',
        ])
        cache_dir_index = cipd_ensure_cmd.index('-cache-dir')
        self.assertEqual(cipd_ensure_cmd[cache_dir_index + 1],
                         os.path.join(cipd_cache, 'cache'))

        # Test cipd client cache. `git:wowza` was a tag and so is cacheable.
        self.assertEqual(len(os.listdir(os.path.join(cipd_cache, 'versions'))),
                         2)
        version_file = unicode(
            os.path.join(cipd_cache, 'versions',
                         '765a0de4c618f91faf923cb68a47bb564aed412d'))
        self.assertTrue(fs.isfile(version_file))
        with open(version_file) as f:
            self.assertEqual(f.read(),
                             'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')

        client_binary_file = unicode(
            os.path.join(cipd_cache, 'clients',
                         'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
        self.assertTrue(fs.isfile(client_binary_file))

        # Test echo call.
        echo_cmd, _ = self.popen_calls[1]
        self.assertTrue(
            echo_cmd[0].endswith(os.path.sep + 'bin' + os.path.sep + 'echo' +
                                 cipd.EXECUTABLE_SUFFIX), echo_cmd[0])
        self.assertEqual(echo_cmd[1:], [u'hello', u'world'])
Example #16
  def test_main_naked_with_packages(self):
    pin_idx_ref = [0]
    pins = [
      [
        ('infra/data/x', 'badc0fee'*5),
        ('infra/data/y', 'cafebabe'*5),
      ],
      [
        ('infra/tools/echo/linux-amd64', 'deadbeef'*5),
      ],
    ]

    def fake_ensure(args, **_kwargs):
      if (args[0].endswith('/cipd') and
          args[1] == 'ensure'
          and '-json-output' in args):
        idx = args.index('-json-output')
        with open(args[idx+1], 'w') as json_out:
          json.dump({
            'result': [
              {'package': pkg, 'instance_id': ver}
              for pkg, ver in pins[pin_idx_ref[0]]
            ],
          }, json_out)
        pin_idx_ref[0] += 1
        return 0

    self.popen_mocks.append(fake_ensure)
    cipd_cache = os.path.join(self.tempdir, 'cipd_cache')
    cmd = [
      '--no-log',
      '--cache', os.path.join(self.tempdir, 'cache'),
      '--cipd-client-version', 'git:wowza',
      '--cipd-package', 'bin:infra/tools/echo/${platform}:latest',
      '--cipd-package', '.:infra/data/x:latest',
      '--cipd-package', '.:infra/data/y:canary',
      '--cipd-server', self.cipd_server.url,
      '--cipd-cache', cipd_cache,
      '--named-cache-root', os.path.join(self.tempdir, 'c'),
      'bin/echo${EXECUTABLE_SUFFIX}',
      'hello',
      'world',
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(0, ret)

    self.assertEqual(3, len(self.popen_calls))

    # Test cipd-ensure command for installing packages.
    for cipd_ensure_cmd, _ in self.popen_calls[0:2]:
      self.assertEqual(cipd_ensure_cmd[:2], [
        os.path.join(cipd_cache, 'cipd' + cipd.EXECUTABLE_SUFFIX),
        'ensure',
      ])
      cache_dir_index = cipd_ensure_cmd.index('-cache-dir')
      self.assertEqual(
          cipd_ensure_cmd[cache_dir_index+1],
          os.path.join(cipd_cache, 'cipd_internal'))

    # Test cipd client cache. `git:wowza` was a tag and so is cacheable.
    self.assertEqual(len(os.listdir(os.path.join(cipd_cache, 'versions'))), 2)
    version_file = unicode(os.path.join(
        cipd_cache, 'versions', '633d2aa4119cc66803f1600f9c4d85ce0e0581b5'))
    self.assertTrue(fs.isfile(version_file))
    with open(version_file) as f:
      self.assertEqual(f.read(), 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')

    client_binary_file = unicode(os.path.join(
        cipd_cache, 'clients', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
    self.assertTrue(fs.isfile(client_binary_file))

    # Test echo call.
    echo_cmd, _ = self.popen_calls[2]
    self.assertTrue(echo_cmd[0].endswith(
        os.path.sep + 'bin' + os.path.sep + 'echo' + cipd.EXECUTABLE_SUFFIX),
        echo_cmd[0])
    self.assertEqual(echo_cmd[1:], ['hello', 'world'])