Example 1
  def _archive_smoke(self, size):
    self.server.store_hash_instead()
    files = {}
    for i in range(5):
      name = '512mb_%d.%s' % (i, isolateserver.ALREADY_COMPRESSED_TYPES[0])
      logging.info('Writing %s', name)
      p = os.path.join(self.tempdir, name)
      h = hashlib.sha1()
      data = os.urandom(1024)
      with open(p, 'wb') as f:
        # Write 512MiB.
        for _ in range(size // len(data)):
          f.write(data)
          h.update(data)
      os.chmod(p, 0o600)
      files[p] = h.hexdigest()

    server_ref = isolate_storage.ServerRef(self.server.url, 'default')
    with isolateserver.get_storage(server_ref) as storage:
      logging.info('Archiving')
      results, cold, hot = isolateserver.archive_files_to_storage(
          storage, list(files), None)
      logging.info('Done')

    expected = {'default': {h: h for h in files.values()}}
    self.assertEqual(expected, self.server.contents)
    self.assertEqual(files, dict(results))
    # Everything is cold.
    self.assertEqual(
        sorted(files.items()), sorted((f.path, f.digest) for f in cold))
    self.assertEqual([], [(f.path, f.digest) for f in hot])
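A note on the example above: the file is hashed while it is written, in fixed-size chunks, so the 512 MiB payload never has to fit in memory. A minimal self-contained sketch of the same technique (the helper name and sizes are illustrative, not part of the original code):

import hashlib
import os

def write_repeated(path, total_size, chunk_size=1024):
  """Writes total_size bytes of repeated random data, hashing as it goes."""
  chunk = os.urandom(chunk_size)
  digest = hashlib.sha1()
  with open(path, 'wb') as f:
    for _ in range(total_size // chunk_size):
      f.write(chunk)
      digest.update(chunk)
  return digest.hexdigest()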
Example 2
  def run_push_and_fetch_test(self, namespace):
    storage = isolateserver.get_storage(
        isolate_storage.ServerRef(self.server.url, namespace))

    # Upload items.
    items = [
        isolateserver.BufferItem('item %d' % i, storage.server_ref.hash_algo)
        for i in range(10)
    ]
    uploaded = storage.upload_items(items)
    self.assertEqual(set(items), set(uploaded))

    # Fetch them all back into local memory cache.
    cache = local_caching.MemoryContentAddressedCache()
    queue = isolateserver.FetchQueue(storage, cache)

    # Start fetching.
    pending = set()
    for item in items:
      pending.add(item.digest)
      queue.add(item.digest)
      queue.wait_on(item.digest)

    # Wait for fetch to complete.
    while pending:
      fetched = queue.wait()
      pending.discard(fetched)

    # Ensure fetched same data as was pushed.
    actual = []
    for i in items:
      with cache.getfileobj(i.digest) as f:
        actual.append(f.read())

    self.assertEqual([''.join(i.content()) for i in items], actual)
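The pending-set drain above (add every digest, then call wait() until the set is empty) is a generic fan-out/fan-in pattern. A self-contained Python 3 sketch of the same loop using only the standard library, with no isolateserver dependency:

import concurrent.futures

def fetch_all(fetch_one, digests):
  """Runs fetch_one over every digest, draining results as they complete."""
  results = {}
  with concurrent.futures.ThreadPoolExecutor() as pool:
    pending = {pool.submit(fetch_one, d): d for d in digests}
    for future in concurrent.futures.as_completed(pending):
      results[pending[future]] = future.result()
  return results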
Example 3
def upload_zip_bundle(isolate_server, bundle):
  """Uploads a zip package to isolate storage and returns raw fetch URL.

  Args:
    isolate_server: URL of an isolate server.
    bundle: instance of ZipPackage to upload.

  Returns:
    URL to get the file from.
  """
  # The Swarming bot needs to be able to grab the file from the storage using
  # a raw HTTP GET. Use the 'default' namespace so that the raw data returned
  # to a bot is not zipped, since swarm_bot doesn't understand compressed
  # data yet. This namespace has nothing to do with the |namespace| passed to
  # run_isolated.py, which is used to store files for the isolated task.
  logging.info('Zipping up and uploading files...')
  start_time = now()
  isolate_item = isolateserver.BufferItem(
      bundle.zip_into_buffer(), high_priority=True)
  with isolateserver.get_storage(isolate_server, 'default') as storage:
    uploaded = storage.upload_items([isolate_item])
    bundle_url = storage.get_fetch_url(isolate_item)
  elapsed = now() - start_time
  if isolate_item in uploaded:
    logging.info('Upload complete, time elapsed: %f', elapsed)
  else:
    logging.info('Zip file already on server, time elapsed: %f', elapsed)
  return bundle_url
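Because the bundle is stored in the 'default' namespace, the returned URL can be fetched with a plain HTTP GET and no isolate client at all. A hedged sketch of what a consumer could do with bundle_url (Python 3 stdlib; download_bundle is an illustrative name, not part of the original code):

import urllib.request

def download_bundle(bundle_url, dest_path):
  """Fetches the uploaded zip with a raw GET, as a Swarming bot would."""
  with urllib.request.urlopen(bundle_url) as resp, open(dest_path, 'wb') as f:
    f.write(resp.read())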
Example 4
  def run_push_and_fetch_test(self, namespace):
    storage = isolateserver.get_storage(self.rootdir, namespace)

    # Upload items.
    items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)]
    uploaded = storage.upload_items(items)
    self.assertEqual(set(items), set(uploaded))

    # Fetch them all back into local memory cache.
    cache = isolateserver.MemoryCache()
    queue = isolateserver.FetchQueue(storage, cache)

    # Start fetching.
    pending = set()
    for item in items:
      pending.add(item.digest)
      queue.add(item.digest)

    # Wait for fetch to complete.
    while pending:
      fetched = queue.wait(pending)
      pending.discard(fetched)

    # Ensure fetched same data as was pushed.
    self.assertEqual(
        [i.buffer for i in items],
        [cache.read(i.digest) for i in items])
Example 5
    def run_push_and_fetch_test(self, namespace):
        storage = isolateserver.get_storage(self.server.url, namespace)

        # Upload items.
        items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)]
        uploaded = storage.upload_items(items)
        self.assertEqual(set(items), set(uploaded))

        # Fetch them all back into local memory cache.
        cache = isolateserver.MemoryCache()
        queue = isolateserver.FetchQueue(storage, cache)

        # Start fetching.
        pending = set()
        for item in items:
            pending.add(item.digest)
            queue.add(item.digest)

        # Wait for fetch to complete.
        while pending:
            fetched = queue.wait(pending)
            pending.discard(fetched)

        # Ensure fetched same data as was pushed.
        self.assertEqual([i.buffer for i in items],
                         [cache.read(i.digest) for i in items])
Example 6
def main(args):
    tools.disable_buffering()
    parser = tools.OptionParserWithLogging(usage="%prog <options>", version=__version__, log_file=RUN_ISOLATED_LOG_FILE)

    data_group = optparse.OptionGroup(parser, "Data source")
    data_group.add_option("-s", "--isolated", metavar="FILE", help="File/url describing what to map or run")
    data_group.add_option("-H", "--hash", help="Hash of the .isolated to grab from the hash table")
    isolateserver.add_isolate_server_options(data_group, True)
    parser.add_option_group(data_group)

    cache_group = optparse.OptionGroup(parser, "Cache management")
    cache_group.add_option("--cache", default="cache", metavar="DIR", help="Cache directory, default=%default")
    cache_group.add_option(
        "--max-cache-size",
        type="int",
        metavar="NNN",
        default=20 * 1024 * 1024 * 1024,
        help="Trim if the cache gets larger than this value, default=%default",
    )
    cache_group.add_option(
        "--min-free-space",
        type="int",
        metavar="NNN",
        default=2 * 1024 * 1024 * 1024,
        help="Trim if disk free space becomes lower than this value, " "default=%default",
    )
    cache_group.add_option(
        "--max-items",
        type="int",
        metavar="NNN",
        default=100000,
        help="Trim if more than this number of items are in the cache " "default=%default",
    )
    parser.add_option_group(cache_group)

    auth.add_auth_options(parser)
    options, args = parser.parse_args(args)
    auth.process_auth_options(parser, options)
    isolateserver.process_isolate_server_options(data_group, options)

    if bool(options.isolated) == bool(options.hash):
        parser.error("One and only one of --isolated or --hash is required.")

    options.cache = os.path.abspath(options.cache)
    policies = CachePolicies(options.max_cache_size, options.min_free_space, options.max_items)

    try:
        # |options.cache| path may not exist until DiskCache() instance is created.
        cache = DiskCache(options.cache, policies, isolateserver.get_hash_algo(options.namespace))
        remote = options.isolate_server or options.indir
        with isolateserver.get_storage(remote, options.namespace) as storage:
            # Hashing schemes used by |storage| and |cache| MUST match.
            assert storage.hash_algo == cache.hash_algo
            return run_tha_test(options.isolated or options.hash, storage, cache, args)
    except Exception as e:
        # Make sure any exception is logged.
        tools.report_error(e)
        logging.exception(e)
        return 1
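The three cache options map onto a single trim predicate: trim when the cache exceeds 20 GiB, when free disk space drops under 2 GiB, or when the cache holds more than 100,000 items. A minimal sketch of that predicate, assuming the limits are passed in as plain integers (should_trim is an illustrative name):

def should_trim(cache_size, free_space, item_count,
                max_cache_size=20 * 1024 * 1024 * 1024,
                min_free_space=2 * 1024 * 1024 * 1024,
                max_items=100000):
  """True if any of the CachePolicies limits described above is exceeded."""
  return (cache_size > max_cache_size
          or free_space < min_free_space
          or item_count > max_items)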
Example 7
def upload_zip_bundle(isolate_server, bundle):
    """Uploads a zip package to isolate storage and returns raw fetch URL.

  Args:
    isolate_server: URL of an isolate server.
    bundle: instance of ZipPackage to upload.

  Returns:
    URL to get the file from on success.
    None on failure.
  """
    # The Swarming bot needs to be able to grab the file from the storage using
    # a raw HTTP GET. Use the 'default' namespace so that the raw data returned
    # to a bot is not zipped, since swarm_bot doesn't understand compressed
    # data yet. This namespace has nothing to do with the |namespace| passed to
    # run_isolated.py, which is used to store files for the isolated task.
    logging.info('Zipping up and uploading files...')
    try:
        start_time = now()
        isolate_item = isolateserver.BufferItem(bundle.zip_into_buffer(),
                                                high_priority=True)
        with isolateserver.get_storage(isolate_server, 'default') as storage:
            uploaded = storage.upload_items([isolate_item])
            bundle_url = storage.get_fetch_url(isolate_item)
        elapsed = now() - start_time
    except (IOError, OSError) as exc:
        tools.report_error('Failed to upload the zip file: %s' % exc)
        return None
    if isolate_item in uploaded:
        logging.info('Upload complete, time elapsed: %f', elapsed)
    else:
        logging.info('Zip file already on server, time elapsed: %f', elapsed)
    return bundle_url
Example 8
  def __init__(
      self, isolated_hash, test_name, shards, test_filter, slave_os,
      working_dir, isolate_server, verbose, profile, priority, algo):
    """Populates a manifest object.
      Args:
        isolated_hash - The manifest's sha-1 that the slave is going to fetch.
        test_name - The name to give the test request.
        shards - The number of swarm shards to request.
        test_filter - The gtest filter to apply when running the test.
        slave_os - OS to run on.
        working_dir - Relative working directory to start the script.
        isolate_server - isolate server url.
        verbose - if True, have the slave print more details.
        profile - if True, have the slave print more timing data.
        priority - int between 0 and 1000; a lower value means a higher priority.
        algo - hashing algorithm used.
    """
    self.isolated_hash = isolated_hash
    self.bundle = zip_package.ZipPackage(ROOT_DIR)

    self._test_name = test_name
    self._shards = shards
    self._test_filter = test_filter
    self._target_platform = slave_os
    self._working_dir = working_dir

    self.isolate_server = isolate_server
    self.storage = isolateserver.get_storage(isolate_server, 'default')
    self.verbose = bool(verbose)
    self.profile = bool(profile)
    self.priority = priority
    self._algo = algo

    self._isolate_item = None
    self._tasks = []
Example 9
def isolate_and_archive(trees, server_ref):
  """Isolates and uploads a bunch of isolated trees.

  Args:
    trees: list of pairs (Options, working directory) that describe what tree
        to isolate. Options are processed by 'process_isolate_options'.
    server_ref: isolate_storage.ServerRef instance.

  Returns a dict {target name -> isolate hash or None}, where target name is
  a name of *.isolated file without an extension (e.g. 'base_unittests').

  There are multiple failure modes:
    * If the upload fails due to a server or network error, returns None.
    * If some *.isolate file is incorrect (but the rest are fine and were
      successfully uploaded), returns a dict where the value of the entry
      corresponding to the invalid *.isolate file is None.
  """
  if not trees:
    return {}

  # Helper generator to avoid materializing the full (huge) list of files until
  # the very end (in upload_items()).
  def emit_files(root_dir, files):
    for path, meta in files.iteritems():
      yield (os.path.join(root_dir, path), meta)

  # Process all *.isolate files; this involves parsing, file system traversal
  # and hashing. The result is a list of generators that produce files to
  # upload and the mapping {target name -> hash of *.isolated file} to return
  # from this function.
  files_generators = []
  isolated_hashes = {}
  with tools.Profiler('Isolate'):
    for opts, cwd in trees:
      target_name = os.path.splitext(os.path.basename(opts.isolated))[0]
      try:
        complete_state, files, isolated_hash = prepare_for_archival(opts, cwd)
        files_generators.append(emit_files(complete_state.root_dir, files))
        isolated_hashes[target_name] = isolated_hash[0]
        print('%s  %s' % (isolated_hash[0], target_name))
      except Exception:
        logging.exception('Exception when isolating %s', target_name)
        isolated_hashes[target_name] = None

  # All bad? Nothing to upload.
  if all(v is None for v in isolated_hashes.itervalues()):
    return isolated_hashes

  # Now upload all necessary files at once.
  with tools.Profiler('Upload'):
    try:
      items = _process_infiles(itertools.chain(*files_generators))
      with isolateserver.get_storage(server_ref) as storage:
        storage.upload_items(items)
    except Exception:
      logging.exception('Exception while uploading files')
      return None

  return isolated_hashes
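Given the docstring's two failure modes, a caller has to distinguish a wholesale upload failure (the function returns None) from per-target isolate errors (None values inside the returned dict). A hedged sketch of that dispatch, assuming trees and server_ref are built as described above:

import logging

results = isolate_and_archive(trees, server_ref)
if results is None:
  logging.error('Upload failed entirely; server or network error.')
else:
  for target, isolated_hash in sorted(results.items()):
    if isolated_hash is None:
      logging.error('%s: bad *.isolate file, nothing uploaded', target)
    else:
      logging.info('%s uploaded as %s', target, isolated_hash)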
Example 10
class RunIsolatedTestOutputFiles(RunIsolatedTestBase):
    def _run_test(self, isolated, command):
        # Starts a full isolate server mock and has run_tha_test() upload
        # results back after the task completes.
        server = isolateserver_mock.MockIsolateServer()
        try:
            script = ('import sys\n'
                      'open(sys.argv[1], "w").write("bar")\n'
                      'open(sys.argv[2], "w").write("baz")\n')
            script_hash = isolateserver_mock.hash_content(script)
            isolated['files']['cmd.py'] = {
                'h': script_hash,
                'm': 0700,
                's': len(script),
            }
            if sys.platform == 'win32':
                isolated['files']['cmd.py'].pop('m')
            isolated_data = json_dumps(isolated)
            isolated_hash = isolateserver_mock.hash_content(isolated_data)
            server.add_content('default-store', script)
            server.add_content('default-store', isolated_data)
            store = isolateserver.get_storage(server.url, 'default-store')

            self.mock(sys, 'stdout', StringIO.StringIO())
            ret = run_isolated.run_tha_test(command, isolated_hash, store,
                                            isolateserver.MemoryCache(),
                                            ['foo', 'foodir/foo2'],
                                            init_named_caches_stub, False,
                                            None, None, None, None, None,
                                            run_isolated.noop_install_packages,
                                            False)
            self.assertEqual(0, ret)

            # It uploaded back. Assert the store has a new item containing foo.
            hashes = {isolated_hash, script_hash}
            foo_output_hash = isolateserver_mock.hash_content('bar')
            foo2_output_hash = isolateserver_mock.hash_content('baz')
            hashes.add(foo_output_hash)
            hashes.add(foo2_output_hash)
            isolated = {
                'algo': 'sha-1',
                'files': {
                    'foo': {
                        'h': foo_output_hash,
                        # TODO(maruel): Handle umask.
                        'm': 0640,
                        's': 3,
                    },
                    'foodir/foo2': {
                        'h': foo2_output_hash,
                        # TODO(maruel): Handle umask.
                        'm': 0640,
                        's': 3,
                    },
                },
                'version': isolated_format.ISOLATED_FILE_VERSION,
            }
Example 11
def main(args):
    parser = logging_utils.OptionParserWithLogging(
        usage='%prog <options>',
        version=__version__,
        log_file=RUN_ISOLATED_LOG_FILE)
    parser.add_option(
        '--json',
        help=
        'dump output metadata to json file. When used, run_isolated returns '
        'non-zero only on internal failure')
    parser.add_option('--hard-timeout',
                      type='float',
                      help='Enforce hard timeout in execution')
    parser.add_option('--grace-period',
                      type='float',
                      help='Grace period between SIGTERM and SIGKILL')
    data_group = optparse.OptionGroup(parser, 'Data source')
    data_group.add_option(
        '-s',
        '--isolated',
        help='Hash of the .isolated to grab from the isolate server')
    isolateserver.add_isolate_server_options(data_group)
    parser.add_option_group(data_group)

    isolateserver.add_cache_options(parser)
    parser.set_defaults(cache='cache')

    debug_group = optparse.OptionGroup(parser, 'Debugging')
    debug_group.add_option(
        '--leak-temp-dir',
        action='store_true',
        help='Deliberately leak isolate\'s temp dir for later examination '
        '[default: %default]')
    debug_group.add_option('--root-dir',
                           help='Use a directory instead of a random one')
    parser.add_option_group(debug_group)

    auth.add_auth_options(parser)
    options, args = parser.parse_args(args)
    if not options.isolated:
        parser.error('--isolated is required.')
    auth.process_auth_options(parser, options)
    isolateserver.process_isolate_server_options(parser, options, True)

    cache = isolateserver.process_cache_options(options)
    if options.root_dir:
        options.root_dir = unicode(os.path.abspath(options.root_dir))
    if options.json:
        options.json = unicode(os.path.abspath(options.json))
    with isolateserver.get_storage(options.isolate_server,
                                   options.namespace) as storage:
        # Hashing schemes used by |storage| and |cache| MUST match.
        assert storage.hash_algo == cache.hash_algo
        return run_tha_test(options.isolated, storage, cache,
                            options.leak_temp_dir, options.json,
                            options.root_dir, options.hard_timeout,
                            options.grace_period, args)
Example 12
    def __init__(
        self,
        isolate_server,
        namespace,
        isolated_hash,
        task_name,
        extra_args,
        shards,
        env,
        dimensions,
        working_dir,
        deadline,
        verbose,
        profile,
        priority,
    ):
        """Populates a manifest object.
      Args:
        isolate_server - isolate server url.
        namespace - isolate server namespace to use.
        isolated_hash - the manifest's sha-1 that the slave is going to fetch.
        task_name - the name to give the task request.
        extra_args - additional arguments to pass to isolated command.
        shards - the number of swarming shards to request.
        env - environment variables to set.
        dimensions - dimensions to filter the task on.
        working_dir - relative working directory to start the script.
        deadline - maximum pending time before this task expires.
        verbose - if True, have the slave print more details.
        profile - if True, have the slave print more timing data.
        priority - int between 0 and 1000; a lower value means a higher priority.
    """
        self.isolate_server = isolate_server
        self.namespace = namespace
        # swarm_bot doesn't understand compressed data yet, so the data to be
        # downloaded by swarm_bot lives in 'default', independent of what
        # run_isolated.py is going to fetch.
        self.storage = isolateserver.get_storage(isolate_server, "default")

        self.isolated_hash = isolated_hash
        self.extra_args = tuple(extra_args or [])
        self.bundle = zip_package.ZipPackage(ROOT_DIR)

        self._task_name = task_name
        self._shards = shards
        self._env = env.copy()
        self._dimensions = dimensions.copy()
        self._working_dir = working_dir
        self._deadline = deadline

        self.verbose = bool(verbose)
        self.profile = bool(profile)
        self.priority = priority

        self._isolate_item = None
        self._tasks = []
Example 13
def main(args):
  tools.disable_buffering()
  parser = logging_utils.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)
  parser.add_option(
      '--json',
      help='dump output metadata to json file. When used, run_isolated returns '
           'non-zero only on internal failure')
  parser.add_option(
      '--hard-timeout', type='int', help='Enforce hard timeout in execution')
  parser.add_option(
      '--grace-period', type='int',
      help='Grace period between SIGTERM and SIGKILL')
  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      help='Hash of the .isolated to grab from the isolate server')
  isolateserver.add_isolate_server_options(data_group)
  parser.add_option_group(data_group)

  isolateserver.add_cache_options(parser)
  parser.set_defaults(cache='cache')

  debug_group = optparse.OptionGroup(parser, 'Debugging')
  debug_group.add_option(
      '--leak-temp-dir',
      action='store_true',
      help='Deliberately leak isolate\'s temp dir for later examination '
          '[default: %default]')
  debug_group.add_option(
      '--root-dir', help='Use a directory instead of a random one')
  parser.add_option_group(debug_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  if not options.isolated:
    parser.error('--isolated is required.')
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(parser, options, True)

  cache = isolateserver.process_cache_options(options)
  if options.root_dir:
    options.root_dir = unicode(os.path.abspath(options.root_dir))
  if options.json:
    options.json = unicode(os.path.abspath(options.json))
  with isolateserver.get_storage(
      options.isolate_server, options.namespace) as storage:
    # Hashing schemes used by |storage| and |cache| MUST match.
    assert storage.hash_algo == cache.hash_algo
    return run_tha_test(
        options.isolated, storage, cache, options.leak_temp_dir, options.json,
        options.root_dir, options.hard_timeout, options.grace_period, args)
Example 14
  def run_synchronous_push_test(self, namespace):
    storage = isolateserver.get_storage(self.rootdir, namespace)

    # Items to upload.
    items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)]

    # Storage is empty, all items are missing.
    missing = dict(storage.get_missing_items(items))
    self.assertEqual(set(items), set(missing))

    # Push, one by one.
    for item, push_state in missing.iteritems():
      storage.push(item, push_state)

    # All items are there now.
    self.assertFalse(dict(storage.get_missing_items(items)))
Example 15
    def run_synchronous_push_test(self, namespace):
        storage = isolateserver.get_storage(self.server.url, namespace)

        # Items to upload.
        items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)]

        # Storage is empty, all items are missing.
        missing = dict(storage.get_missing_items(items))
        self.assertEqual(set(items), set(missing))

        # Push, one by one.
        for item, push_state in missing.iteritems():
            storage.push(item, push_state)

        # All items are there now.
        self.assertFalse(dict(storage.get_missing_items(items)))
Example 16
  def run_upload_items_test(self, namespace):
    storage = isolateserver.get_storage(self.server.url, namespace)

    # Items to upload.
    items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)]

    # Do it.
    uploaded = storage.upload_items(items)
    self.assertEqual(set(items), set(uploaded))

    # All items are there now.
    self.assertFalse(dict(storage.get_missing_items(items)))

    # Now ensure upload_items skips existing items.
    more = [isolateserver.BufferItem('more item %d' % i) for i in xrange(10)]

    # Uploaded only |more|.
    uploaded = storage.upload_items(items + more)
    self.assertEqual(set(more), set(uploaded))
Example 17
    def __init__(self, isolate_server, namespace, isolated_hash, task_name,
                 extra_args, shards, env, dimensions, working_dir, deadline,
                 verbose, profile, priority):
        """Populates a manifest object.
      Args:
        isolate_server - isolate server url.
        namespace - isolate server namespace to use.
        isolated_hash - the manifest's sha-1 that the slave is going to fetch.
        task_name - the name to give the task request.
        extra_args - additional arguments to pass to isolated command.
        shards - the number of swarming shards to request.
        env - environment variables to set.
        dimensions - dimensions to filter the task on.
        working_dir - relative working directory to start the script.
        deadline - maximum pending time before this task expires.
        verbose - if True, have the slave print more details.
        profile - if True, have the slave print more timing data.
        priority - int between 0 and 1000; a lower value means a higher priority.
    """
        self.isolate_server = isolate_server
        self.namespace = namespace
        # swarm_bot doesn't understand compressed data yet, so the data to be
        # downloaded by swarm_bot lives in 'default', independent of what
        # run_isolated.py is going to fetch.
        self.storage = isolateserver.get_storage(isolate_server, 'default')

        self.isolated_hash = isolated_hash
        self.extra_args = tuple(extra_args or [])
        self.bundle = zip_package.ZipPackage(ROOT_DIR)

        self._task_name = task_name
        self._shards = shards
        self._env = env.copy()
        self._dimensions = dimensions.copy()
        self._working_dir = working_dir
        self._deadline = deadline

        self.verbose = bool(verbose)
        self.profile = bool(profile)
        self.priority = priority

        self._isolate_item = None
        self._tasks = []
Example 18
  def run_upload_items_test(self, namespace):
    storage = isolateserver.get_storage(self.rootdir, namespace)

    # Items to upload.
    items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)]

    # Do it.
    uploaded = storage.upload_items(items)
    self.assertEqual(set(items), set(uploaded))

    # All items are there now.
    self.assertFalse(dict(storage.get_missing_items(items)))

    # Now ensure upload_items skips existing items.
    more = [isolateserver.BufferItem('more item %d' % i) for i in xrange(10)]

    # Uploaded only |more|.
    uploaded = storage.upload_items(items + more)
    self.assertEqual(set(more), set(uploaded))
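upload_items can skip items that are already present because BufferItem is content-addressed: the digest is derived from the bytes alone, so 'item 3' hashes to the same digest on every run and the server reports it as not missing. A tiny illustration of that determinism with plain hashlib (sha1 being the hash of the default namespace):

import hashlib

first = hashlib.sha1(b'item 3').hexdigest()
second = hashlib.sha1(b'item 3').hexdigest()
assert first == second  # Same content, same digest, so a re-upload is skipped.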
Example 19
  def _get_storage(self, isolate_server, namespace):
    """Returns isolateserver.Storage to use to fetch files."""
    assert self.task_output_dir
    with self._lock:
      if not self._storage:
        self._storage = isolateserver.get_storage(isolate_server, namespace)
      else:
        # Shards must all use the exact same isolate server and namespace.
        if self._storage.location != isolate_server:
          logging.error(
              'Task shards are using multiple isolate servers: %s and %s',
              self._storage.location, isolate_server)
          return None
        if self._storage.namespace != namespace:
          logging.error(
              'Task shards are using multiple namespaces: %s and %s',
              self._storage.namespace, namespace)
          return None
      return self._storage
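_get_storage is a lock-guarded memoization: the first shard to arrive pins the (server, namespace) pair, and later shards must match it or get None back. The same shape reduced to a self-contained sketch (class and method names are illustrative):

import threading

class StoragePin(object):
  """Remembers the first (server, namespace) seen; rejects mismatches."""

  def __init__(self):
    self._lock = threading.Lock()
    self._pinned = None

  def get(self, server, namespace):
    with self._lock:
      if self._pinned is None:
        self._pinned = (server, namespace)
      elif self._pinned != (server, namespace):
        return None  # Shards disagree; the caller treats this as an error.
      return self._pinned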
Example 20
    def _get_storage(self, isolate_server, namespace):
        """Returns isolateserver.Storage to use to fetch files."""
        with self._lock:
            if not self._storage:
                self._storage = isolateserver.get_storage(
                    isolate_server, namespace)
            else:
                # Shards must all use the exact same isolate server and
                # namespace.
                if self._storage.location != isolate_server:
                    logging.error(
                        'Task shards are using multiple isolate servers: %s and %s',
                        self._storage.location, isolate_server)
                    return None
                if self._storage.namespace != namespace:
                    logging.error(
                        'Task shards are using multiple namespaces: %s and %s',
                        self._storage.namespace, namespace)
                    return None
            return self._storage
Example 21
  def __init__(
      self, isolate_server, namespace, isolated_hash, task_name, shards, env,
      dimensions, working_dir, verbose, profile, priority, algo):
    """Populates a manifest object.
      Args:
        isolate_server - isolate server url.
        namespace - isolate server namespace to use.
        isolated_hash - The manifest's sha-1 that the slave is going to fetch.
        task_name - The name to give the task request.
        shards - The number of swarming shards to request.
        env - environment variables to set.
        dimensions - dimensions to filter the task on.
        working_dir - Relative working directory to start the script.
        verbose - if True, have the slave print more details.
        profile - if True, have the slave print more timing data.
        priority - int between 0 and 1000; a lower value means a higher priority.
        algo - hashing algorithm used.
    """
    self.isolate_server = isolate_server
    self.namespace = namespace
    # swarm_bot doesn't understand compressed data yet, so the data to be
    # downloaded by swarm_bot lives in 'default', independent of what
    # run_isolated.py is going to fetch.
    self.storage = isolateserver.get_storage(isolate_server, 'default')

    self.isolated_hash = isolated_hash
    self.bundle = zip_package.ZipPackage(ROOT_DIR)

    self._task_name = task_name
    self._shards = shards
    self._env = env.copy()
    self._dimensions = dimensions.copy()
    self._working_dir = working_dir

    self.verbose = bool(verbose)
    self.profile = bool(profile)
    self.priority = priority
    self._algo = algo

    self._isolate_item = None
    self._tasks = []
Example 22
def main(args):
  tools.disable_buffering()
  parser = tools.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      help='Hash of the .isolated to grab from the isolate server')
  data_group.add_option(
      '-H', dest='isolated', help=optparse.SUPPRESS_HELP)
  isolateserver.add_isolate_server_options(data_group)
  parser.add_option_group(data_group)

  isolateserver.add_cache_options(parser)
  parser.set_defaults(cache='cache')

  debug_group = optparse.OptionGroup(parser, 'Debugging')
  debug_group.add_option(
      '--leak-temp-dir',
      action='store_true',
      help='Deliberately leak isolate\'s temp dir for later examination '
          '[default: %default]')
  parser.add_option_group(debug_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  if not options.isolated:
    parser.error('--isolated is required.')
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(parser, options, True)

  cache = isolateserver.process_cache_options(options)
  with isolateserver.get_storage(
      options.isolate_server, options.namespace) as storage:
    # Hashing schemes used by |storage| and |cache| MUST match.
    assert storage.hash_algo == cache.hash_algo
    return run_tha_test(
        options.isolated, storage, cache, options.leak_temp_dir, args)
Example 23
def main(args):
  tools.disable_buffering()
  parser = logging_utils.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      help='Hash of the .isolated to grab from the isolate server')
  data_group.add_option(
      '-H', dest='isolated', help=optparse.SUPPRESS_HELP)
  isolateserver.add_isolate_server_options(data_group)
  parser.add_option_group(data_group)

  isolateserver.add_cache_options(parser)
  parser.set_defaults(cache='cache')

  debug_group = optparse.OptionGroup(parser, 'Debugging')
  debug_group.add_option(
      '--leak-temp-dir',
      action='store_true',
      help='Deliberately leak isolate\'s temp dir for later examination '
          '[default: %default]')
  parser.add_option_group(debug_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  if not options.isolated:
    parser.error('--isolated is required.')
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(parser, options, True)

  cache = isolateserver.process_cache_options(options)
  with isolateserver.get_storage(
      options.isolate_server, options.namespace) as storage:
    # Hashing schemes used by |storage| and |cache| MUST match.
    assert storage.hash_algo == cache.hash_algo
    return run_tha_test(
        options.isolated, storage, cache, options.leak_temp_dir, args)
Example 24
def main(args):
    tools.disable_buffering()
    parser = logging_utils.OptionParserWithLogging(
        usage="%prog <options>", version=__version__, log_file=RUN_ISOLATED_LOG_FILE
    )
    parser.add_option(
        "--json",
        help="dump output metadata to json file. When used, run_isolated returns " "non-zero only on internal failure",
    )
    data_group = optparse.OptionGroup(parser, "Data source")
    data_group.add_option("-s", "--isolated", help="Hash of the .isolated to grab from the isolate server")
    isolateserver.add_isolate_server_options(data_group)
    parser.add_option_group(data_group)

    isolateserver.add_cache_options(parser)
    parser.set_defaults(cache="cache")

    debug_group = optparse.OptionGroup(parser, "Debugging")
    debug_group.add_option(
        "--leak-temp-dir",
        action="store_true",
        help="Deliberately leak isolate's temp dir for later examination " "[default: %default]",
    )
    debug_group.add_option("--root-dir", help="Use a directory instead of a random one")
    parser.add_option_group(debug_group)

    auth.add_auth_options(parser)
    options, args = parser.parse_args(args)
    if not options.isolated:
        parser.error("--isolated is required.")
    auth.process_auth_options(parser, options)
    isolateserver.process_isolate_server_options(parser, options, True)

    cache = isolateserver.process_cache_options(options)
    with isolateserver.get_storage(options.isolate_server, options.namespace) as storage:
        # Hashing schemes used by |storage| and |cache| MUST match.
        assert storage.hash_algo == cache.hash_algo
        return run_tha_test(
            options.isolated, storage, cache, options.leak_temp_dir, options.json, options.root_dir, args
        )
Example 25
  def run_upload_items_test(self, namespace):
    storage = isolateserver.get_storage(
        isolate_storage.ServerRef(self.server.url, namespace))

    # Items to upload.
    items = [
      isolateserver.BufferItem('item %d' % i, storage.server_ref.hash_algo)
      for i in xrange(10)
    ]

    # Do it.
    uploaded = storage.upload_items(items)
    self.assertEqual(set(items), set(uploaded))

    # Now ensure upload_items skips existing items.
    more = [
      isolateserver.BufferItem('more item %d' % i, storage.server_ref.hash_algo)
      for i in xrange(10)
    ]

    # Uploaded only |more|.
    uploaded = storage.upload_items(items + more)
    self.assertEqual(set(more), set(uploaded))
Example 26
def main(args):
  # Warning: when --argsfile is used, the strings are unicode instances; when
  # parsed normally, the strings are str instances.
  (parser, options, args) = parse_args(args)

  if not file_path.enable_symlink():
    logging.error('Symlink support is not enabled')

  isolate_cache = isolateserver.process_cache_options(options, trim=False)
  named_cache_manager = named_cache.process_named_cache_options(parser, options)
  if options.clean:
    if options.isolated:
      parser.error('Can\'t use --isolated with --clean.')
    if options.isolate_server:
      parser.error('Can\'t use --isolate-server with --clean.')
    if options.json:
      parser.error('Can\'t use --json with --clean.')
    if options.named_caches:
      parser.error('Can\'t use --named-cache with --clean.')
    clean_caches(options, isolate_cache, named_cache_manager)
    return 0

  if not options.no_clean:
    clean_caches(options, isolate_cache, named_cache_manager)

  if not options.isolated and not args:
    parser.error('--isolated or command to run is required.')

  auth.process_auth_options(parser, options)

  isolateserver.process_isolate_server_options(
      parser, options, True, False)
  if not options.isolate_server:
    if options.isolated:
      parser.error('--isolated requires --isolate-server')
    if ISOLATED_OUTDIR_PARAMETER in args:
      parser.error(
        '%s in args requires --isolate-server' % ISOLATED_OUTDIR_PARAMETER)

  if options.root_dir:
    options.root_dir = unicode(os.path.abspath(options.root_dir))
  if options.json:
    options.json = unicode(os.path.abspath(options.json))

  if any('=' not in i for i in options.env):
    parser.error(
        '--env requires key=value form; the value can be omitted to delete '
        'the variable')
  options.env = dict(i.split('=', 1) for i in options.env)

  prefixes = {}
  cwd = os.path.realpath(os.getcwd())
  for item in options.env_prefix:
    if '=' not in item:
      parser.error(
        '--env-prefix %r is malformed, must be in the form `VAR=./path`'
        % item)
    key, opath = item.split('=', 1)
    if os.path.isabs(opath):
      parser.error('--env-prefix %r path is bad, must be relative.' % opath)
    opath = os.path.normpath(opath)
    if not os.path.realpath(os.path.join(cwd, opath)).startswith(cwd):
      parser.error(
        '--env-prefix %r path is bad, must be relative and not contain `..`.'
        % opath)
    prefixes.setdefault(key, []).append(opath)
  options.env_prefix = prefixes

  cipd.validate_cipd_options(parser, options)

  install_packages_fn = noop_install_packages
  if options.cipd_enabled:
    install_packages_fn = lambda run_dir: install_client_and_packages(
        run_dir, cipd.parse_package_args(options.cipd_packages),
        options.cipd_server, options.cipd_client_package,
        options.cipd_client_version, cache_dir=options.cipd_cache)

  @contextlib.contextmanager
  def install_named_caches(run_dir):
    # WARNING: this function depends on "options" variable defined in the outer
    # function.
    caches = [
      (os.path.join(run_dir, unicode(relpath)), name)
      for name, relpath in options.named_caches
    ]
    with named_cache_manager.open():
      for path, name in caches:
        named_cache_manager.install(path, name)
    try:
      yield
    finally:
      # Uninstall each named cache, returning it to the cache pool. If an
      # uninstall fails for a given cache, it will remain in the task's
      # temporary space, get cleaned up by the Swarming bot, and be lost.
      #
      # If the Swarming bot cannot clean up the cache, it will handle it like
      # any other bot file that could not be removed.
      with named_cache_manager.open():
        for path, name in caches:
          try:
            named_cache_manager.uninstall(path, name)
          except named_cache.Error:
            logging.exception('Error while removing named cache %r at %r. '
                              'The cache will be lost.', path, name)

  extra_args = []
  command = []
  if options.raw_cmd:
    command = args
    if options.relative_cwd:
      a = os.path.normpath(os.path.abspath(options.relative_cwd))
      if not a.startswith(os.getcwd()):
        parser.error(
            '--relative-cwd must not try to escape the working directory')
  else:
    if options.relative_cwd:
      parser.error('--relative-cwd requires --raw-cmd')
    extra_args = args

  data = TaskData(
      command=command,
      relative_cwd=options.relative_cwd,
      extra_args=extra_args,
      isolated_hash=options.isolated,
      storage=None,
      isolate_cache=isolate_cache,
      outputs=options.output,
      install_named_caches=install_named_caches,
      leak_temp_dir=options.leak_temp_dir,
      root_dir=_to_unicode(options.root_dir),
      hard_timeout=options.hard_timeout,
      grace_period=options.grace_period,
      bot_file=options.bot_file,
      switch_to_account=options.switch_to_account,
      install_packages_fn=install_packages_fn,
      use_symlinks=options.use_symlinks,
      env=options.env,
      env_prefix=options.env_prefix)
  try:
    if options.isolate_server:
      storage = isolateserver.get_storage(
          options.isolate_server, options.namespace)
      with storage:
        data = data._replace(storage=storage)
        # Hashing schemes used by |storage| and |isolate_cache| MUST match.
        assert storage.hash_algo == isolate_cache.hash_algo
        return run_tha_test(data, options.json)
    return run_tha_test(data, options.json)
  except (cipd.Error, named_cache.Error) as ex:
    print >> sys.stderr, ex.message
    return 1
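The --env-prefix validation above boils down to one containment check: after normalization, the real path of cwd/opath must still start with cwd, which rejects both absolute paths and `..` escapes. The same check as a standalone, testable sketch (is_safe_relative_path is an illustrative name):

import os

def is_safe_relative_path(cwd, opath):
  """True if opath stays inside cwd after resolving symlinks and '..'."""
  if os.path.isabs(opath):
    return False
  resolved = os.path.realpath(os.path.join(cwd, os.path.normpath(opath)))
  return resolved.startswith(os.path.realpath(cwd))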
Example 27
def main():
  colorama.init()

  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-I', '--isolate-server',
      metavar='URL', default='',
      help='Isolate server to use')
  parser.add_option(
      '--namespace', default='temporary%d-gzip' % time.time(), metavar='XX',
      help='Namespace to use on the server, default: %default')
  parser.add_option(
      '--threads', type='int', default=16, metavar='N',
      help='Parallel worker threads to use, default:%default')

  data_group = optparse.OptionGroup(parser, 'Amount of data')
  graph.unit_option(
      data_group, '--items', default=0, help='Number of items to upload')
  graph.unit_option(
      data_group, '--max-size', default=0,
      help='Loop until this amount of data was transferred')
  graph.unit_option(
      data_group, '--mid-size', default=100*1024,
      help='Rough average size of each item, default:%default')
  parser.add_option_group(data_group)

  ui_group = optparse.OptionGroup(parser, 'Result histogram')
  ui_group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='Width of histogram, default:%default')
  ui_group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of histogram\'s buckets, default:%default')
  parser.add_option_group(ui_group)

  log_group = optparse.OptionGroup(parser, 'Logging')
  log_group.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  log_group.add_option(
      '-v', '--verbose', action='store_true', help='Enable logging')
  parser.add_option_group(log_group)

  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  if bool(options.max_size) == bool(options.items):
    parser.error(
        'Use one of --max-size or --items.\n'
        '  Use --max-size if you want to run it until NN bytes were '
        'transferred.\n'
        '  Otherwise use --items to run it for NN items.')
  options.isolate_server = options.isolate_server.rstrip('/')
  if not options.isolate_server:
    parser.error('--isolate-server is required.')

  print(
      ' - Using %d threads,  items=%d,  max-size=%d,  mid-size=%d' % (
      options.threads, options.items, options.max_size, options.mid_size))

  start = time.time()

  random_pool = Randomness()
  print(' - Generated pool after %.1fs' % (time.time() - start))

  columns = [('index', 0), ('data', 0), ('size', options.items)]
  progress = Progress(columns)
  storage = isolateserver.get_storage(options.isolate_server, options.namespace)
  do_item = functools.partial(
      send_and_receive,
      random_pool,
      storage,
      progress)

  # TODO(maruel): Handle Ctrl-C should:
  # - Stop adding tasks.
  # - Stop scheduling tasks in ThreadPool.
  # - Wait for the remaining ongoing tasks to complete.
  # - Still print details and write the json file.
  with threading_utils.ThreadPoolWithProgress(
      progress, options.threads, options.threads, 0) as pool:
    if options.items:
      for _ in xrange(options.items):
        pool.add_task(0, do_item, gen_size(options.mid_size))
        progress.print_update()
    elif options.max_size:
      # This one is approximate.
      total = 0
      while True:
        size = gen_size(options.mid_size)
        progress.update_item('', size=1)
        progress.print_update()
        pool.add_task(0, do_item, size)
        total += size
        if total >= options.max_size:
          break
    results = sorted(pool.join())

  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
Example 28
def main(args):
    parser = create_option_parser()
    options, args = parser.parse_args(args)

    cache = isolateserver.process_cache_options(options)
    if options.clean:
        if options.isolated:
            parser.error('Can\'t use --isolated with --clean.')
        if options.isolate_server:
            parser.error('Can\'t use --isolate-server with --clean.')
        if options.json:
            parser.error('Can\'t use --json with --clean.')
        cache.cleanup()
        return 0
    if not options.no_clean:
        cache.cleanup()

    if not options.isolated and not args:
        parser.error('--isolated or command to run is required.')

    auth.process_auth_options(parser, options)

    isolateserver.process_isolate_server_options(parser, options, True, False)
    if not options.isolate_server:
        if options.isolated:
            parser.error('--isolated requires --isolate-server')
        if ISOLATED_OUTDIR_PARAMETER in args:
            parser.error('%s in args requires --isolate-server' %
                         ISOLATED_OUTDIR_PARAMETER)

    if options.root_dir:
        options.root_dir = unicode(os.path.abspath(options.root_dir))
    if options.json:
        options.json = unicode(os.path.abspath(options.json))

    cipd.validate_cipd_options(parser, options)

    install_packages_fn = lambda run_dir: install_packages(
        run_dir,
        cipd.parse_package_args(options.cipd_packages),
        options.cipd_server,
        options.cipd_client_package,
        options.cipd_client_version,
        cache_dir=options.cipd_cache)

    try:
        command = [] if options.isolated else args
        if options.isolate_server:
            storage = isolateserver.get_storage(options.isolate_server,
                                                options.namespace)
            with storage:
                # Hashing schemes used by |storage| and |cache| MUST match.
                assert storage.hash_algo == cache.hash_algo
                return run_tha_test(command, options.isolated, storage, cache,
                                    options.leak_temp_dir, options.json,
                                    options.root_dir, options.hard_timeout,
                                    options.grace_period, options.bot_file,
                                    args, install_packages_fn,
                                    options.use_symlinks)
        return run_tha_test(command, options.isolated, None, cache,
                            options.leak_temp_dir, options.json,
                            options.root_dir, options.hard_timeout,
                            options.grace_period, options.bot_file, args,
                            install_packages_fn, options.use_symlinks)
    except cipd.Error as ex:
        print >> sys.stderr, ex.message
        return 1
Example 29
def CMDreproduce(parser, args):
  """Runs a task locally that was triggered on the server.

  This runs locally the same commands that were run on the bot. The data
  downloaded will be in a subdirectory named 'work' of the current working
  directory.

  You can pass additional arguments to the target command by listing them
  after --.
  """
  options, args = parser.parse_args(args)
  extra_args = []
  if not args:
    parser.error('Must specify exactly one task id.')
  if len(args) > 1:
    if args[1] == '--':
      if len(args) > 2:
        extra_args = args[2:]
    else:
      extra_args = args[1:]

  url = options.swarming + '/_ah/api/swarming/v1/task/%s/request' % args[0]
  request = net.url_read_json(url)
  if not request:
    print >> sys.stderr, 'Failed to retrieve request data for the task'
    return 1

  workdir = unicode(os.path.abspath('work'))
  if not fs.isdir(workdir):
    fs.mkdir(workdir)

  properties = request['properties']
  env = None
  if properties.get('env'):
    env = os.environ.copy()
    logging.info('env: %r', properties['env'])
    for i in properties['env']:
      key = i['key'].encode('utf-8')
      if not i['value']:
        env.pop(key, None)
      else:
        env[key] = i['value'].encode('utf-8')

  if properties.get('inputs_ref'):
    # Create the tree.
    with isolateserver.get_storage(
          properties['inputs_ref']['isolatedserver'],
          properties['inputs_ref']['namespace']) as storage:
      bundle = isolateserver.fetch_isolated(
          properties['inputs_ref']['isolated'],
          storage,
          isolateserver.MemoryCache(file_mode_mask=0700),
          workdir,
          False)
      command = bundle.command
      if bundle.relative_cwd:
        workdir = os.path.join(workdir, bundle.relative_cwd)
  else:
    command = properties['command']
  try:
    return subprocess.call(command + extra_args, env=env, cwd=workdir)
  except OSError as e:
    print >> sys.stderr, 'Failed to run: %s' % ' '.join(command)
    print >> sys.stderr, str(e)
    return 1
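The env reconstruction in CMDreproduce follows a small protocol: each entry is a {'key': ..., 'value': ...} pair, and an empty value means "delete the variable" rather than "set it to empty". Restated as a standalone helper (apply_env_overrides is an illustrative name):

def apply_env_overrides(base_env, overrides):
  """Applies Swarming-style env entries; an empty value deletes the key."""
  env = dict(base_env)
  for entry in overrides:
    if not entry['value']:
      env.pop(entry['key'], None)
    else:
      env[entry['key']] = entry['value']
  return env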
Example 30
def main(args):
  (parser, options, args) = parse_args(args)

  isolate_cache = isolateserver.process_cache_options(options, trim=False)
  named_cache_manager = named_cache.process_named_cache_options(parser, options)
  if options.clean:
    if options.isolated:
      parser.error('Can\'t use --isolated with --clean.')
    if options.isolate_server:
      parser.error('Can\'t use --isolate-server with --clean.')
    if options.json:
      parser.error('Can\'t use --json with --clean.')
    if options.named_caches:
      parser.error('Can\'t use --named-cache with --clean.')
    clean_caches(options, isolate_cache, named_cache_manager)
    return 0

  if not options.no_clean:
    clean_caches(options, isolate_cache, named_cache_manager)

  if not options.isolated and not args:
    parser.error('--isolated or command to run is required.')

  auth.process_auth_options(parser, options)

  isolateserver.process_isolate_server_options(
    parser, options, True, False)
  if not options.isolate_server:
    if options.isolated:
      parser.error('--isolated requires --isolate-server')
    if ISOLATED_OUTDIR_PARAMETER in args:
      parser.error(
        '%s in args requires --isolate-server' % ISOLATED_OUTDIR_PARAMETER)

  if options.root_dir:
    options.root_dir = unicode(os.path.abspath(options.root_dir))
  if options.json:
    options.json = unicode(os.path.abspath(options.json))

  cipd.validate_cipd_options(parser, options)

  install_packages_fn = noop_install_packages
  if options.cipd_enabled:
    install_packages_fn = lambda run_dir: install_client_and_packages(
        run_dir, cipd.parse_package_args(options.cipd_packages),
        options.cipd_server, options.cipd_client_package,
        options.cipd_client_version, cache_dir=options.cipd_cache)

  @contextlib.contextmanager
  def install_named_caches(run_dir):
    # WARNING: this function depends on "options" variable defined in the outer
    # function.
    caches = [
      (os.path.join(run_dir, unicode(relpath)), name)
      for name, relpath in options.named_caches
    ]
    with named_cache_manager.open():
      for path, name in caches:
        named_cache_manager.install(path, name)
    try:
      yield
    finally:
      # Uninstall each named cache, returning it to the cache pool. If an
      # uninstall fails for a given cache, it will remain in the task's
      # temporary space, get cleaned up by the Swarming bot, and be lost.
      #
      # If the Swarming bot cannot clean up the cache, it will handle it like
      # any other bot file that could not be removed.
      with named_cache_manager.open():
        for path, name in caches:
          try:
            named_cache_manager.uninstall(path, name)
          except named_cache.Error:
            logging.exception('Error while removing named cache %r at %r. '
                              'The cache will be lost.', path, name)

  try:
    if options.isolate_server:
      storage = isolateserver.get_storage(
          options.isolate_server, options.namespace)
      with storage:
        # Hashing schemes used by |storage| and |isolate_cache| MUST match.
        assert storage.hash_algo == isolate_cache.hash_algo
        return run_tha_test(
            args,
            options.isolated,
            storage,
            isolate_cache,
            options.output,
            install_named_caches,
            options.leak_temp_dir,
            options.json, options.root_dir,
            options.hard_timeout,
            options.grace_period,
            options.bot_file,
            options.switch_to_account,
            install_packages_fn,
            options.use_symlinks)
    return run_tha_test(
        args,
        options.isolated,
        None,
        isolate_cache,
        options.output,
        install_named_caches,
        options.leak_temp_dir,
        options.json,
        options.root_dir,
        options.hard_timeout,
        options.grace_period,
        options.bot_file,
        options.switch_to_account,
        install_packages_fn,
        options.use_symlinks)
  except (cipd.Error, named_cache.Error) as ex:
    print >> sys.stderr, ex.message
    return 1
def main():
  colorama.init()

  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-I', '--isolate-server',
      metavar='URL', default='',
      help='Isolate server to use')
  parser.add_option(
      '--namespace', default='temporary%d-gzip' % time.time(), metavar='XX',
      help='Namespace to use on the server, default: %default')
  parser.add_option(
      '--threads', type='int', default=16, metavar='N',
      help='Parallel worker threads to use, default:%default')

  data_group = optparse.OptionGroup(parser, 'Amount of data')
  graph.unit_option(
      data_group, '--items', default=0, help='Number of items to upload')
  graph.unit_option(
      data_group, '--max-size', default=0,
      help='Loop until this amount of data was transferred')
  graph.unit_option(
      data_group, '--mid-size', default=100*1024,
      help='Rough average size of each item, default:%default')
  parser.add_option_group(data_group)

  ui_group = optparse.OptionGroup(parser, 'Result histogram')
  ui_group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='Width of histogram, default:%default')
  ui_group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of histogram\'s buckets, default:%default')
  parser.add_option_group(ui_group)

  log_group = optparse.OptionGroup(parser, 'Logging')
  log_group.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  log_group.add_option(
      '-v', '--verbose', action='store_true', help='Enable logging')
  parser.add_option_group(log_group)

  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  if bool(options.max_size) == bool(options.items):
    parser.error(
        'Use one of --max-size or --items.\n'
        '  Use --max-size if you want to run it until NN bytes were '
        'transferred.\n'
        '  Otherwise use --items to run it for NN items.')
  options.isolate_server = options.isolate_server.rstrip('/')
  if not options.isolate_server:
    parser.error('--isolate-server is required.')

  print(
      ' - Using %d threads,  items=%d,  max-size=%d,  mid-size=%d' % (
      options.threads, options.items, options.max_size, options.mid_size))

  start = time.time()

  random_pool = Randomness()
  print(' - Generated pool after %.1fs' % (time.time() - start))

  columns = [('index', 0), ('data', 0), ('size', options.items)]
  progress = Progress(columns)
  storage = isolateserver.get_storage(options.isolate_server, options.namespace)
  do_item = functools.partial(
      send_and_receive,
      random_pool,
      storage,
      progress)

  # TODO(maruel): Handling Ctrl-C should:
  # - Stop adding tasks.
  # - Stop scheduling tasks in ThreadPool.
  # - Wait for the remaining ongoing tasks to complete.
  # - Still print details and write the json file.
  with threading_utils.ThreadPoolWithProgress(
      progress, options.threads, options.threads, 0) as pool:
    if options.items:
      for _ in xrange(options.items):
        pool.add_task(0, do_item, gen_size(options.mid_size))
        progress.print_update()
    elif options.max_size:
      # This one is approximate.
      total = 0
      while True:
        size = gen_size(options.mid_size)
        progress.update_item('', size=1)
        progress.print_update()
        pool.add_task(0, do_item, size)
        total += size
        if total >= options.max_size:
          break
    results = sorted(pool.join())

  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
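gen_size() is used above but not defined in this excerpt. A stand-in consistent with its call sites, returning a pseudo-random item size clustered around mid_size, could be (the distribution itself is an assumption):

import math
import random

def gen_size(mid_size):
  # Assumption: log-normal spread around mid_size; the real code may differ.
  return max(1, int(random.lognormvariate(math.log(mid_size), 0.5)))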
Example no. 32
def CMDreproduce(parser, args):
  """Runs a task locally that was triggered on the server.

  This running locally the same commands that have been run on the bot. The data
  downloaded will be in a subdirectory named 'work' of the current working
  directory.

  You can pass further additional arguments to the target command by passing
  them after --.
  """
  parser.add_option(
      '--output-dir', metavar='DIR', default='',
      help='Directory that will have results stored into')
  options, args = parser.parse_args(args)
  extra_args = []
  if not args:
    parser.error('Must specify exactly one task id.')
  if len(args) > 1:
    if args[1] == '--':
      if len(args) > 2:
        extra_args = args[2:]
    else:
      extra_args = args[1:]

  url = options.swarming + '/_ah/api/swarming/v1/task/%s/request' % args[0]
  request = net.url_read_json(url)
  if not request:
    print >> sys.stderr, 'Failed to retrieve request data for the task'
    return 1

  workdir = unicode(os.path.abspath('work'))
  if fs.isdir(workdir):
    parser.error('Please delete the directory \'work\' first')
  fs.mkdir(workdir)

  properties = request['properties']
  env = None
  if properties.get('env'):
    env = os.environ.copy()
    logging.info('env: %r', properties['env'])
    for i in properties['env']:
      key = i['key'].encode('utf-8')
      if not i['value']:
        env.pop(key, None)
      else:
        env[key] = i['value'].encode('utf-8')

  if properties.get('inputs_ref'):
    # Create the tree.
    with isolateserver.get_storage(
          properties['inputs_ref']['isolatedserver'],
          properties['inputs_ref']['namespace']) as storage:
      bundle = isolateserver.fetch_isolated(
          properties['inputs_ref']['isolated'],
          storage,
          isolateserver.MemoryCache(file_mode_mask=0700),
          workdir)
      command = bundle.command
      if bundle.relative_cwd:
        workdir = os.path.join(workdir, bundle.relative_cwd)
      command.extend(properties.get('extra_args') or [])
    # https://github.com/luci/luci-py/blob/master/appengine/swarming/doc/Magic-Values.md
    new_command = run_isolated.process_command(command, options.output_dir)
    if not options.output_dir and new_command != command:
      parser.error('The task has outputs, you must use --output-dir')
    command = new_command
  else:
    command = properties['command']
  try:
    return subprocess.call(command + extra_args, env=env, cwd=workdir)
  except OSError as e:
    print >> sys.stderr, 'Failed to run: %s' % ' '.join(command)
    print >> sys.stderr, str(e)
    return 1
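The env handling above converts Swarming's list of {'key': ..., 'value': ...} pairs into process environment overrides, where an empty value means "unset". A minimal sketch of the same logic as a standalone helper (the function name is ours):

import os

def apply_env_overrides(base_env, overrides):
  # overrides: [{'key': ..., 'value': ...}]; an empty value unsets the key.
  env = base_env.copy()
  for item in overrides:
    if not item['value']:
      env.pop(item['key'], None)
    else:
      env[item['key']] = item['value']
  return env

# Example: apply_env_overrides(dict(os.environ), [{'key': 'LANG', 'value': ''}])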
Example no. 33
def main():
    tools.disable_buffering()
    parser = tools.OptionParserWithLogging(usage='%prog <options>',
                                           version=__version__,
                                           log_file=RUN_ISOLATED_LOG_FILE)

    group = optparse.OptionGroup(parser, 'Data source')
    group.add_option('-s',
                     '--isolated',
                     metavar='FILE',
                     help='File/url describing what to map or run')
    group.add_option('-H',
                     '--hash',
                     help='Hash of the .isolated to grab from the hash table')
    group.add_option('-I',
                     '--isolate-server',
                     metavar='URL',
                     default='',
                     help='Isolate server to use')
    group.add_option(
        '-n',
        '--namespace',
        default='default-gzip',
        help='namespace to use when using isolateserver, default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Cache management')
    group.add_option('--cache',
                     default='cache',
                     metavar='DIR',
                     help='Cache directory, default=%default')
    group.add_option(
        '--max-cache-size',
        type='int',
        metavar='NNN',
        default=20 * 1024 * 1024 * 1024,
        help='Trim if the cache gets larger than this value, default=%default')
    group.add_option(
        '--min-free-space',
        type='int',
        metavar='NNN',
        default=2 * 1024 * 1024 * 1024,
        help='Trim if disk free space becomes lower than this value, '
        'default=%default')
    group.add_option(
        '--max-items',
        type='int',
        metavar='NNN',
        default=100000,
        help='Trim if more than this number of items are in the cache, '
        'default=%default')
    parser.add_option_group(group)

    options, args = parser.parse_args()

    if bool(options.isolated) == bool(options.hash):
        logging.debug('One and only one of --isolated or --hash is required.')
        parser.error('One and only one of --isolated or --hash is required.')
    if args:
        logging.debug('Unsupported args %s' % ' '.join(args))
        parser.error('Unsupported args %s' % ' '.join(args))
    if not options.isolate_server:
        parser.error('--isolate-server is required.')

    options.cache = os.path.abspath(options.cache)
    policies = CachePolicies(options.max_cache_size, options.min_free_space,
                             options.max_items)
    storage = isolateserver.get_storage(options.isolate_server,
                                        options.namespace)
    algo = isolateserver.get_hash_algo(options.namespace)

    try:
        # |options.cache| may not exist until DiskCache() instance is created.
        cache = DiskCache(options.cache, policies, algo)
        outdir = make_temp_dir('run_tha_test', options.cache)
        return run_tha_test(options.isolated or options.hash, storage, cache,
                            algo, outdir)
    except Exception as e:
        # Make sure any exception is logged.
        logging.exception(e)
        return 1
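CachePolicies is constructed with the same three positional values throughout these examples, but its definition is not shown. A minimal stand-in consistent with that usage (an assumption, not the source definition):

import collections

CachePolicies = collections.namedtuple(
    'CachePolicies', ['max_cache_size', 'min_free_space', 'max_items'])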
Example no. 34
def main(args):
  tools.disable_buffering()
  parser = tools.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      metavar='FILE',
      help='File/url describing what to map or run')
  data_group.add_option(
      '-H', '--hash',
      help='Hash of the .isolated to grab from the hash table')
  isolateserver.add_isolate_server_options(data_group, True)
  parser.add_option_group(data_group)

  cache_group = optparse.OptionGroup(parser, 'Cache management')
  cache_group.add_option(
      '--cache',
      default='cache',
      metavar='DIR',
      help='Cache directory, default=%default')
  cache_group.add_option(
      '--max-cache-size',
      type='int',
      metavar='NNN',
      default=20*1024*1024*1024,
      help='Trim if the cache gets larger than this value, default=%default')
  cache_group.add_option(
      '--min-free-space',
      type='int',
      metavar='NNN',
      default=2*1024*1024*1024,
      help='Trim if disk free space becomes lower than this value, '
           'default=%default')
  cache_group.add_option(
      '--max-items',
      type='int',
      metavar='NNN',
      default=100000,
      help='Trim if more than this number of items are in the cache, '
           'default=%default')
  parser.add_option_group(cache_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(data_group, options)

  if bool(options.isolated) == bool(options.hash):
    logging.debug('One and only one of --isolated or --hash is required.')
    parser.error('One and only one of --isolated or --hash is required.')

  options.cache = os.path.abspath(options.cache)
  policies = CachePolicies(
      options.max_cache_size, options.min_free_space, options.max_items)

  try:
    # |options.cache| path may not exist until DiskCache() instance is created.
    cache = DiskCache(
        options.cache, policies, isolateserver.get_hash_algo(options.namespace))
    remote = options.isolate_server or options.indir
    with isolateserver.get_storage(remote, options.namespace) as storage:
      # Hashing schemes used by |storage| and |cache| MUST match.
      assert storage.hash_algo == cache.hash_algo
      return run_tha_test(
          options.isolated or options.hash, storage, cache, args)
  except Exception as e:
    # Make sure any exception is logged.
    tools.report_error(e)
    logging.exception(e)
    return 1
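isolateserver.get_hash_algo() returns the hashing scheme that |storage| and |cache| must share. Judging by the u'sha-1' algo fields in the .isolated files later in these examples, a plausible stand-in is (an assumption; the real mapping may depend on the namespace):

import hashlib

def get_hash_algo(namespace):
  # Assumption: SHA-1 regardless of namespace, matching the 'sha-1' algo
  # fields used elsewhere in these examples.
  return hashlib.sha1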
Example no. 35
class RunIsolatedTestOutputFiles(RunIsolatedTestBase):
    # Like RunIsolatedTestRun, but ensures that specific output files
    # (as opposed to anything in ${ISOLATED_OUTDIR}) are returned.
    def _run_test(self, isolated, command, extra_args):
        # Start a full isolate server mock and have run_tha_test() upload
        # results back after the task completes.
        server = isolateserver_mock.MockIsolateServer()
        try:
            # Output the following structure:
            #
            # foo1
            # foodir --> foo2_sl (symlink to "foo2_content" file)
            # bardir --> bar1
            #
            # Create the symlinks only on Linux.
            script = ('import os\n'
                      'import sys\n'
                      'open(sys.argv[1], "w").write("foo1")\n'
                      'bar1_path = os.path.join(sys.argv[3], "bar1")\n'
                      'open(bar1_path, "w").write("bar1")\n'
                      'if sys.platform.startswith("linux"):\n'
                      '  foo_realpath = os.path.abspath("foo2_content")\n'
                      '  open(foo_realpath, "w").write("foo2")\n'
                      '  os.symlink(foo_realpath, sys.argv[2])\n'
                      'else:\n'
                      '  open(sys.argv[2], "w").write("foo2")\n')
            script_hash = isolateserver_mock.hash_content(script)
            isolated['files']['cmd.py'] = {
                'h': script_hash,
                'm': 0700,
                's': len(script),
            }
            if sys.platform == 'win32':
                isolated['files']['cmd.py'].pop('m')
            isolated_data = json_dumps(isolated)
            isolated_hash = isolateserver_mock.hash_content(isolated_data)
            server.add_content('default-store', script)
            server.add_content('default-store', isolated_data)
            store = isolateserver.get_storage(server.url, 'default-store')

            self.mock(sys, 'stdout', StringIO.StringIO())
            data = run_isolated.TaskData(
                command=command,
                relative_cwd=None,
                extra_args=extra_args,
                isolated_hash=isolated_hash,
                storage=store,
                isolate_cache=local_caching.MemoryContentAddressedCache(),
                outputs=['foo1', 'foodir/foo2_sl', 'bardir/'],
                install_named_caches=init_named_caches_stub,
                leak_temp_dir=False,
                root_dir=None,
                hard_timeout=60,
                grace_period=30,
                bot_file=None,
                switch_to_account=False,
                install_packages_fn=run_isolated.noop_install_packages,
                use_symlinks=False,
                env={},
                env_prefix={})
            ret = run_isolated.run_tha_test(data, None)
            self.assertEqual(0, ret)

            # It uploaded back. Assert the store has a new item containing foo.
            hashes = {isolated_hash, script_hash}
            foo1_output_hash = isolateserver_mock.hash_content('foo1')
            foo2_output_hash = isolateserver_mock.hash_content('foo2')
            bar1_output_hash = isolateserver_mock.hash_content('bar1')
            hashes.add(foo1_output_hash)
            hashes.add(foo2_output_hash)
            hashes.add(bar1_output_hash)
            isolated = {
                u'algo': u'sha-1',
                u'files': {
                    u'foo1': {
                        u'h': foo1_output_hash,
                        # TODO(maruel): Handle umask.
                        u'm': 0640,
                        u's': 4,
                    },
                    u'foodir/foo2_sl': {
                        u'h': foo2_output_hash,
                        # TODO(maruel): Handle umask.
                        u'm': 0640,
                        u's': 4,
                    },
                    u'bardir/bar1': {
                        u'h': bar1_output_hash,
                        # TODO(maruel): Handle umask.
                        u'm': 0640,
                        u's': 4,
                    },
                },
                u'version': isolated_format.ISOLATED_FILE_VERSION,
            }
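json_dumps() above is a local test helper whose definition is not shown. Since the serialized bytes are hashed and uploaded, the output must be deterministic; a minimal stand-in (an assumption) is:

import json

def json_dumps(data):
  # Sorted keys and compact separators keep the serialization stable.
  return json.dumps(data, sort_keys=True, separators=(',', ':'))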
Example no. 36
class RunIsolatedTestRun(RunIsolatedTestBase):
    # Runs the actual command requested.
    def test_output(self):
        # Start a full isolate server mock and have run_tha_test() upload
        # results back after the task completes.
        server = isolateserver_mock.MockIsolateServer()
        try:
            script = ('import sys\n'
                      'open(sys.argv[1], "w").write("bar")\n')
            script_hash = isolateserver_mock.hash_content(script)
            isolated = {
                u'algo': u'sha-1',
                u'command': [u'cmd.py', u'${ISOLATED_OUTDIR}/foo'],
                u'files': {
                    u'cmd.py': {
                        u'h': script_hash,
                        u'm': 0700,
                        u's': len(script),
                    },
                },
                u'version': isolated_format.ISOLATED_FILE_VERSION,
            }
            if sys.platform == 'win32':
                isolated[u'files'][u'cmd.py'].pop(u'm')
            isolated_data = json_dumps(isolated)
            isolated_hash = isolateserver_mock.hash_content(isolated_data)
            server.add_content('default-store', script)
            server.add_content('default-store', isolated_data)
            store = isolateserver.get_storage(server.url, 'default-store')

            self.mock(sys, 'stdout', StringIO.StringIO())
            data = run_isolated.TaskData(
                command=[],
                relative_cwd=None,
                extra_args=[],
                isolated_hash=isolated_hash,
                storage=store,
                isolate_cache=local_caching.MemoryContentAddressedCache(),
                outputs=None,
                install_named_caches=init_named_caches_stub,
                leak_temp_dir=False,
                root_dir=None,
                hard_timeout=60,
                grace_period=30,
                bot_file=None,
                switch_to_account=False,
                install_packages_fn=run_isolated.noop_install_packages,
                use_symlinks=False,
                env={},
                env_prefix={})
            ret = run_isolated.run_tha_test(data, None)
            self.assertEqual(0, ret)

            # It uploaded back. Assert the store has a new item containing foo.
            hashes = {isolated_hash, script_hash}
            output_hash = isolateserver_mock.hash_content('bar')
            hashes.add(output_hash)
            isolated = {
                u'algo': u'sha-1',
                u'files': {
                    u'foo': {
                        u'h': output_hash,
                        # TODO(maruel): Handle umask.
                        u'm': 0640,
                        u's': 3,
                    },
                },
                u'version': isolated_format.ISOLATED_FILE_VERSION,
            }
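isolateserver_mock.hash_content() is used throughout these tests but not shown. Given the u'sha-1' algo declared in the .isolated data above, a plausible sketch is (an assumption about the mock's internals):

import hashlib

def hash_content(content):
  # Hex digest of the raw bytes, matching the 'sha-1' algo above.
  return hashlib.sha1(content).hexdigest()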
Example no. 37
def main():
  tools.disable_buffering()
  parser = tools.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  group = optparse.OptionGroup(parser, 'Data source')
  group.add_option(
      '-s', '--isolated',
      metavar='FILE',
      help='File/url describing what to map or run')
  group.add_option(
      '-H', '--hash',
      help='Hash of the .isolated to grab from the hash table')
  group.add_option(
      '-I', '--isolate-server',
      metavar='URL', default='',
      help='Isolate server to use')
  group.add_option(
      '-n', '--namespace',
      default='default-gzip',
      help='namespace to use when using isolateserver, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Cache management')
  group.add_option(
      '--cache',
      default='cache',
      metavar='DIR',
      help='Cache directory, default=%default')
  group.add_option(
      '--max-cache-size',
      type='int',
      metavar='NNN',
      default=20*1024*1024*1024,
      help='Trim if the cache gets larger than this value, default=%default')
  group.add_option(
      '--min-free-space',
      type='int',
      metavar='NNN',
      default=2*1024*1024*1024,
      help='Trim if disk free space becomes lower than this value, '
           'default=%default')
  group.add_option(
      '--max-items',
      type='int',
      metavar='NNN',
      default=100000,
      help='Trim if more than this number of items are in the cache, '
           'default=%default')
  parser.add_option_group(group)

  options, args = parser.parse_args()

  if bool(options.isolated) == bool(options.hash):
    logging.debug('One and only one of --isolated or --hash is required.')
    parser.error('One and only one of --isolated or --hash is required.')
  if args:
    logging.debug('Unsupported args %s' % ' '.join(args))
    parser.error('Unsupported args %s' % ' '.join(args))
  if not options.isolate_server:
    parser.error('--isolate-server is required.')

  options.cache = os.path.abspath(options.cache)
  policies = CachePolicies(
      options.max_cache_size, options.min_free_space, options.max_items)
  storage = isolateserver.get_storage(options.isolate_server, options.namespace)
  algo = isolateserver.get_hash_algo(options.namespace)

  try:
    # |options.cache| may not exist until DiskCache() instance is created.
    cache = DiskCache(options.cache, policies, algo)
    outdir = make_temp_dir('run_tha_test', options.cache)
    return run_tha_test(
        options.isolated or options.hash, storage, cache, algo, outdir)
  except Exception as e:
    # Make sure any exception is logged.
    logging.exception(e)
    return 1
Example no. 38
def main(args):
    (parser, options, args) = parse_args(args)

    isolate_cache = isolateserver.process_cache_options(options, trim=False)
    named_cache_manager = named_cache.process_named_cache_options(
        parser, options)
    if options.clean:
        if options.isolated:
            parser.error('Can\'t use --isolated with --clean.')
        if options.isolate_server:
            parser.error('Can\'t use --isolate-server with --clean.')
        if options.json:
            parser.error('Can\'t use --json with --clean.')
        if options.named_caches:
            parser.error('Can\'t use --named-cache with --clean.')
        clean_caches(options, isolate_cache, named_cache_manager)
        return 0

    if not options.no_clean:
        clean_caches(options, isolate_cache, named_cache_manager)

    if not options.isolated and not args:
        parser.error('--isolated or command to run is required.')

    auth.process_auth_options(parser, options)

    isolateserver.process_isolate_server_options(parser, options, True, False)
    if not options.isolate_server:
        if options.isolated:
            parser.error('--isolated requires --isolate-server')
        if ISOLATED_OUTDIR_PARAMETER in args:
            parser.error('%s in args requires --isolate-server' %
                         ISOLATED_OUTDIR_PARAMETER)

    if options.root_dir:
        options.root_dir = unicode(os.path.abspath(options.root_dir))
    if options.json:
        options.json = unicode(os.path.abspath(options.json))

    cipd.validate_cipd_options(parser, options)

    install_packages_fn = noop_install_packages
    if options.cipd_enabled:
        install_packages_fn = lambda run_dir: install_client_and_packages(
            run_dir,
            cipd.parse_package_args(options.cipd_packages),
            options.cipd_server,
            options.cipd_client_package,
            options.cipd_client_version,
            cache_dir=options.cipd_cache)

    @contextlib.contextmanager
    def init_named_caches(run_dir):
        # WARNING: this function depends on the "options" variable defined in
        # the outer function.
        with named_cache_manager.open():
            named_cache_manager.create_symlinks(run_dir, options.named_caches)
        try:
            yield
        finally:
            if not options.leak_temp_dir:
                named_cache_manager.delete_symlinks(run_dir,
                                                    options.named_caches)

    try:
        if options.isolate_server:
            storage = isolateserver.get_storage(options.isolate_server,
                                                options.namespace)
            with storage:
                # Hashing schemes used by |storage| and |isolate_cache| MUST match.
                assert storage.hash_algo == isolate_cache.hash_algo
                return run_tha_test(args, options.isolated, storage,
                                    isolate_cache, options.output,
                                    init_named_caches, options.leak_temp_dir,
                                    options.json, options.root_dir,
                                    options.hard_timeout, options.grace_period,
                                    options.bot_file, install_packages_fn,
                                    options.use_symlinks)
        return run_tha_test(args, options.isolated, None, isolate_cache,
                            options.output, init_named_caches,
                            options.leak_temp_dir, options.json,
                            options.root_dir, options.hard_timeout,
                            options.grace_period, options.bot_file,
                            install_packages_fn, options.use_symlinks)
    except (cipd.Error, named_cache.Error) as ex:
        print >> sys.stderr, ex.message
        return 1
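init_named_caches() above relies on contextlib.contextmanager so the symlinks are removed even when the task raises, unless --leak-temp-dir is set. A self-contained sketch of that setup/teardown shape (names are ours; POSIX-only because of os.symlink):

import contextlib
import os

@contextlib.contextmanager
def with_symlinks(run_dir, targets):
    # Create links on entry; the finally block removes them even on error.
    links = []
    try:
        for name, target in targets.items():
            link = os.path.join(run_dir, name)
            os.symlink(target, link)
            links.append(link)
        yield links
    finally:
        for link in links:
            os.remove(link)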
Example no. 39
def main(args):
  tools.disable_buffering()
  parser = tools.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      metavar='FILE',
      help='File/url describing what to map or run')
  data_group.add_option(
      '-H', '--hash',
      help='Hash of the .isolated to grab from the hash table')
  isolateserver.add_isolate_server_options(data_group, True)
  parser.add_option_group(data_group)

  cache_group = optparse.OptionGroup(parser, 'Cache management')
  cache_group.add_option(
      '--cache',
      default='cache',
      metavar='DIR',
      help='Cache directory, default=%default')
  cache_group.add_option(
      '--max-cache-size',
      type='int',
      metavar='NNN',
      default=20*1024*1024*1024,
      help='Trim if the cache gets larger than this value, default=%default')
  cache_group.add_option(
      '--min-free-space',
      type='int',
      metavar='NNN',
      default=2*1024*1024*1024,
      help='Trim if disk free space becomes lower than this value, '
           'default=%default')
  cache_group.add_option(
      '--max-items',
      type='int',
      metavar='NNN',
      default=100000,
      help='Trim if more than this number of items are in the cache, '
           'default=%default')
  parser.add_option_group(cache_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(data_group, options)

  if bool(options.isolated) == bool(options.hash):
    logging.debug('One and only one of --isolated or --hash is required.')
    parser.error('One and only one of --isolated or --hash is required.')

  options.cache = os.path.abspath(options.cache)
  policies = CachePolicies(
      options.max_cache_size, options.min_free_space, options.max_items)
  algo = isolateserver.get_hash_algo(options.namespace)

  try:
    # |options.cache| may not exist until DiskCache() instance is created.
    cache = DiskCache(options.cache, policies, algo)
    remote = options.isolate_server or options.indir
    with isolateserver.get_storage(remote, options.namespace) as storage:
      return run_tha_test(
          options.isolated or options.hash, storage, cache, algo, args)
  except Exception as e:
    # Make sure any exception is logged.
    tools.report_error(e)
    logging.exception(e)
    return 1
Example no. 40
class RunIsolatedTestOutputFiles(RunIsolatedTestBase):
  def _run_test(self, isolated, command):
    # Start a full isolate server mock and have run_tha_test() upload results
    # back after the task completes.
    server = isolateserver_mock.MockIsolateServer()
    try:
      # Output two files. If we're on Linux, we'll try to make one of them a
      # symlink to ensure that we correctly follow symlinks. Note that this only
      # tests file symlinks, not directory symlinks.
      # TODO(aludwin): follow directory symlinks
      script = (
        'import os\n'
        'import sys\n'
        'open(sys.argv[1], "w").write("bar")\n'
        'if sys.platform.startswith("linux"):\n'
        '  realpath = os.path.abspath("contents_of_symlink")\n'
        '  open(realpath, "w").write("baz")\n'
        '  os.symlink(realpath, sys.argv[2])\n'
        'else:\n'
        '  open(sys.argv[2], "w").write("baz")\n')
      script_hash = isolateserver_mock.hash_content(script)
      isolated['files']['cmd.py'] = {
        'h': script_hash,
        'm': 0700,
        's': len(script),
      }
      if sys.platform == 'win32':
        isolated['files']['cmd.py'].pop('m')
      isolated_data = json_dumps(isolated)
      isolated_hash = isolateserver_mock.hash_content(isolated_data)
      server.add_content('default-store', script)
      server.add_content('default-store', isolated_data)
      store = isolateserver.get_storage(server.url, 'default-store')

      self.mock(sys, 'stdout', StringIO.StringIO())
      ret = run_isolated.run_tha_test(
          command,
          isolated_hash,
          store,
          isolateserver.MemoryCache(),
          ['foo', 'foodir/foo2'],
          init_named_caches_stub,
          False,
          None,
          None,
          None,
          None,
          None,
          run_isolated.noop_install_packages,
          False)
      self.assertEqual(0, ret)

      # It uploaded back. Assert the store has a new item containing foo.
      hashes = {isolated_hash, script_hash}
      foo_output_hash = isolateserver_mock.hash_content('bar')
      foo2_output_hash = isolateserver_mock.hash_content('baz')
      hashes.add(foo_output_hash)
      hashes.add(foo2_output_hash)
      isolated = {
        'algo': 'sha-1',
        'files': {
          'foo': {
            'h': foo_output_hash,
            # TODO(maruel): Handle umask.
            'm': 0640,
            's': 3,
          },
          'foodir/foo2': {
            'h': foo2_output_hash,
            # TODO(maruel): Handle umask.
            'm': 0640,
            's': 3,
          },
        },
        'version': isolated_format.ISOLATED_FILE_VERSION,
      }