Code example #1
def expect(executable, test_cases, jobs, timeout, no_dump):
    self.assertEqual(exe, executable)
    self.assertEqual(["Foo.Bar1", "Foo.Bar3"], test_cases)
    self.assertEqual(run_test_cases.num_processors(), jobs)
    self.assertEqual(120, timeout)
    self.assertEqual(None, no_dump)
    return 89
Code example #2
def expect(
    executable, cwd, test_cases, jobs, timeout, retries, run_all,
    max_failures, no_cr, gtest_output, result_file, verbose):
  self.assertEqual(run_test_cases.fix_python_path([exe]), executable)
  self.assertEqual(os.getcwd(), cwd)
  # They are in reverse order due to test shuffling.
  self.assertEqual(['Foo.Bar1', 'Foo.Bar/3'], test_cases)
  self.assertEqual(run_test_cases.num_processors(), jobs)
  self.assertEqual(120, timeout)
  self.assertEqual(2, retries)
  self.assertEqual(None, run_all)
  self.assertEqual(None, no_cr)
  self.assertEqual('', gtest_output)
  self.assertEqual(None, max_failures)
  self.assertEqual(exe + '.run_test_cases', result_file)
  self.assertFalse(verbose)
  return 89
Code example #3
def expect(executable, cwd, test_cases, jobs, timeout, clusters,
           retries, run_all, max_failures, no_cr, gtest_output,
           result_file, verbose):
    self.assertEqual(run_test_cases.fix_python_path([exe]), executable)
    self.assertEqual(os.getcwd(), cwd)
    # They are in reverse order due to test shuffling.
    self.assertEqual(['Foo.Bar1', 'Foo.Bar/3'], test_cases)
    self.assertEqual(run_test_cases.num_processors(), jobs)
    self.assertEqual(75, timeout)
    self.assertEqual(None, clusters)
    self.assertEqual(2, retries)
    self.assertEqual(None, run_all)
    self.assertEqual(None, no_cr)
    self.assertEqual('', gtest_output)
    self.assertEqual(None, max_failures)
    self.assertEqual(exe + '.run_test_cases', result_file)
    self.assertFalse(verbose)
    return 89
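
Note: the expect stubs in code examples #1-#3 refer to self and exe from an enclosing test method that the snippets do not show. Below is a minimal, self-contained sketch of how such a stub can be wired into a unittest case; the fake run_test_cases module, the test class name, and the placeholder executable path are all assumptions made for illustration, not part of the original projects.

import os
import unittest


class _FakeRunTestCases(object):
  """Minimal stand-in for the run_test_cases module referenced above; only
  num_processors() is needed for this sketch (assumption for illustration)."""

  @staticmethod
  def num_processors():
    return 4


run_test_cases = _FakeRunTestCases()


class RunTestCasesStubTest(unittest.TestCase):
  def test_expect_stub(self):
    # Placeholder executable path; the real tests define `exe` in the
    # enclosing test method, which the snippets do not show.
    exe = os.path.join('out', 'fake_test_binary')

    def expect(executable, test_cases, jobs, timeout, no_dump):
      # Same shape as code example #1: verify the forwarded arguments and
      # return a sentinel exit code.
      self.assertEqual(exe, executable)
      self.assertEqual(['Foo.Bar1', 'Foo.Bar3'], test_cases)
      self.assertEqual(run_test_cases.num_processors(), jobs)
      self.assertEqual(120, timeout)
      self.assertEqual(None, no_dump)
      return 89

    # The production code would call the stub after it was patched in place
    # of the real runner; here it is exercised directly to keep the sketch
    # self-contained.
    result = expect(exe, ['Foo.Bar1', 'Foo.Bar3'],
                    run_test_cases.num_processors(), 120, None)
    self.assertEqual(89, result)


if __name__ == '__main__':
  unittest.main()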
Code example #4
File: isolateserver_archive.py Project: hinike/opera
def upload_sha1_tree(base_url, indir, infiles, namespace):
    """Uploads the given tree to the given url.

  Arguments:
    base_url:  The base url; it is assumed that |base_url|/has/ can be used to
               query if an element was already uploaded, and |base_url|/store/
               can be used to upload a new element.
    indir:     Root directory the infiles are based in.
    infiles:   dict of files to upload from |indir| to |base_url|.
    namespace: The namespace to use on the server.
  """
    logging.info("upload tree(base_url=%s, indir=%s, files=%d)" % (base_url, indir, len(infiles)))
    assert base_url.startswith("http"), base_url
    base_url = base_url.rstrip("/")

    # TODO(maruel): Make this request much earlier asynchronously while the files
    # are being enumerated.
    token = urllib.quote(url_open(base_url + "/content/get_token").read())

    # Create a pool of workers to zip and upload any files missing from
    # the server.
    num_threads = run_test_cases.num_processors()
    zipping_pool = run_isolated.ThreadPool(min(2, num_threads), num_threads, 0, "zip")
    remote_uploader = UploadRemote(namespace, base_url, token)

    # Starts the zip and upload process for files that are missing
    # from the server.
    contains_hash_url = "%s/content/contains/%s?token=%s" % (base_url, namespace, token)
    uploaded = []
    for relfile, metadata in get_files_to_upload(contains_hash_url, infiles):
        infile = os.path.join(indir, relfile)
        zipping_pool.add_task(0, zip_and_trigger_upload, infile, metadata, remote_uploader.add_item)
        uploaded.append((relfile, metadata))

    logging.info("Waiting for all files to finish zipping")
    zipping_pool.join()
    zipping_pool.close()
    logging.info("All files zipped.")

    logging.info("Waiting for all files to finish uploading")
    # Will raise if any exception occurred.
    remote_uploader.join()
    remote_uploader.close()
    logging.info("All files are uploaded")

    total = len(infiles)
    total_size = sum(metadata.get("s", 0) for metadata in infiles.itervalues())
    logging.info("Total:      %6d, %9.1fkb", total, sum(m.get("s", 0) for m in infiles.itervalues()) / 1024.0)
    cache_hit = set(infiles.iterkeys()) - set(x[0] for x in uploaded)
    cache_hit_size = sum(infiles[i].get("s", 0) for i in cache_hit)
    logging.info(
        "cache hit:  %6d, %9.1fkb, %6.2f%% files, %6.2f%% size",
        len(cache_hit),
        cache_hit_size / 1024.0,
        len(cache_hit) * 100.0 / total,
        cache_hit_size * 100.0 / total_size if total_size else 0,
    )
    cache_miss = uploaded
    cache_miss_size = sum(infiles[i[0]].get("s", 0) for i in cache_miss)
    logging.info(
        "cache miss: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size",
        len(cache_miss),
        cache_miss_size / 1024.0,
        len(cache_miss) * 100.0 / total,
        cache_miss_size * 100.0 / total_size if total_size else 0,
    )
    return 0
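
Code example #4 drives the missing-file check through a get_files_to_upload(contains_hash_url, infiles) generator that yields (relfile, metadata) pairs, while the older variants below (#5 and #6) push a zip_and_upload callback through process_items. The sketch below only illustrates the generator contract implied by the call site above; the actual wire format of the /content/contains/ endpoint is not visible in the snippet, so the membership check is delegated to a caller-supplied predicate, which is an assumption for illustration.

def get_files_to_upload(contains_hash_url, infiles, is_missing=None):
  """Yields (relfile, metadata) pairs for files the server does not have yet.

  Illustrative only: the real implementation queries |contains_hash_url|;
  since that protocol is not shown in the snippet, the check is delegated to
  a caller-supplied is_missing(contains_hash_url, relfile, metadata).
  """
  if is_missing is None:
    # Pessimistic default: assume nothing is on the server and upload it all.
    is_missing = lambda url, relfile, metadata: True
  for relfile, metadata in sorted(infiles.items()):
    if is_missing(contains_hash_url, relfile, metadata):
      yield relfile, metadata

With this contract, the loop in code example #4 enumerates only the entries that still need zipping and uploading, and everything it yields ends up in the uploaded list used for the cache-miss statistics.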
Code example #5
def upload_sha1_tree(base_url, indir, infiles, namespace):
    """Uploads the given tree to the given url.

  Arguments:
    base_url:  The base url; it is assumed that |base_url|/has/ can be used to
               query if an element was already uploaded, and |base_url|/store/
               can be used to upload a new element.
    indir:     Root directory the infiles are based in.
    infiles:   dict of files to upload from |indir| to |base_url|.
    namespace: The namespace to use on the server.
  """
    logging.info('upload tree(base_url=%s, indir=%s, files=%d)' %
                 (base_url, indir, len(infiles)))
    assert base_url.startswith('http'), base_url
    base_url = base_url.rstrip('/')

    # TODO(maruel): Make this request much earlier asynchronously while the files
    # are being enumerated.
    token = urllib.quote(url_open(base_url + '/content/get_token').read())

    # Create a pool of workers to zip and upload any files missing from
    # the server.
    num_threads = run_test_cases.num_processors()
    zipping_pool = run_isolated.ThreadPool(num_threads, num_threads, 0)
    remote_uploader = UploadRemote(namespace, base_url, token)

    # Starts the zip and upload process for a given query. The query is assumed
    # to be in the format (relfile, metadata).
    uploaded = []

    def zip_and_upload(query):
        relfile, metadata = query
        infile = os.path.join(indir, relfile)
        zipping_pool.add_task(0, zip_and_trigger_upload, infile, metadata,
                              remote_uploader.add_item)
        uploaded.append(query)

    contains_hash_url = '%s/content/contains/%s?token=%s' % (base_url,
                                                             namespace, token)
    process_items(contains_hash_url, infiles, zip_and_upload)

    logging.info('Waiting for all files to finish zipping')
    zipping_pool.join()
    logging.info('All files zipped.')

    logging.info('Waiting for all files to finish uploading')
    # Will raise if any exception occurred.
    remote_uploader.join()
    logging.info('All files are uploaded')

    total = len(infiles)
    total_size = sum(metadata.get('s', 0) for metadata in infiles.itervalues())
    logging.info('Total:      %6d, %9.1fkb', total,
                 sum(m.get('s', 0) for m in infiles.itervalues()) / 1024.)
    cache_hit = set(infiles.iterkeys()) - set(x[0] for x in uploaded)
    cache_hit_size = sum(infiles[i].get('s', 0) for i in cache_hit)
    logging.info('cache hit:  %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
                 len(cache_hit), cache_hit_size / 1024.,
                 len(cache_hit) * 100. / total,
                 cache_hit_size * 100. / total_size if total_size else 0)
    cache_miss = uploaded
    cache_miss_size = sum(infiles[i[0]].get('s', 0) for i in cache_miss)
    logging.info('cache miss: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
                 len(cache_miss), cache_miss_size / 1024.,
                 len(cache_miss) * 100. / total,
                 cache_miss_size * 100. / total_size if total_size else 0)
    return 0
Code example #6
def upload_sha1_tree(base_url, indir, infiles, namespace):
  """Uploads the given tree to the given url.

  Arguments:
    base_url:  The base url; it is assumed that |base_url|/has/ can be used to
               query if an element was already uploaded, and |base_url|/store/
               can be used to upload a new element.
    indir:     Root directory the infiles are based in.
    infiles:   dict of files to upload from |indir| to |base_url|.
    namespace: The namespace to use on the server.
  """
  logging.info('upload tree(base_url=%s, indir=%s, files=%d)' %
               (base_url, indir, len(infiles)))
  assert base_url.startswith('http'), base_url
  base_url = base_url.rstrip('/')

  # TODO(maruel): Make this request much earlier asynchronously while the files
  # are being enumerated.
  token = urllib.quote(url_open(base_url + '/content/get_token').read())

  # Create a pool of workers to zip and upload any files missing from
  # the server.
  num_threads = run_test_cases.num_processors()
  zipping_pool = run_isolated.ThreadPool(num_threads, num_threads, 0)
  remote_uploader = UploadRemote(namespace, base_url, token)

  # Starts the zip and upload process for a given query. The query is assumed
  # to be in the format (relfile, metadata).
  uploaded = []
  def zip_and_upload(query):
    relfile, metadata = query
    infile = os.path.join(indir, relfile)
    zipping_pool.add_task(0, zip_and_trigger_upload, infile, metadata,
                          remote_uploader.add_item)
    uploaded.append(query)

  contains_hash_url = '%s/content/contains/%s?token=%s' % (
      base_url, namespace, token)
  process_items(contains_hash_url, infiles, zip_and_upload)

  logging.info('Waiting for all files to finish zipping')
  zipping_pool.join()
  logging.info('All files zipped.')

  logging.info('Waiting for all files to finish uploading')
  # Will raise if any exception occurred.
  remote_uploader.join()
  logging.info('All files are uploaded')

  total = len(infiles)
  total_size = sum(metadata.get('s', 0) for metadata in infiles.itervalues())
  logging.info(
      'Total:      %6d, %9.1fkb',
      total,
      sum(m.get('s', 0) for m in infiles.itervalues()) / 1024.)
  cache_hit = set(infiles.iterkeys()) - set(x[0] for x in uploaded)
  cache_hit_size = sum(infiles[i].get('s', 0) for i in cache_hit)
  logging.info(
      'cache hit:  %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
      len(cache_hit),
      cache_hit_size / 1024.,
      len(cache_hit) * 100. / total,
      cache_hit_size * 100. / total_size if total_size else 0)
  cache_miss = uploaded
  cache_miss_size = sum(infiles[i[0]].get('s', 0) for i in cache_miss)
  logging.info(
      'cache miss: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
      len(cache_miss),
      cache_miss_size / 1024.,
      len(cache_miss) * 100. / total,
      cache_miss_size * 100. / total_size if total_size else 0)
  return 0
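
For orientation, a hedged sketch of a call site for upload_sha1_tree, based only on the signature and the 's' (size) metadata key used above. It assumes the isolateserver_archive module and its helpers (url_open, UploadRemote, process_items, and so on) are importable; the server URL, namespace, directory, and file entries are placeholders, and any additional metadata fields real callers supply are not shown in these snippets.

# Hypothetical call site; every concrete value below is a placeholder.
base_url = 'https://isolateserver.example.com'
indir = '/tmp/out/Release'
infiles = {
    # relative path -> metadata; these snippets only rely on the 's' key.
    'unit_tests': {'s': 12345678},
    'unit_tests.isolated': {'s': 2048},
}

exit_code = upload_sha1_tree(base_url, indir, infiles, 'default-gzip')
assert exit_code == 0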