示例#1
0
def archive(isolate_server, script):
  """Archives the tool and return the sha-1."""
  tool_name = os.path.basename(script)
  # Minimal .isolate spec: run the script with python, shipping only the
  # script itself.
  spec = {
    'variables': {
      'command': ['python', tool_name],
      'files': [tool_name],
    },
  }
  staging = tempfile.mkdtemp(prefix=u'run_on_bots')
  try:
    isolate_path = os.path.join(staging, 'tool.isolate')
    isolated_path = os.path.join(staging, 'tool.isolated')
    with open(isolate_path, 'wb') as f:
      f.write(str(spec))
    shutil.copyfile(script, os.path.join(staging, tool_name))
    out = subprocess.check_output(
        [
          sys.executable, 'isolate.py', 'archive',
          '--isolate-server', isolate_server,
          '-i', isolate_path,
          '-s', isolated_path,
        ],
        cwd=ROOT_DIR)
    # The first whitespace-delimited token of the output is the hash.
    return out.split()[0]
  finally:
    file_path.rmtree(staging)
 def tearDown(self):
   # Tear down the fixture: start the test server shutdown, delete the
   # per-test temporary directory while it winds down, then finish the
   # shutdown.
   try:
     self.server.close_start()
     file_path.rmtree(self.tempdir)
     self.server.close_end()
   finally:
     # The base-class teardown must run even if cleanup above failed.
     super(RunIsolatedTest, self).tearDown()
示例#3
0
  def trim(self, min_free_space):
    """Purges cache.

    Removes cache directories that were not accessed for a long time
    until there is enough free space and the number of caches is sane.

    If min_free_space is None, disk free space is not checked.

    Requires NamedCache to be open.

    Arguments:
    - min_free_space: minimum free disk space (bytes) to reach, or None to
      skip the free-space check and only enforce MAX_CACHE_SIZE.
    """
    self._lock.assert_locked()
    if not os.path.isdir(self.root_dir):
      return

    free_space = 0
    if min_free_space is not None:
      # BUG FIX: the initial measurement was previously discarded (the call
      # result was not assigned), so the loop below always started from
      # free_space == 0 and evicted entries even when enough space was
      # already available.
      free_space = file_path.get_free_space(self.root_dir)
    while ((min_free_space is not None and free_space < min_free_space)
           or len(self._lru) > MAX_CACHE_SIZE):
      try:
        # Evict the least-recently-used cache first.
        name, (path, _) = self._lru.get_oldest()
      except KeyError:
        # Nothing left to evict.
        return
      named_dir = self._get_named_path(name)
      if fs.islink(named_dir):
        fs.unlink(named_dir)
      path_abs = os.path.join(self.root_dir, path)
      if os.path.isdir(path_abs):
        file_path.rmtree(path_abs)
      if min_free_space is not None:
        # Re-measure after each eviction.
        free_space = file_path.get_free_space(self.root_dir)
      self._lru.pop(name)
示例#4
0
    def tearDown(self):
        # Run the base-class teardown first, then undo the filesystem and
        # cwd changes this test made.
        super(FindExecutableTest, self).tearDown()

        # restore cwd
        os.chdir(self._orig_cwd)

        # Delete the scratch directory created for this test.
        file_path.rmtree(self.playground)
示例#5
0
 def tearDown(self):
     # Begin the test server shutdown, remove the temporary directory while
     # it proceeds, then complete the shutdown.
     try:
         self.server.close_start()
         file_path.rmtree(self.tempdir)
         self.server.close_end()
     finally:
         # Always chain to the base-class teardown.
         super(IsolateServerStorageSmokeTest, self).tearDown()
 def task_collect(self, task_id):
   """Collects the results for a task.

   Arguments:
   - task_id: id of the swarming task to collect.

   Returns:
     tuple(summary, outputs): the parsed task summary json and a dict
     mapping output file paths (relative to the output dir) to contents.
   """
   h, tmp = tempfile.mkstemp(prefix='swarming_smoke_test', suffix='.json')
   os.close(h)
   try:
     tmpdir = tempfile.mkdtemp(prefix='swarming_smoke_test')
     try:
       # swarming.py collect will return the exit code of the task.
       args = [
         '--task-summary-json', tmp, task_id, '--task-output-dir', tmpdir,
         '--timeout', '20', '--perf',
       ]
       self._run('collect', args)
       with open(tmp, 'rb') as f:
         content = f.read()
       try:
         summary = json.loads(content)
       except ValueError:
         # Dump the raw content to ease debugging malformed output.
         print >> sys.stderr, 'Bad json:\n%s' % content
         raise
       # Gather every file written under the output directory, keyed by its
       # path relative to tmpdir.
       outputs = {}
       for root, _, files in os.walk(tmpdir):
         for i in files:
           p = os.path.join(root, i)
           name = p[len(tmpdir)+1:]
           with open(p, 'rb') as f:
             outputs[name] = f.read()
       return summary, outputs
     finally:
       file_path.rmtree(tmpdir)
   finally:
     os.remove(tmp)
示例#7
0
    def open(self, time_fn=None):
        """Opens NamedCaches for mutation operations, such as install.

    Only one caller can open the cache manager at a time. If the same thread
    calls this function after opening it earlier, the call will deadlock.

    time_fn is a function that returns timestamp (float) and used to take
    timestamps when new caches are requested.

    Returns a context manager that must be closed as soon as possible.
    """
        # NOTE(review): this is a generator body (it yields) — presumably
        # wrapped by contextlib.contextmanager via a decorator not visible in
        # this chunk; confirm.
        with self._lock:
            state_path = os.path.join(self.root_dir, u'state.json')
            assert self._lru is None, 'acquired lock, but self._lru is not None'
            if os.path.isfile(state_path):
                try:
                    self._lru = lru.LRUDict.load(state_path)
                except ValueError:
                    # Corrupt state file: delete all named caches rather than
                    # operating on inconsistent data.
                    logging.exception('failed to load named cache state file')
                    logging.warning('deleting named caches')
                    file_path.rmtree(self.root_dir)
            # Fall back to an empty LRU if loading failed or no state existed.
            self._lru = self._lru or lru.LRUDict()
            if time_fn:
                self._lru.time_fn = time_fn
            try:
                yield
            finally:
                # Persist the (possibly mutated) LRU state and release it so
                # a later open() starts from a clean slate.
                file_path.ensure_tree(self.root_dir)
                self._lru.save(state_path)
                self._lru = None
示例#8
0
 def tearDown(self):
   # Start the server shutdown, delete the temp dir while it winds down,
   # then finish the shutdown; always chain to the base teardown.
   try:
     self.server.close_start()
     file_path.rmtree(self.tempdir)
     self.server.close_end()
   finally:
     super(IsolateServerStorageSmokeTest, self).tearDown()
示例#9
0
  def test_get_swarming_bot_zip(self):
    # Build the bot zip, verify its contents, then smoke-test that the zip
    # actually runs as a self-contained bot.
    zipped_code = bot_code.get_swarming_bot_zip('http://localhost')
    # Ensure the zip is valid and all the expected files are present.
    with zipfile.ZipFile(StringIO.StringIO(zipped_code), 'r') as zip_file:
      for i in bot_archive.FILES:
        with zip_file.open(i) as f:
          content = f.read()
          # __init__.py files are legitimately empty; everything else must
          # have content.
          if os.path.basename(i) != '__init__.py':
            self.assertTrue(content, i)

    temp_dir = tempfile.mkdtemp(prefix='swarming')
    try:
      # Try running the bot and ensure it can import the required files. (It
      # would crash if it failed to import them).
      bot_path = os.path.join(temp_dir, 'swarming_bot.zip')
      with open(bot_path, 'wb') as f:
        f.write(zipped_code)
      proc = subprocess.Popen(
          [sys.executable, bot_path, 'start_bot', '-h'],
          cwd=temp_dir,
          stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT)
      out = proc.communicate()[0]
      self.assertEqual(0, proc.returncode, out)
    finally:
      file_path.rmtree(temp_dir)
示例#10
0
 def tearDown(self):
     # Remove mocks.
     super(RunIsolatedTestBase, self).tearDown()
     # Leave the temp dir before deleting it; it cannot be removed while it
     # is the current working directory.
     os.chdir(self._previous_dir)
     file_path.rmtree(self.tempdir)
     # NOTE(review): if rmtree raises, the CIPD server is never closed —
     # consider a try/finally; confirm whether that leak matters here.
     if self._cipd_server:
         self._cipd_server.close()
示例#11
0
        def test_native_case_alternate_datastream(self):
            # Create the file manually, since tempfile doesn't support ADS.
            tempdir = unicode(tempfile.mkdtemp(prefix=u'trace_inputs'))
            try:
                tempdir = file_path.get_native_path_case(tempdir)
                basename = 'foo.txt'
                # ':Zone.Identifier' addresses an NTFS alternate data stream
                # (ADS) on the file.
                filename = basename + ':Zone.Identifier'
                filepath = os.path.join(tempdir, filename)
                open(filepath, 'w').close()
                self.assertEqual(filepath,
                                 file_path.get_native_path_case(filepath))
                # ':$DATA' is the explicit default-stream suffix.
                data_suffix = ':$DATA'
                self.assertEqual(
                    filepath + data_suffix,
                    file_path.get_native_path_case(filepath + data_suffix))

                open(filepath + '$DATA', 'w').close()
                self.assertEqual(
                    filepath + data_suffix,
                    file_path.get_native_path_case(filepath + data_suffix))
                # Ensure the ADS weren't created as separate file. You love NTFS, don't
                # you?
                self.assertEqual([basename], fs.listdir(tempdir))
            finally:
                file_path.rmtree(tempdir)
示例#12
0
 def tearDown(self):
     # Stop the auth system first; temp dir removal and the base teardown
     # run regardless of whether stopping succeeds.
     try:
         if self.auth_sys:
             self.auth_sys.stop()
     finally:
         file_path.rmtree(self.tmp_dir)
         super(AuthSystemTest, self).tearDown()
示例#13
0
 def task_collect(self, task_id):
   """Collects the results for a task.

   Arguments:
   - task_id: id of the swarming task to collect.

   Returns:
     tuple(summary, outputs): the parsed task summary json and a dict
     mapping output file paths (relative to the output dir) to contents.
   """
   h, tmp = tempfile.mkstemp(prefix='swarming_smoke_test', suffix='.json')
   os.close(h)
   try:
     tmpdir = tempfile.mkdtemp(prefix='swarming_smoke_test')
     try:
       # swarming.py collect will return the exit code of the task.
       args = [
         '--task-summary-json', tmp, task_id, '--task-output-dir', tmpdir,
         '--timeout', '20',
       ]
       self._run('collect', args)
       with open(tmp, 'rb') as f:
         content = f.read()
       try:
         summary = json.loads(content)
       except ValueError:
         # Dump the raw content to ease debugging malformed output.
         print >> sys.stderr, 'Bad json:\n%s' % content
         raise
       # Gather every file written under the output directory, keyed by its
       # path relative to tmpdir.
       outputs = {}
       for root, _, files in os.walk(tmpdir):
         for i in files:
           p = os.path.join(root, i)
           name = p[len(tmpdir)+1:]
           with open(p, 'rb') as f:
             outputs[name] = f.read()
       return summary, outputs
     finally:
       file_path.rmtree(tmpdir)
   finally:
     os.remove(tmp)
示例#14
0
    def test_get_swarming_bot_zip(self):
        # Build the bot zip, verify its contents, then smoke-test that the
        # zip actually runs as a self-contained bot.
        zipped_code = bot_code.get_swarming_bot_zip('http://localhost')
        # Ensure the zip is valid and all the expected files are present.
        with zipfile.ZipFile(StringIO.StringIO(zipped_code), 'r') as zip_file:
            for i in bot_archive.FILES:
                with zip_file.open(i) as f:
                    content = f.read()
                    # __init__.py files are legitimately empty; everything
                    # else must have content.
                    if os.path.basename(i) != '__init__.py':
                        self.assertTrue(content, i)

        temp_dir = tempfile.mkdtemp(prefix='swarming')
        try:
            # Try running the bot and ensure it can import the required files. (It
            # would crash if it failed to import them).
            bot_path = os.path.join(temp_dir, 'swarming_bot.zip')
            with open(bot_path, 'wb') as f:
                f.write(zipped_code)
            proc = subprocess.Popen(
                [sys.executable, bot_path, 'start_bot', '-h'],
                cwd=temp_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            out = proc.communicate()[0]
            self.assertEqual(0, proc.returncode, out)
        finally:
            file_path.rmtree(temp_dir)
 def tearDown(self):
     # Begin the test server shutdown, delete the temp dir while it winds
     # down, then finish the shutdown; always chain to the base teardown.
     try:
         self.server.close_start()
         file_path.rmtree(self.tempdir)
         self.server.close_end()
     finally:
         super(RunIsolatedTest, self).tearDown()
示例#16
0
def archive(isolate_server, script):
  """Archives the tool and return the sha-1."""
  tool_name = os.path.basename(script)
  # Minimal .isolate spec: run the script with python, shipping only the
  # script itself.
  spec = {
    'variables': {
      'command': ['python', tool_name],
      'files': [tool_name],
    },
  }
  staging = tempfile.mkdtemp(prefix=u'run_on_bots')
  try:
    isolate_path = os.path.join(staging, 'tool.isolate')
    isolated_path = os.path.join(staging, 'tool.isolated')
    with open(isolate_path, 'wb') as f:
      f.write(str(spec))
    shutil.copyfile(script, os.path.join(staging, tool_name))
    out = subprocess.check_output(
        [
          sys.executable, 'isolate.py', 'archive',
          '--isolate-server', isolate_server,
          '-i', isolate_path,
          '-s', isolated_path,
        ],
        cwd=ROOT_DIR)
    # The first whitespace-delimited token of the output is the hash.
    return out.split()[0]
  finally:
    file_path.rmtree(staging)
示例#17
0
    def __init__(self, cache_dir, policies, time_fn=None):
        """Initializes NamedCaches.

    Arguments:
    - cache_dir is a directory for persistent cache storage.
    - policies is a CachePolicies instance.
    - time_fn is a function that returns timestamp (float) and used to take
      timestamps when new caches are requested. Used in unit tests.
    """
        super(NamedCache, self).__init__(cache_dir)
        self._policies = policies
        # LRU {cache_name -> tuple(cache_location, size)}
        self.state_file = os.path.join(cache_dir, self.STATE_FILE)
        self._lru = lru.LRUDict()
        if not fs.isdir(self.cache_dir):
            fs.makedirs(self.cache_dir)
        elif os.path.isfile(self.state_file):
            try:
                self._lru = lru.LRUDict.load(self.state_file)
            except ValueError:
                # Corrupt state: wipe the cache directory and keep the empty
                # LRU created above.
                logging.exception('failed to load named cache state file')
                logging.warning('deleting named caches')
                file_path.rmtree(self.cache_dir)
            # Presumably migrates state written by an older version of the
            # code — confirm against _try_upgrade's definition.
            with self._lock:
                self._try_upgrade()
        if time_fn:
            self._lru.time_fn = time_fn
示例#18
0
  def test_putfile(self):
    """Exercises isolateserver.putfile() over its copy/link modes."""
    tmpoutdir = None
    tmpindir = None

    try:
      # Source file the copy/link variants read from.
      tmpindir = tempfile.mkdtemp(prefix='isolateserver_test')
      infile = os.path.join(tmpindir, u'in')
      with fs.open(infile, 'wb') as f:
        f.write('data')

      tmpoutdir = tempfile.mkdtemp(prefix='isolateserver_test')

      # Copy as fileobj
      fo = os.path.join(tmpoutdir, u'fo')
      isolateserver.putfile(io.BytesIO('data'), fo)
      self.assertEqual(True, fs.exists(fo))
      self.assertEqual(False, fs.islink(fo))
      self.assertFile(fo, 'data')

      # Copy with partial fileobj
      pfo = os.path.join(tmpoutdir, u'pfo')
      fobj = io.BytesIO('adatab')
      fobj.read(1)  # Read the 'a'
      isolateserver.putfile(fobj, pfo, size=4)
      self.assertEqual(True, fs.exists(pfo))
      self.assertEqual(False, fs.islink(pfo))
      # Only size=4 bytes were consumed; the trailing 'b' remains.
      self.assertEqual('b', fobj.read())
      self.assertFile(pfo, 'data')

      # Copy as not readonly
      cp = os.path.join(tmpoutdir, u'cp')
      with fs.open(infile, 'rb') as f:
        isolateserver.putfile(f, cp, file_mode=0o755)
      self.assertEqual(True, fs.exists(cp))
      self.assertEqual(False, fs.islink(cp))
      self.assertFile(cp, 'data')

      # Use hardlink
      hl = os.path.join(tmpoutdir, u'hl')
      with fs.open(infile, 'rb') as f:
        isolateserver.putfile(f, hl, use_symlink=False)
      self.assertEqual(True, fs.exists(hl))
      self.assertEqual(False, fs.islink(hl))
      self.assertFile(hl, 'data')

      # Use symlink
      sl = os.path.join(tmpoutdir, u'sl')
      with fs.open(infile, 'rb') as f:
        isolateserver.putfile(f, sl, use_symlink=True)
      self.assertEqual(True, fs.exists(sl))
      self.assertEqual(True, fs.islink(sl))
      self.assertEqual('data', fs.open(sl, 'rb').read())
      self.assertFile(sl, 'data')

    finally:
      if tmpindir:
        file_path.rmtree(tmpindir)
      if tmpoutdir:
        file_path.rmtree(tmpoutdir)
示例#19
0
 def tearDown(self):
   # Leave the soon-to-be-deleted directory before removing it.
   os.chdir(test_env_bot_code.BOT_DIR)
   try:
     file_path.rmtree(self.root_dir)
   except OSError:
     # Best effort: report and continue so the base teardown still runs.
     print >> sys.stderr, 'Failed to delete %s' % self.root_dir
   finally:
     super(TestTaskRunnerBase, self).tearDown()
示例#20
0
 def test_rmtree_unicode(self):
     # Regression check: rmtree() must accept a non-unicode (str) path even
     # when the tree contains unicode-named files.
     arabic_name = u"\u0627\u0644\u0635\u064A\u0646\u064A\u0629"
     subdir = os.path.join(self.tempdir, "hi")
     fs.mkdir(subdir)
     filepath = os.path.join(subdir, arabic_name)
     with fs.open(filepath, "wb") as f:
         f.write("hi")
     # In particular, it fails when the input argument is a str.
     file_path.rmtree(str(subdir))
示例#21
0
 def tearDown(self):
   try:
     file_path.rmtree(self.root_dir)
   except OSError:
     # Report the failure, then re-raise so the test run flags it.
     print >> sys.stderr, 'Failed to delete %s' % self.root_dir
     raise
   finally:
     # The base-class teardown always runs.
     super(TaskRunnerNoServer, self).tearDown()
 def tearDown(self):
   try:
     if self._tempdir:
       file_path.rmtree(self._tempdir)
     # Only verify silence of stdout/stderr when the test itself passed, to
     # avoid masking the original failure.
     if not self.has_failed():
       self._check_output('', '')
   finally:
     super(NetTestCase, self).tearDown()
示例#23
0
 def tearDown(self):
     # Leave the soon-to-be-deleted directory before removing it.
     os.chdir(test_env_bot_code.BOT_DIR)
     try:
         file_path.rmtree(self.root_dir)
     except OSError:
         # Best effort: report and continue so the base teardown still runs.
         print >> sys.stderr, 'Failed to delete %s' % self.root_dir
     finally:
         super(TestTaskRunnerBase, self).tearDown()
示例#24
0
 def tearDown(self):
     try:
         if self._tempdir:
             file_path.rmtree(self._tempdir)
         # Only verify silence of stdout/stderr when the test passed, to
         # avoid masking the original failure.
         if not self.has_failed():
             self._check_output("", "")
     finally:
         super(NetTestCase, self).tearDown()
示例#25
0
 def tearDown(self):
   # Strip read-only bits (set by tests) so deletion can proceed, remove
   # the tree, then chain to the base-class teardown.
   for dirpath, dirnames, filenames in os.walk(self.tempdir, topdown=True):
     for entry in filenames + dirnames:
       file_path.set_read_only(os.path.join(dirpath, entry), False)
   file_path.rmtree(self.tempdir)
   super(RunIsolatedTestBase, self).tearDown()
示例#26
0
 def tearDown(self):
     try:
         # Restore cwd before deleting the temp dir it may point into.
         os.chdir(self.old_cwd)
         if self._tempdir:
             file_path.rmtree(self._tempdir)
         # Only verify output silence when the test passed, to avoid
         # masking the original failure.
         if not self.has_failed():
             self.checkOutput('', '')
     finally:
         super(TestCase, self).tearDown()
示例#27
0
    def test_cleanup_missing(self):
        # Seed a cache with one entry, then delete its backing directory out
        # from under it to simulate on-disk corruption.
        cache = self.get_cache(_get_policies())
        self._add_one_item(cache, 1)
        missing = os.path.join(cache.cache_dir, cache._lru[u'1'][0])
        file_path.rmtree(missing)

        # A fresh instance still lists the entry until cleanup() drops it.
        cache = self.get_cache(_get_policies())
        self.assertEqual([u'1'], list(cache))
        self.assertEqual(True, cache.cleanup())
        self.assertEqual([], list(cache))
示例#28
0
 def test_rmtree_unicode(self):
   # rmtree() must cope with a str argument even when the tree holds
   # unicode-named entries.
   arabic_name = u'\u0627\u0644\u0635\u064A\u0646\u064A\u0629'
   subdir = os.path.join(self.tempdir, 'hi')
   os.mkdir(subdir)
   filepath = os.path.join(subdir, arabic_name)
   with open(filepath, 'wb') as f:
     f.write('hi')
   # In particular, it fails when the input argument is a str.
   file_path.rmtree(str(subdir))
示例#29
0
 def test_rmtree_unicode(self):
   # rmtree() must cope with a str argument even when the tree holds
   # unicode-named entries (created through the fs wrapper here).
   arabic_name = u'\u0627\u0644\u0635\u064A\u0646\u064A\u0629'
   subdir = os.path.join(self.tempdir, 'hi')
   fs.mkdir(subdir)
   filepath = os.path.join(subdir, arabic_name)
   with fs.open(filepath, 'wb') as f:
     f.write('hi')
   # In particular, it fails when the input argument is a str.
   file_path.rmtree(str(subdir))
示例#30
0
 def tearDown(self):
     # Clear read-only attributes so deletion succeeds, remove the tree,
     # then run the base-class teardown.
     for dirpath, dirnames, filenames in os.walk(self.tempdir,
                                                 topdown=True):
         for entry in filenames + dirnames:
             file_path.set_read_only(os.path.join(dirpath, entry), False)
     file_path.rmtree(self.tempdir)
     super(RunIsolatedTestBase, self).tearDown()
示例#31
0
 def tearDown(self):
   try:
     # Restore cwd before deleting the temp dir it may point into.
     os.chdir(self.old_cwd)
     if self._tempdir:
       file_path.rmtree(self._tempdir)
     # Only verify output silence when the test passed, to avoid masking
     # the original failure.
     if not self.has_failed():
       self.checkOutput('', '')
   finally:
     super(TestCase, self).tearDown()
示例#32
0
 def tearDown(self):
   # Shut down the server; the temp tree removal and the base teardown run
   # regardless of whether the shutdown succeeds.
   try:
     self._server.shutdown()
   finally:
     try:
       file_path.rmtree(self.root_dir)
     except OSError:
       # Best effort: report and continue so the base teardown still runs.
       print >> sys.stderr, 'Failed to delete %s' % self.root_dir
     finally:
       super(TaskRunnerSmoke, self).tearDown()
示例#33
0
 def test_undeleteable_chmod(self):
   # Create a file and a directory with an empty ACL. Then try to delete it.
   dirpath = os.path.join(self.tempdir, 'd')
   filepath = os.path.join(dirpath, 'f')
   os.mkdir(dirpath)
   with open(filepath, 'w') as f:
     f.write('hi')
   # Strip every permission bit from both entries (file first, then its
   # parent); rmtree() must still succeed.
   for target in (filepath, dirpath):
     os.chmod(target, 0)
   file_path.rmtree(dirpath)
示例#34
0
 def tearDown(self):
     # Shut down the server; the temp tree removal and the base teardown
     # run regardless of whether the shutdown succeeds.
     try:
         self._server.shutdown()
     finally:
         try:
             file_path.rmtree(self.root_dir)
         except OSError:
             # Best effort: report and continue so the base teardown runs.
             print >> sys.stderr, 'Failed to delete %s' % self.root_dir
         finally:
             super(TaskRunnerSmoke, self).tearDown()
示例#35
0
 def test_undeleteable_chmod(self):
   # Create a file and a directory with an empty ACL. Then try to delete it.
   dirpath = os.path.join(self.tempdir, 'd')
   filepath = os.path.join(dirpath, 'f')
   os.mkdir(dirpath)
   with open(filepath, 'w') as f:
     f.write('hi')
   # Strip every permission bit from both entries (file first, then its
   # parent); rmtree() must still succeed.
   for target in (filepath, dirpath):
     os.chmod(target, 0)
   file_path.rmtree(dirpath)
示例#36
0
def main():
    """Profiles zlib/bz2 compression over the files of an .isolated target.

    Remaps the .isolated file into a temporary directory, then compresses
    either the whole tree or only the N largest files, printing timings.
    """
    tools.disable_buffering()
    parser = optparse.OptionParser()
    parser.add_option('-s',
                      '--isolated',
                      help='.isolated file to profile with.')
    parser.add_option('--largest_files',
                      type='int',
                      help='If this is set, instead of compressing all the '
                      'files, only the large n files will be compressed')
    options, args = parser.parse_args()

    if args:
        parser.error('Unknown args passed in; %s' % args)
    if not options.isolated:
        parser.error('The .isolated file must be given.')

    temp_dir = None
    try:
        temp_dir = tempfile.mkdtemp(prefix=u'zip_profiler')

        # Create a directory of the required files
        subprocess.check_call([
            os.path.join(ROOT_DIR, 'isolate.py'), 'remap', '-s',
            options.isolated, '--outdir', temp_dir
        ])

        file_set = tree_files(temp_dir)

        if options.largest_files:
            sorted_by_size = sorted(file_set.iteritems(),
                                    key=lambda x: x[1],
                                    reverse=True)
            files_to_compress = sorted_by_size[:options.largest_files]

            for filename, size in files_to_compress:
                print('Compressing %s, uncompressed size %d' %
                      (filename, size))

                profile_compress('zlib', zlib.compressobj, range(10), zip_file,
                                 filename)
                profile_compress('bz2', bz2.BZ2Compressor, range(1, 10),
                                 zip_file, filename)
        else:
            print('Number of files: %s' % len(file_set))
            print('Total size: %s' % sum(file_set.itervalues()))

            # Profile!
            profile_compress('zlib', zlib.compressobj, range(10),
                             zip_directory, temp_dir)
            profile_compress('bz2', bz2.BZ2Compressor, range(1, 10),
                             zip_directory, temp_dir)
    finally:
        # BUG FIX: temp_dir stays None when mkdtemp() itself fails; guard so
        # cleanup does not mask the original exception with a crash on None.
        if temp_dir:
            file_path.rmtree(temp_dir)
示例#37
0
 def tearDown(self):
     # Drop read-only bits before deleting the temp tree; the base-class
     # teardown always runs.
     try:
         if self._tempdir:
             for dirpath, dirnames, filenames in fs.walk(self._tempdir,
                                                         topdown=True):
                 for entry in filenames + dirnames:
                     file_path.set_read_only(
                         os.path.join(dirpath, entry), False)
             file_path.rmtree(self._tempdir)
     finally:
         super(FilePathTest, self).tearDown()
示例#38
0
def put_to_named_cache(manager, cache_name, file_name, contents):
    """Puts files into named cache."""
    scratch = tempfile.mkdtemp(prefix=u'run_isolated_test')
    try:
        # Install the cache, drop the file into it, then uninstall so the
        # content is committed back to the manager.
        cache_dir = os.path.join(scratch, 'cache')
        manager.install(cache_dir, cache_name)
        target = os.path.join(cache_dir, file_name)
        with open(target, 'wb') as f:
            f.write(contents)
        manager.uninstall(cache_dir, cache_name)
    finally:
        file_path.rmtree(scratch)
示例#39
0
 def tearDown(self):
   # Remove read-only flags so deletion can proceed; the base-class
   # teardown always runs.
   try:
     if self._tempdir:
       for dirpath, dirnames, filenames in fs.walk(
           self._tempdir, topdown=True):
         for entry in filenames + dirnames:
           file_path.set_read_only(os.path.join(dirpath, entry), False)
       file_path.rmtree(self._tempdir)
   finally:
     super(FilePathTest, self).tearDown()
示例#40
0
 def tearDown(self):
   # Leave the soon-to-be-deleted directory first.
   os.chdir(test_env_bot_code.BOT_DIR)
   try:
     # Dump the collected bot logs to ease debugging failed runs.
     logging.debug(self._logs)
     for i in os.listdir(self._logs):
       with open(os.path.join(self._logs, i), 'rb') as f:
         logging.debug('%s:\n%s', i, ''.join('  ' + line for line in f))
     file_path.rmtree(self.root_dir)
   except OSError:
     # Best effort: report and continue so the base teardown still runs.
     print >> sys.stderr, 'Failed to delete %s' % self.root_dir
   finally:
     super(TestTaskRunnerBase, self).tearDown()
示例#41
0
 def make_caches(self, cache, names):
     # Install every named cache into dest_dir, verify they all appear,
     # then uninstall them and verify dest_dir is empty again and the
     # caches are tracked as available.
     dest_dir = os.path.join(self.tempdir, 'dest')
     try:
         names = [unicode(name) for name in names]
         for name in names:
             cache.install(os.path.join(dest_dir, name), name)
         self.assertEqual(set(names), set(os.listdir(dest_dir)))
         for name in names:
             cache.uninstall(os.path.join(dest_dir, name), name)
         self.assertEqual([], os.listdir(dest_dir))
         self.assertTrue(cache.available.issuperset(names))
     finally:
         file_path.rmtree(dest_dir)
示例#42
0
 def make_caches(self, names):
   # Round-trip every named cache through install/uninstall and check the
   # manager tracks them as available afterwards.
   dest_dir = tempfile.mkdtemp(prefix=u'named_cache_test')
   try:
     names = [unicode(name) for name in names]
     for name in names:
       self.manager.install(os.path.join(dest_dir, name), name)
     self.assertEqual(set(names), set(os.listdir(dest_dir)))
     for name in names:
       self.manager.uninstall(os.path.join(dest_dir, name), name)
     self.assertEqual([], os.listdir(dest_dir))
     self.assertTrue(self.manager.available.issuperset(names))
   finally:
     file_path.rmtree(dest_dir)
示例#43
0
def gen_isolated(isolate, script, includes=None):
    """Archives a script to `isolate` server.

    Arguments:
    - isolate: URL of the isolate server to archive to.
    - script: python source of the script to archive as script.py.
    - includes: optional list of isolated hashes to inject as 'includes'.

    Returns:
      The isolated hash (first token of the archiving command's output).
    """
    tmp = tempfile.mkdtemp(prefix='swarming_smoke')
    data = {
        'variables': {
            'command': ['python', '-u', 'script.py'],
            'files': ['script.py'],
        },
    }
    try:
        with open(os.path.join(tmp, 'script.py'), 'wb') as f:
            f.write(script)
        path = os.path.join(tmp, 'script.isolate')
        with open(path, 'wb') as f:
            # This file is actually python but it's #closeenough.
            json.dump(data, f, sort_keys=True, separators=(',', ':'))
        isolated = os.path.join(tmp, 'script.isolated')
        cmd = [
            os.path.join(CLIENT_DIR, 'isolate.py'),
            'archive',
            '-I',
            isolate,
            '-i',
            path,
            '-s',
            isolated,
        ]
        out = subprocess.check_output(cmd)
        if includes:
            # Mangle the .isolated to include another one. A bit hacky but works well.
            # In practice, we'd need to add a --include flag to isolate.py archive or
            # something.
            with open(isolated, 'rb') as f:
                data = json.load(f)
            data['includes'] = includes
            with open(isolated, 'wb') as f:
                json.dump(data, f, sort_keys=True, separators=(',', ':'))
            # Re-archive the modified .isolated so the server has the final
            # version.
            cmd = [
                os.path.join(CLIENT_DIR, 'isolateserver.py'),
                'archive',
                '-I',
                isolate,
                '--namespace',
                'default-gzip',
                isolated,
            ]
            out = subprocess.check_output(cmd)
        return out.split(' ', 1)[0]
    finally:
        file_path.rmtree(tmp)
示例#44
0
def main():
  """Profiles zlib/bz2 compression over the files of an .isolated target.

  Remaps the .isolated file into a temporary directory, then compresses
  either the whole tree or only the N largest files, printing timings.
  """
  tools.disable_buffering()
  parser = optparse.OptionParser()
  parser.add_option('-s', '--isolated', help='.isolated file to profile with.')
  parser.add_option('--largest_files', type='int',
                    help='If this is set, instead of compressing all the '
                    'files, only the large n files will be compressed')
  options, args = parser.parse_args()

  if args:
    parser.error('Unknown args passed in; %s' % args)
  if not options.isolated:
    parser.error('The .isolated file must be given.')

  temp_dir = None
  try:
    temp_dir = tempfile.mkdtemp(prefix=u'zip_profiler')

    # Create a directory of the required files
    subprocess.check_call([os.path.join(ROOT_DIR, 'isolate.py'),
                           'remap',
                           '-s', options.isolated,
                           '--outdir', temp_dir])

    file_set = tree_files(temp_dir)

    if options.largest_files:
      sorted_by_size = sorted(file_set.iteritems(),  key=lambda x: x[1],
                              reverse=True)
      files_to_compress = sorted_by_size[:options.largest_files]

      for filename, size in files_to_compress:
        print('Compressing %s, uncompressed size %d' % (filename, size))

        profile_compress('zlib', zlib.compressobj, range(10), zip_file,
                         filename)
        profile_compress('bz2', bz2.BZ2Compressor, range(1, 10), zip_file,
                         filename)
    else:
      print('Number of files: %s' % len(file_set))
      print('Total size: %s' % sum(file_set.itervalues()))

      # Profile!
      profile_compress('zlib', zlib.compressobj, range(10), zip_directory,
                       temp_dir)
      profile_compress('bz2', bz2.BZ2Compressor, range(1, 10), zip_directory,
                       temp_dir)
  finally:
    # BUG FIX: temp_dir stays None when mkdtemp() itself fails; guard so
    # cleanup does not mask the original exception with a crash on None.
    if temp_dir:
      file_path.rmtree(temp_dir)
示例#45
0
    def test_get_swarming_bot_zip(self):
        # Mock config reads so the bot config content is deterministic, then
        # verify memcache write/read behavior, zip contents, and that the
        # produced zip runs as a self-contained bot.
        get_self_config_orig = config.get_self_config

        def get_self_config_mock(path, revision=None, store_last_good=False):
            # Pass through settings.cfg; serve a fixed bot_config.py.
            if path == 'settings.cfg':
                return get_self_config_orig(path, revision, store_last_good)
            self.assertEqual('scripts/bot_config.py', path)
            self.assertEqual(None, revision)
            self.assertEqual(True, store_last_good)
            return 'rev1', 'foo bar'

        self.mock(config, 'get_self_config', get_self_config_mock)
        local_mc = self.mock_memcache()

        # First build populates memcache (writes, no reads).
        self.assertEqual(0, local_mc['writes'])
        zipped_code = bot_code.get_swarming_bot_zip('http://localhost')
        self.assertEqual(0, local_mc['reads'])
        self.assertNotEqual(0, local_mc['writes'])

        # Make sure that we read from memcached if we get it again
        zipped_code_copy = bot_code.get_swarming_bot_zip('http://localhost')
        self.assertEqual(local_mc['writes'], local_mc['reads'])
        # Why not assertEqual? Don't want to dump ~1MB of data if this fails.
        self.assertTrue(zipped_code == zipped_code_copy)

        # Ensure the zip is valid and all the expected files are present.
        with zipfile.ZipFile(StringIO.StringIO(zipped_code), 'r') as zip_file:
            for i in bot_archive.FILES:
                with zip_file.open(i) as f:
                    content = f.read()
                    # __init__.py files are legitimately empty; everything
                    # else must have content.
                    if os.path.basename(i) != '__init__.py':
                        self.assertTrue(content, i)

        temp_dir = tempfile.mkdtemp(prefix='swarming')
        try:
            # Try running the bot and ensure it can import the required files. (It
            # would crash if it failed to import them).
            bot_path = os.path.join(temp_dir, 'swarming_bot.zip')
            with open(bot_path, 'wb') as f:
                f.write(zipped_code)
            proc = subprocess.Popen(
                [sys.executable, bot_path, 'start_bot', '-h'],
                cwd=temp_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            out = proc.communicate()[0]
            self.assertEqual(0, proc.returncode, out)
        finally:
            file_path.rmtree(temp_dir)
示例#46
0
def CMDrun(parser, args):
    """Runs the test executable in an isolated (temporary) directory.

  All the dependencies are mapped into the temporary directory and the
  directory is cleaned up after the target exits.

  Argument processing stops at -- and these arguments are appended to the
  command line of the target to run. For example, use:
    isolate.py run --isolated foo.isolated -- --gtest_filter=Foo.Bar
  """
    add_isolate_options(parser)
    add_skip_refresh_option(parser)
    options, args = parser.parse_args(args)
    process_isolate_options(parser, options, require_isolated=False)
    complete_state = load_complete_state(
        options, os.getcwd(), None, options.skip_refresh)
    # Everything after -- ends up in |args| and is forwarded to the target.
    command = complete_state.saved_state.command + args
    if not command:
        raise ExecutionError("No command to run.")
    command = tools.fix_python_path(command)

    run_root = run_isolated.make_temp_dir(
        u"isolate-%s" % datetime.date.today(),
        os.path.dirname(complete_state.root_dir))
    try:
        # TODO(maruel): Use run_isolated.run_tha_test().
        cwd = create_isolate_tree(
            run_root,
            complete_state.root_dir,
            complete_state.saved_state.files,
            complete_state.saved_state.relative_cwd,
            complete_state.saved_state.read_only,
        )
        file_path.ensure_command_has_abs_path(command, cwd)
        logging.info("Running %s, cwd=%s" % (command, cwd))
        try:
            returncode = subprocess.call(command, cwd=cwd)
        except OSError:
            sys.stderr.write(
                "Failed to executed the command; executable is missing, maybe you\n"
                "forgot to map it in the .isolate file?\n  %s\n  in %s\n" % (" ".join(command), cwd)
            )
            returncode = 1
    finally:
        # Always clean up the mapped tree, even when the command failed.
        file_path.rmtree(run_root)

    if complete_state.isolated_filepath:
        complete_state.save_files()
    return returncode
示例#47
0
        def test_native_case_alternate_datastream(self):
            """Exercises get_native_path_case() on NTFS alternate data streams."""
            # tempfile cannot create files with an ADS, so craft one by hand.
            work_dir = unicode(tempfile.mkdtemp(prefix=u"trace_inputs"))
            try:
                work_dir = file_path.get_native_path_case(work_dir)
                base = "foo.txt"
                ads_name = base + ":Zone.Identifier"
                full_path = os.path.join(work_dir, ads_name)
                open(full_path, "w").close()
                self.assertEqual(full_path, file_path.get_native_path_case(full_path))
                suffix = ":$DATA"
                self.assertEqual(
                    full_path + suffix,
                    file_path.get_native_path_case(full_path + suffix))

                open(full_path + "$DATA", "w").close()
                self.assertEqual(
                    full_path + suffix,
                    file_path.get_native_path_case(full_path + suffix))
                # Ensure the ADS weren't created as separate file. You love NTFS, don't
                # you?
                self.assertEqual([base], fs.listdir(work_dir))
            finally:
                file_path.rmtree(work_dir)
示例#48
0
 def _run_isolated(self, hello_world, name, args, expected_summary,
     expected_files):
   """Shared driver for the test_isolated_* test cases.

   Archives hello_world as an isolated tree, triggers it as a task and
   asserts the collected summary and output files match expectations.
   """
   work_dir = tempfile.mkdtemp(prefix='swarming_smoke')
   try:
     src_isolate = os.path.join(work_dir, 'i.isolate')
     dst_isolated = os.path.join(work_dir, 'i.isolated')
     with open(src_isolate, 'wb') as f:
       json.dump(ISOLATE_HELLO_WORLD, f)
     with open(os.path.join(work_dir, 'hello_world.py'), 'wb') as f:
       f.write(hello_world)
     digest = self.client.isolate(src_isolate, dst_isolated)
     task_id = self.client.task_trigger_isolated(name, digest, extra=args)
     summary, files = self.client.task_collect(task_id)
     self.assertResults(expected_summary, summary)
     # summary.json is always produced; only the task's own outputs matter.
     files.pop('summary.json')
     self.assertEqual(expected_files, files)
   finally:
     file_path.rmtree(work_dir)
def archive_isolated_triggers(isolate_server, tree_isolated, tests):
  """Creates and archives all the .isolated files for the tests at once.

  Archiving them in one batch is faster than archiving each file individually.
  Also the .isolated files can be reused across OSes, reducing the amount of
  I/O.

  Returns:
    list of (test, sha1) tuples.
  """
  logging.info('archive_isolated_triggers(%s, %s)', tree_isolated, tests)
  staging = tempfile.mkdtemp(prefix=u'run_swarming_tests_on_swarming_')
  try:
    isolated_paths = []
    for test in tests:
      # Craft a minimal .isolated file by hand. See
      # https://code.google.com/p/swarming/wiki/IsolatedDesign for more details.
      content = {
        'algo': 'sha-1',
        'command': ['python', test],
        'includes': [tree_isolated],
        'read_only': 0,
        'version': '1.4',
      }
      dest = os.path.join(staging, os.path.basename(test) + '.isolated')
      tools.write_json(dest, content, True)
      isolated_paths.append(dest)
    cmd = [
        'isolateserver.py', 'archive', '--isolate-server', isolate_server,
    ] + isolated_paths
    if logging.getLogger().isEnabledFor(logging.INFO):
      cmd.append('--verbose')
    # Each output line is "<sha1> <path>"; pair them back with their test.
    lines = [l.split() for l in check_output(cmd).splitlines()]
    assert len(lines) == len(tests)
    assert all(
        line[1].endswith(os.path.basename(test) + '.isolated')
        for test, line in zip(tests, lines))
    return [(test, line[0]) for test, line in zip(tests, lines)]
  finally:
    file_path.rmtree(staging)
示例#50
0
    def test_rmtree_win(self):
      """Checks rmtree() on a directory locked by a live child process.

      NOTE(review): presumably file_path.rmtree retries with backoff and then
      forcibly unblocks the directory — confirm against file_path's
      implementation; this test only pins the observed sleep sequence.
      """
      # Mock our sleep for faster test case execution.
      sleeps = []
      self.mock(time, 'sleep', sleeps.append)
      self.mock(sys, 'stderr', StringIO.StringIO())

      # Open a child process, so the file is locked.
      subdir = os.path.join(self.tempdir, 'to_be_deleted')
      fs.mkdir(subdir)
      script = 'import time; open(\'a\', \'w\'); time.sleep(60)'
      proc = subprocess.Popen([sys.executable, '-c', script], cwd=subdir)
      try:
        # Wait until the file exist.
        while not fs.isfile(os.path.join(subdir, 'a')):
          self.assertEqual(None, proc.poll())
        file_path.rmtree(subdir)
        # The exact backoff schedule rmtree used before the delete succeeded.
        self.assertEqual([2, 4, 2], sleeps)
        # sys.stderr.getvalue() would return a fair amount of output but it is
        # not completely deterministic so we're not testing it here.
      finally:
        proc.wait()
示例#51
0
 def test_undeleteable_owner(self):
   """Checks rmtree() succeeds on a tree whose DACL grants no rights.

   Windows-only: strips all permissions from a file and its parent directory
   via win32security, then expects file_path.rmtree to delete the tree anyway
   (NOTE(review): presumably by re-taking ownership/resetting the ACL —
   confirm in file_path).
   """
   # Create a file and a directory with an empty ACL. Then try to delete it.
   dirpath = os.path.join(self.tempdir, 'd')
   filepath = os.path.join(dirpath, 'f')
   os.mkdir(dirpath)
   with open(filepath, 'w') as f:
     f.write('hi')
   import win32security
   user, _domain, _type = win32security.LookupAccountName(
       '', getpass.getuser())
   sd = win32security.SECURITY_DESCRIPTOR()
   sd.Initialize()
   sd.SetSecurityDescriptorOwner(user, False)
   # Create an empty DACL, which removes all rights.
   dacl = win32security.ACL()
   dacl.Initialize()
   sd.SetSecurityDescriptorDacl(1, dacl, 0)
   # Apply the rights-less descriptor to both the file and its directory.
   win32security.SetFileSecurity(
       fs.extend(filepath), win32security.DACL_SECURITY_INFORMATION, sd)
   win32security.SetFileSecurity(
       fs.extend(dirpath), win32security.DACL_SECURITY_INFORMATION, sd)
   # Must not raise despite the empty DACL.
   file_path.rmtree(dirpath)
示例#52
0
def gen_isolated(isolate, script, includes=None):
  """Archives a script to `isolate` server.

  Returns the hash of the resulting .isolated, as printed by the archive
  command.
  """
  workdir = tempfile.mkdtemp(prefix='swarming_smoke')
  spec = {
    'variables': {
      'command': ['python', '-u', 'script.py'],
      'files': ['script.py'],
    },
  }
  try:
    with open(os.path.join(workdir, 'script.py'), 'wb') as f:
      f.write(script)
    isolate_path = os.path.join(workdir, 'script.isolate')
    with open(isolate_path, 'wb') as f:
      # The .isolate format is actually python, but JSON is #closeenough.
      json.dump(spec, f, sort_keys=True, separators=(',', ':'))
    isolated_path = os.path.join(workdir, 'script.isolated')
    out = subprocess.check_output([
      os.path.join(CLIENT_DIR, 'isolate.py'), 'archive',
      '-I', isolate, '-i', isolate_path, '-s', isolated_path,
    ])
    if includes:
      # Mangle the .isolated to include another one. A bit hacky but works
      # well; in practice we'd need a --include flag on isolate.py archive.
      with open(isolated_path, 'rb') as f:
        content = json.load(f)
      content['includes'] = includes
      with open(isolated_path, 'wb') as f:
        json.dump(content, f, sort_keys=True, separators=(',', ':'))
      out = subprocess.check_output([
        os.path.join(CLIENT_DIR, 'isolateserver.py'), 'archive',
        '-I', isolate, '--namespace', 'default-gzip', isolated_path,
      ])
    return out.split(' ', 1)[0]
  finally:
    file_path.rmtree(workdir)
示例#53
0
def delete_and_upload(storage, out_dir, leak_temp_dir):
  """Deletes the temporary run directory and uploads results back.

  Returns:
    tuple(outputs_ref, success, cold, hot)
    - outputs_ref: a dict referring to the results archived back to the isolated
          server, if applicable.
    - success: False if something occurred that means that the task must
          forcibly be considered a failure, e.g. zombie processes were left
          behind.
    - cold: list of size of cold items, they had to be uploaded.
    - hot: list of size of hot items, they didn't have to be uploaded.
  """
  outputs_ref = None
  cold = []
  hot = []
  # Only archive when the output directory actually contains files.
  if fs.isdir(out_dir) and fs.listdir(out_dir):
    with tools.Profiler('ArchiveOutput'):
      try:
        results, f_cold, f_hot = isolateserver.archive_files_to_storage(
            storage, [out_dir], None)
        cold = sorted(item.size for item in f_cold)
        hot = sorted(item.size for item in f_hot)
        outputs_ref = {
          'isolated': results[0][0],
          'isolatedserver': storage.location,
          'namespace': storage.namespace,
        }
      except isolateserver.Aborted:
        # A SIGTERM arrived mid-upload. Two possible causes:
        # - The task was too slow and was about to be killed anyway due to
        #   exceeding the hard timeout.
        # - The amount of data uploaded back is very large and took too much
        #   time to archive.
        sys.stderr.write('Received SIGTERM while uploading')
        # Re-raise, so it will be treated as an internal failure.
        raise
  success = True
  try:
    if not leak_temp_dir and fs.isdir(out_dir):
      if not file_path.rmtree(out_dir):
        logging.error('Had difficulties removing out_dir %s', out_dir)
        success = False
  except OSError as e:
    # When this happens, it means there's a process error.
    logging.exception('Had difficulties removing out_dir %s: %s', out_dir, e)
    success = False
  return outputs_ref, success, cold, hot
示例#54
0
 def stop(self, leak):
   """Stops the local Swarming bot. Returns the process exit code."""
   if not self._proc:
     return None
   if self._proc.poll() is None:
     try:
       # Ask the bot to shut down cleanly, then wait for it.
       self._proc.send_signal(signal.SIGTERM)
       # TODO(maruel): SIGKILL after N seconds.
       self._proc.wait()
     except OSError:
       pass
   returncode = self._proc.returncode
   if self._tmpdir:
     # Surface the bot's logs before (maybe) wiping its directory.
     for log_file in sorted(
         glob.glob(os.path.join(self._tmpdir, 'logs', '*.log'))):
       self._read_log(log_file)
     if not leak:
       try:
         file_path.rmtree(self._tmpdir)
       except OSError:
         print >> sys.stderr, 'Leaking %s' % self._tmpdir
     self._tmpdir = None
   self._proc = None
   return returncode
示例#55
0
    def stop(self, leak=False):
        """Stops dev_appserver, collects its log.

        Sends SIGTERM, waits up to 5 seconds for the process to exit, reads
        back dev_appserver.log and, unless |leak| is True, deletes the
        temporary root directory.

        Returns:
          The process exit code if the process terminated, None otherwise.
        """
        if not self._proc:
            return None
        exit_code = self._proc.poll()
        try:
            logging.info("Stopping %s", self.app_id)
            if self._proc.poll() is None:
                try:
                    # Send SIGTERM.
                    self._proc.terminate()
                except OSError:
                    pass
                # Poll for up to 5 seconds for a graceful shutdown.
                deadline = time.time() + 5
                while self._proc.poll() is None and time.time() < deadline:
                    time.sleep(0.05)
                exit_code = self._proc.poll()
                if exit_code is None:
                    # Still running past the deadline; give up on it.
                    logging.error("Leaking PID %d", self._proc.pid)
        finally:
            # Always capture the log, even when the process had to be leaked.
            with open(os.path.join(self._temp_root, "dev_appserver.log"), "r") as f:
                self._log = f.read()
            if not leak:
                try:
                    file_path.rmtree(self._temp_root)
                except OSError as e:
                    # Log but ignore it to not mask other errors.
                    print >>sys.stderr, str(e)
            # Reset all state so the instance can be restarted.
            self._client = None
            self._port = None
            self._proc = None
            self._serving = False
            self._temp_root = None
        return exit_code
def delete_and_upload(storage, out_dir, leak_temp_dir):
    """Deletes the temporary run directory and uploads results back.

    Returns:
      tuple(outputs_ref, success)
      - outputs_ref is a dict referring to the results archived back to the
        isolated server, if applicable.
      - success is False if something occurred that means that the task must
        forcibly be considered a failure, e.g. zombie processes were left
        behind.
    """

    # Upload out_dir and generate a .isolated file out of this directory. It is
    # only done if files were written in the directory.
    outputs_ref = None
    if os.path.isdir(out_dir) and os.listdir(out_dir):
        with tools.Profiler("ArchiveOutput"):
            try:
                results = isolateserver.archive_files_to_storage(storage, [out_dir], None)
                outputs_ref = {
                    "isolated": results[0][0],
                    "isolatedserver": storage.location,
                    "namespace": storage.namespace,
                }
            except isolateserver.Aborted:
                # This happens when a signal SIGTERM was received while uploading data.
                # There is 2 causes:
                # - The task was too slow and was about to be killed anyway due to
                #   exceeding the hard timeout.
                # - The amount of data uploaded back is very large and took too much
                #   time to archive.
                sys.stderr.write("Received SIGTERM while uploading")
                # Re-raise, so it will be treated as an internal failure.
                raise
    try:
        if not leak_temp_dir and os.path.isdir(out_dir) and not file_path.rmtree(out_dir):
            logging.error("Had difficulties removing out_dir %s", out_dir)
            return outputs_ref, False
    except OSError as e:
        # When this happens, it means there's a process error. Use
        # logging.exception() so the traceback is recorded as well.
        logging.exception("Had difficulties removing out_dir %s: %s", out_dir, e)
        return outputs_ref, False
    return outputs_ref, True
 def tearDown(self):
   """Restores the original working directory and deletes the scratch dir.

   Wrapped in try/finally so the base class tearDown always runs even when
   the cleanup itself fails.
   """
   try:
     os.chdir(self.old_cwd)
     file_path.rmtree(self.cwd)
   finally:
     super(SymlinkTest, self).tearDown()
示例#58
0
def map_and_run(
    isolated_hash, storage, cache, leak_temp_dir, root_dir, hard_timeout,
    grace_period, extra_args):
  """Maps and run the command. Returns metadata about the result."""
  # TODO(maruel): Include performance statistics.
  result = {
    'exit_code': None,
    'had_hard_timeout': False,
    'internal_failure': None,
    'outputs_ref': None,
    'version': 2,
  }
  if root_dir:
    if not fs.isdir(root_dir):
      fs.makedirs(root_dir, 0700)
    prefix = u''
  else:
    root_dir = os.path.dirname(cache.cache_dir) if cache.cache_dir else None
    prefix = u'isolated_'
  run_dir = make_temp_dir(prefix + u'run', root_dir)
  out_dir = make_temp_dir(prefix + u'out', root_dir)
  tmp_dir = make_temp_dir(prefix + u'tmp', root_dir)
  try:
    bundle = isolateserver.fetch_isolated(
        isolated_hash=isolated_hash,
        storage=storage,
        cache=cache,
        outdir=run_dir,
        require_command=True)

    change_tree_read_only(run_dir, bundle.read_only)
    cwd = os.path.normpath(os.path.join(run_dir, bundle.relative_cwd))
    command = bundle.command + extra_args
    file_path.ensure_command_has_abs_path(command, cwd)
    result['exit_code'], result['had_hard_timeout'] = run_command(
        process_command(command, out_dir), cwd, tmp_dir, hard_timeout,
        grace_period)
  except Exception as e:
    # An internal error occured. Report accordingly so the swarming task will be
    # retried automatically.
    logging.exception('internal failure: %s', e)
    result['internal_failure'] = str(e)
    on_error.report(None)
  finally:
    try:
      if leak_temp_dir:
        logging.warning(
            'Deliberately leaking %s for later examination', run_dir)
      else:
        # On Windows rmtree(run_dir) call above has a synchronization effect: it
        # finishes only when all task child processes terminate (since a running
        # process locks *.exe file). Examine out_dir only after that call
        # completes (since child processes may write to out_dir too and we need
        # to wait for them to finish).
        if fs.isdir(run_dir):
          try:
            success = file_path.rmtree(run_dir)
          except OSError as e:
            logging.error('Failure with %s', e)
            success = False
          if not success:
            print >> sys.stderr, (
                'Failed to delete the run directory, forcibly failing\n'
                'the task because of it. No zombie process can outlive a\n'
                'successful task run and still be marked as successful.\n'
                'Fix your stuff.')
            if result['exit_code'] == 0:
              result['exit_code'] = 1
        if fs.isdir(tmp_dir):
          try:
            success = file_path.rmtree(tmp_dir)
          except OSError as e:
            logging.error('Failure with %s', e)
            success = False
          if not success:
            print >> sys.stderr, (
                'Failed to delete the temporary directory, forcibly failing\n'
                'the task because of it. No zombie process can outlive a\n'
                'successful task run and still be marked as successful.\n'
                'Fix your stuff.')
            if result['exit_code'] == 0:
              result['exit_code'] = 1

      # This deletes out_dir if leak_temp_dir is not set.
      result['outputs_ref'], success = delete_and_upload(
          storage, out_dir, leak_temp_dir)
      if not success and result['exit_code'] == 0:
        result['exit_code'] = 1
    except Exception as e:
      # Swallow any exception in the main finally clause.
      logging.exception('Leaking out_dir %s: %s', out_dir, e)
      result['internal_failure'] = str(e)
  return result
示例#59
0
def rmtree(path):
  """Removes a directory the bold way."""
  # Delegates to the project's hardened file_path.rmtree instead of
  # shutil.rmtree (NOTE(review): presumably it retries on locked files and
  # handles Windows quirks — confirm in file_path).
  file_path.rmtree(path)
示例#60
0
 def tearDown(self):
     """Restores the environment and CWD, then deletes the scratch dir.

     The cleanup is wrapped in try/finally so the base class tearDown always
     runs even if removing self.root_dir fails, consistent with
     SymlinkTest.tearDown.
     """
     try:
         os.environ.pop("SWARMING_BOT_ID", None)
         os.chdir(self.old_cwd)
         file_path.rmtree(self.root_dir)
     finally:
         super(TestBotMain, self).tearDown()