Example #1
  def test_cancel(self):
    started_event = threading.Event()
    waiting_event = threading.Event()

    def task_run():
      started_event.set()
      waiting_event.wait()

    cancel_event = threading.Event()

    def task_cancel_run():
      cancel_event.set()

    with concurrent.ThreadPoolExecutor(max_workers=1) as executor:
      future1 = executor.submit(task_run)
      future2 = executor.submit(task_cancel_run)
      started_event.wait()
      self.assertTrue(future1.running())
      self.assertFalse(future2.running())

      # Cancel the second task so that it never runs.
      self.assertTrue(future2.cancel())
      waiting_event.set()

    # task_cancel_run should not have been called.
    self.assertFalse(cancel_event.is_set())
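
The concurrent module exercised above looks like a project-local wrapper; the same cancel-a-queued-future behaviour can be reproduced with the standard library concurrent.futures (Python 3, or the futures backport on Python 2). A minimal sketch, with names of my own choosing:

import concurrent.futures
import threading

def cancel_queued_task_demo():
  release = threading.Event()
  with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
    blocker = executor.submit(release.wait)  # keeps the only worker busy
    queued = executor.submit(lambda: None)   # still pending, so it can be cancelled
    assert queued.cancel()                   # True: the queued task never starts
    release.set()                            # let the blocker finish
  assert queued.cancelled()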
Example #2
  def test_multi_worker(self):
    started_event1 = threading.Event()
    waiting_event1 = threading.Event()
    started_event2 = threading.Event()
    waiting_event2 = threading.Event()

    def task_run(started_event, waiting_event):
      started_event.set()
      waiting_event.wait()

    with concurrent.ThreadPoolExecutor(max_workers=2) as executor:
      future1 = executor.submit(task_run, started_event1, waiting_event1)
      future2 = executor.submit(task_run, started_event2, waiting_event2)
      future3 = executor.submit(lambda: None)
      started_event1.wait()
      started_event2.wait()

      # Since the executor has two workers, two tasks should be running
      # in parallel.
      self.assertTrue(future1.running())
      self.assertTrue(future2.running())
      self.assertFalse(future3.running())
      self.assertFalse(future3.done())

      waiting_event1.set()
      waiting_event2.set()

    # Finally, all tasks should be done.
    self.assertTrue(future1.done())
    self.assertTrue(future2.done())
    self.assertTrue(future3.done())
Example #3
  def test_precancelled_future(self):
    # Our own concurrent implementation has a bug where concurrent.wait() hangs
    # if cancelled futures are passed in. This test checks a workaround for the bug.
    started_event = threading.Event()
    waiting_event = threading.Event()

    def job1():
      started_event.set()
      # Wait until job2 gets cancelled.
      waiting_event.wait()
      return 111

    def job2():
      return 222

    with concurrent.CheckedExecutor(
        concurrent.ThreadPoolExecutor(max_workers=1)) as executor:
      future1 = executor.submit(job1)
      future2 = executor.submit(job2)
      # Wait until job1 starts running.
      started_event.wait()
      self.assertTrue(future2.cancel())
      waiting_event.set()

    self.assertEquals(111, future1.result())
    self.assertRaises(concurrent.CancelledError, future2.result)
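
The workaround itself is not visible in this test; one plausible shape for it, sketched as an assumption against the standard concurrent.futures API rather than taken from the project code, is to filter out already-cancelled futures before waiting:

import concurrent.futures

def wait_skipping_cancelled(futures, timeout=None):
  # Hypothetical guard: drop futures that were already cancelled so that a
  # wait() implementation which mishandles them never sees them.
  pending = [f for f in futures if not f.cancelled()]
  if not pending:
    return set(), set()
  return concurrent.futures.wait(pending, timeout=timeout)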
Example #4
def _extract_symbols():
  # Extract symbols in parallel.
  with concurrent.CheckedExecutor(concurrent.ThreadPoolExecutor(
      max_workers=multiprocessing.cpu_count(), daemon=True)) as executor:
    for root, _, filenames in os.walk(build_common.get_load_library_path()):
      for filename in filenames:
        if os.path.splitext(filename)[1] in ['.so', '.nexe']:
          executor.submit(_extract_symbols_from_one_binary,
                          os.path.join(root, filename))
Example #5
  def test_success_scenario(self):
    def job1():
      return 111

    def job2():
      return 222

    with concurrent.CheckedExecutor(
        concurrent.ThreadPoolExecutor(max_workers=1)) as executor:
      future1 = executor.submit(job1)
      future2 = executor.submit(job2)

    self.assertEquals(111, future1.result())
    self.assertEquals(222, future2.result())
Example #6
def _run_suites(test_driver_list, args, prepare_only=False):
    """Runs the indicated suites."""
    setup_output_directory(args.output_dir)

    suite_results.initialize(test_driver_list, args, prepare_only)

    if not test_driver_list:
        return False

    timeout = (args.total_timeout
               if args.total_timeout and not prepare_only else None)

    try:
        with concurrent.ThreadPoolExecutor(args.jobs, daemon=True) as executor:
            futures = [
                executor.submit(_run_driver, driver, args, prepare_only)
                for driver in test_driver_list
            ]
            done, not_done = concurrent.wait(futures, timeout,
                                             concurrent.FIRST_EXCEPTION)
            try:
                # Iterate over the results to propagate an exception if any of
                # the tasks was aborted by an error in the test drivers. Since
                # such an error indicates a broken script rather than a normal
                # test failure, we prefer to just die, as we would if a Python
                # error occurred in the main thread.
                for future in done:
                    future.result()

                # No exception was raised, but some timed-out tasks remain.
                if not_done:
                    print '@@@STEP_TEXT@Integration test timed out@@@'
                    debug.write_frames(sys.stdout)
                    if args.warn_on_failure:
                        print '@@@STEP_WARNINGS@@@'
                    else:
                        print '@@@STEP_FAILURE@@@'
                    return False

                # All tests passed (or failed) in time.
                return True
            finally:
                if not_done:
                    _shutdown_unfinished_drivers_gracefully(
                        not_done, test_driver_list)
    finally:
        for driver in test_driver_list:
            driver.finalize(args)
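
For comparison, the wait-then-propagate pattern above maps closely onto the standard library; the stripped-down sketch below uses concurrent.futures directly, with run_all as a made-up helper name:

import concurrent.futures

def run_all(jobs, timeout=None):
  # Hypothetical helper (not from the code above) illustrating the pattern in
  # _run_suites: run every job, re-raise the first worker exception, and report
  # whether everything finished within the timeout.
  with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(job) for job in jobs]
    done, not_done = concurrent.futures.wait(
        futures, timeout=timeout,
        return_when=concurrent.futures.FIRST_EXCEPTION)
    for future in done:
      future.result()  # re-raises an exception raised inside a worker
    # Note: unlike _run_suites, nothing shuts the leftover jobs down here, so
    # __exit__ still blocks until any timed-out jobs actually finish.
    return not not_done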
Example #7
  def test_exception_and_cancel_scenario(self):
    started_event = threading.Event()
    waiting_event = threading.Event()

    class MyException(Exception):
      pass

    def job1():
      return 111

    def job2():
      raise MyException()

    def job3():
      started_event.set()
      # Wait until future4 becomes visible.
      waiting_event.wait()
      # Though this job seems to block forever, it finishes when future4 is
      # cancelled by CheckedExecutor.__exit__().
      while not future4.done():
        pass
      return 333

    def job4():
      raise MyException()

    with self.assertRaises(MyException):
      with concurrent.CheckedExecutor(
          concurrent.ThreadPoolExecutor(max_workers=1)) as executor:
        future1 = executor.submit(job1)
        future2 = executor.submit(job2)
        future3 = executor.submit(job3)
        future4 = executor.submit(job4)
        # Wait until job3 starts running.
        started_event.wait()
        waiting_event.set()

    self.assertTrue(future1.done())
    self.assertTrue(future2.done())
    self.assertTrue(future3.done())
    self.assertTrue(future4.done())

    self.assertEquals(111, future1.result())
    self.assertRaises(MyException, future2.result)
    self.assertEquals(333, future3.result())
    self.assertRaises(concurrent.CancelledError, future4.result)
Example #8
  def test_exception_scenario(self):
    class MyException(Exception):
      pass

    def job1():
      return 111

    def job2():
      raise MyException()

    with self.assertRaises(MyException):
      with concurrent.CheckedExecutor(
          concurrent.ThreadPoolExecutor(max_workers=1)) as executor:
        future1 = executor.submit(job1)
        future2 = executor.submit(job2)

    self.assertEquals(111, future1.result())
    self.assertRaises(MyException, future2.result)
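
CheckedExecutor itself does not appear in these snippets. Judging only from the tests above, it seems to track submitted futures, cancel whatever has not started once a job fails, and re-raise the first worker exception from __exit__. The following is a rough sketch of such a wrapper over the standard concurrent.futures API, inferred from the tests rather than taken from the real implementation:

import concurrent.futures

class CheckedExecutorSketch(object):
  # Hypothetical stand-in inferred from the tests above; the real
  # CheckedExecutor may differ in details.

  def __init__(self, executor):
    self._executor = executor
    self._futures = []

  def __enter__(self):
    return self

  def submit(self, fn, *args, **kwargs):
    future = self._executor.submit(fn, *args, **kwargs)
    self._futures.append(future)
    return future

  def __exit__(self, exc_type, exc_value, traceback):
    first_error = None
    for future in concurrent.futures.as_completed(self._futures):
      if future.cancelled():
        continue
      error = future.exception()
      if error is not None and first_error is None:
        first_error = error
        # A job failed; cancel everything that has not started yet.
        for other in self._futures:
          other.cancel()
    self._executor.shutdown(wait=True)
    if exc_type is None and first_error is not None:
      raise first_error
    return False

Run against the scenarios in Examples #7 and #8, this sketch reproduces the asserted outcomes, which is the only evidence it is based on.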
Example #9
def main():
  args = parse_args()
  builders_info = get_builders_info()
  all_builders = sorted(builders_info.keys())
  builders = args.builders
  if args.list or not validate_builders(builders, all_builders):
    print 'Select any of the following builders:'
    print '\n'.join(all_builders)
    return
  if builders:
    builders_info = dict(
        [(k, v) for k, v in builders_info.iteritems() if k in builders])

  download_args_list = make_download_args_list(
      builders_info, args.outdir, args.number_of_logs)
  with concurrent.CheckedExecutor(concurrent.ThreadPoolExecutor(
      args.jobs, daemon=True)) as executor:
    for download_args in download_args_list:
      executor.submit(download_log, *download_args)
  print 'Downloaded logs in %s' % args.outdir
Example #10
  def test_simple_scenario(self):
    started_event = threading.Event()
    waiting_event = threading.Event()

    def task_run():
      started_event.set()
      waiting_event.wait()

    with concurrent.ThreadPoolExecutor(max_workers=1) as executor:
      future1 = executor.submit(task_run)
      future2 = executor.submit(task_run)
      started_event.wait()
      # At this point the first task has actually started. Since there is only
      # one worker, only the first task should be running.
      self.assertTrue(future1.running())
      self.assertFalse(future2.running())
      waiting_event.set()

    # Finally, all tasks should be done.
    self.assertTrue(future1.done())
    self.assertTrue(future2.done())