Example #1
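A unit test of worker_pool.ThreadPool: 32 tasks each negate their argument, and pool.join() collects the results, which are compared after sorting.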
    def test_normal(self):
        mapper = lambda value: -value
        with worker_pool.ThreadPool(8) as pool:
            for i in range(32):
                pool.add_task(mapper, i)
            results = pool.join()
        self.assertEquals(range(-31, 1), sorted(results))
Example #2
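run_test_cases uses the same ThreadPool to run test cases in parallel (one Runner task per test case, with jobs defaulting to the CPU count), passes a worker_pool.Progress to pool.join(), then classifies every test case as success, flaky, or fail and prints a summary.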
def run_test_cases(executable, whitelist, blacklist, jobs, timeout, stats_only,
                   no_dump):
    """Traces test cases one by one."""
    test_cases = get_test_cases(executable, whitelist, blacklist)
    if not test_cases:
        return

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        function = Runner(executable, os.getcwd(), timeout, progress).map
        for test_case in test_cases:
            pool.add_task(function, test_case)
        results = pool.join(progress, 0.1)
        duration = time.time() - progress.start
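    # Each item returned by pool.join() is the list of runs for one test case;
    # key them by test case name.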
    results = dict((item[0]['test_case'], item) for item in results)
    if not no_dump:
        trace_inputs.write_json('%s.run_test_cases' % executable, results,
                                False)
    sys.stderr.write('\n')
    total = len(results)
    if not total:
        return 1

    # Classify the results
    success = []
    flaky = []
    fail = []
    nb_runs = 0
    for test_case in sorted(results):
        items = results[test_case]
        nb_runs += len(items)
        if not any(not i['returncode'] for i in items):
            # Every run returned a non-zero exit code.
            fail.append(test_case)
        elif len(items) > 1 and any(not i['returncode'] for i in items):
            # Needed more than one run but at least one passed.
            flaky.append(test_case)
        elif len(items) == 1 and items[0]['returncode'] == 0:
            # Passed on the first and only run.
            success.append(test_case)
        else:
            assert False, items

    if not stats_only:
        for test_case in sorted(fail):
            # The test case failed; print the output of its last run.
            items = results[test_case]
            print items[-1]['output']

        for test_case in sorted(flaky):
            items = results[test_case]
            print '%s is flaky (tried %d times)' % (test_case, len(items))

    print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total)
    print 'Flaky:   %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total)
    print 'Fail:    %4d %5.2f%%' % (len(fail), len(fail) * 100. / total)
    print '%.1fs Done running %d tests with %d executions. %.1f test/s' % (
        duration, len(results), nb_runs, nb_runs / duration)
    return 0
Example #3
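A unit test of error propagation: an exception raised inside a worker task propagates out of the pool, and the test asserts that the task had already been added when the exception surfaced.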
    def test_exception(self):
        class FearsomeException(Exception):
            pass

        def mapper(value):
            raise FearsomeException(value)

        task_added = False
        try:
            with worker_pool.ThreadPool(8) as pool:
                pool.add_task(mapper, 0)
                task_added = True
                pool.join()
                self.fail()
        except FearsomeException:
            self.assertEquals(True, task_added)
Example #4
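trace_test_cases parallels run_test_cases but wraps each test case in a Tracer bound to a single trace_inputs tracer, then parses the logs, strips paths to root_dir, flattens the per-test-case results into a JSON dump, and writes a companion .isolate file.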
def trace_test_cases(executable, root_dir, cwd_dir, variables, whitelist,
                     blacklist, jobs, index, shards, output_file):
    """Traces test cases one by one."""
    test_cases = get_test_cases(executable, whitelist, blacklist, index,
                                shards)
    if not test_cases:
        return

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + '.logs'

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ''
    print '%.1fs Done post-processing logs. Parsing logs.' % (time.time() -
                                                              progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print '%.1fs Done parsing logs.' % (time.time() - progress.start)

    # Strip the results to paths relative to root_dir.
    results_processed = {}
    for item in results:
        if 'results' in item:
            item = item.copy()
            item['results'] = item['results'].strip_root(root_dir)
            results_processed[item['trace']] = item
        else:
            print >> sys.stderr, 'Got exception while tracing %s: %s' % (
                item['trace'], item['exception'])
    print '%.1fs Done stripping root.' % (time.time() - progress.start)

    # Flatten: merge each valid run from the pool with its parsed trace results.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item['valid']:
                test_case = item['test_case']
                tracename = test_case.replace('/', '-')
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]['results']
                flattened[test_case].update({
                    'processes': len(list(item_results.process.all)),
                    'results': item_results.flatten(),
                    'duration': item['duration'],
                    'returncode': item['returncode'],
                    'valid': item['valid'],
                    'variables': isolate_common.generate_simplified(
                        item_results.existent, root_dir, variables, cwd_dir),
                })
                del flattened[test_case]['trace']
    print '%.1fs Done flattening.' % (time.time() - progress.start)

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item['results'].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables,
                                            cwd_dir)
    with open('%s.isolate' % output_file, 'wb') as f:
        isolate_common.pretty_print(value, f)
    return 0
Example #5
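An integration test that traces eight child processes concurrently through a single tracer, deliberately starting one of them from the wrong directory, and verifies that the parsed log entries match the per-trace return codes and outputs.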
  def test_trace_multiple(self):
    # Starts PARALLEL threads and traces PARALLEL child processes
    # simultaneously. Some are started from the 'data' directory, others from
    # this script's directory. One trace fails. Verify everything still goes on.
    PARALLEL = 8

    def trace(tracer, cmd, cwd, tracename):
      resultcode, output = tracer.trace(
          cmd, cwd, tracename, True)
      return (tracename, resultcode, output)

    with worker_pool.ThreadPool(PARALLEL) as pool:
      api = self.trace_inputs.get_api()
      with api.get_tracer(self.log) as tracer:
        pool.add_task(
            trace, tracer, self.get_child_command(False), ROOT_DIR, 'trace1')
        pool.add_task(
            trace, tracer, self.get_child_command(True), self.cwd, 'trace2')
        pool.add_task(
            trace, tracer, self.get_child_command(False), ROOT_DIR, 'trace3')
        pool.add_task(
            trace, tracer, self.get_child_command(True), self.cwd, 'trace4')
        # Have this one fail since it's started from the wrong directory.
        pool.add_task(
            trace, tracer, self.get_child_command(False), self.cwd, 'trace5')
        pool.add_task(
            trace, tracer, self.get_child_command(True), self.cwd, 'trace6')
        pool.add_task(
            trace, tracer, self.get_child_command(False), ROOT_DIR, 'trace7')
        pool.add_task(
            trace, tracer, self.get_child_command(True), self.cwd, 'trace8')
        trace_results = pool.join()
    def blacklist(f):
      return f.endswith('.pyc')
    actual_results = api.parse_log(self.log, blacklist)
    self.assertEquals(8, len(trace_results))
    self.assertEquals(8, len(actual_results))

    # Convert to dict keyed on the trace name, simpler to verify.
    trace_results = dict((i[0], i[1:]) for i in trace_results)
    actual_results = dict((x.pop('trace'), x) for x in actual_results)
    self.assertEquals(sorted(trace_results), sorted(actual_results))

    # It'd be nice to start different kinds of processes.
    expected_results = [
      self._gen_dict_full(),
      self._gen_dict_full_gyp(),
      self._gen_dict_full(),
      self._gen_dict_full_gyp(),
      self._gen_dict_wrong_path(),
      self._gen_dict_full_gyp(),
      self._gen_dict_full(),
      self._gen_dict_full_gyp(),
    ]
    self.assertEquals(len(expected_results), len(trace_results))

    # See the comment above about the trace that fails because it's started from
    # the wrong directory.
    BUSTED = 4
    for index, key in enumerate(sorted(actual_results)):
      self.assertEquals('trace%d' % (index + 1), key)
      self.assertEquals(2, len(trace_results[key]))
      # returncode
      self.assertEquals(0 if index != BUSTED else 2, trace_results[key][0])
      # output
      self.assertEquals(actual_results[key]['output'], trace_results[key][1])

      self.assertEquals(['output', 'results'], sorted(actual_results[key]))
      results = actual_results[key]['results']
      results = results.strip_root(ROOT_DIR)
      actual = results.flatten()
      self.assertTrue(actual['root'].pop('pid'))
      if index != BUSTED:
        self.assertTrue(actual['root']['children'][0].pop('pid'))
      self.assertEquals(expected_results[index], actual)