Example #1
def write_details(logname, outfile, root_dir, blacklist, results):
    """Writes an .test_cases file with all the information about each test
  case.
  """
    api = trace_inputs.get_api()
    logs = dict(
        (i.pop('trace'), i) for i in api.parse_log(logname, blacklist, None))
    results_processed = {}
    exception = None
    for items in results:
        item = items[-1]
        assert item['valid']
        # Load the results.
        log_dict = logs[item['tracename']]
        if log_dict.get('exception'):
            exception = exception or log_dict['exception']
            continue
        trace_result = log_dict['results']
        if root_dir:
            trace_result = trace_result.strip_root(root_dir)
        results_processed[item['test_case']] = {
            'trace': trace_result.flatten(),
            'duration': item['duration'],
            'output': item['output'],
            'returncode': item['returncode'],
        }

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(outfile, results_processed,
                            len(results_processed) > 20)
    if exception:
        raise exception[0], exception[1], exception[2]
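
The raise exception[0], exception[1], exception[2] at the end is the Python 2
idiom for re-raising a stored exception with its original traceback. Below is a
minimal standalone sketch of that deferred-error pattern; run_all and its tasks
are invented for illustration, while in write_details the tuple comes from the
parsed trace logs.

import sys

def run_all(tasks):
    # Hypothetical helper: run each task, remember the first failure as the
    # (type, value, traceback) tuple from sys.exc_info(), and re-raise it only
    # after the loop finishes, the way write_details defers the error until
    # the .test_cases file has been written.
    exception = None
    for task in tasks:
        try:
            task()
        except Exception:
            exception = exception or sys.exc_info()
    if exception:
        # Python 2 three-expression raise: preserves the original traceback.
        raise exception[0], exception[1], exception[2]
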
Example #2
def write_details(logname, outfile, root_dir, blacklist, results):
    """Writes an .test_cases file with all the information about each test
  case.
  """
    api = trace_inputs.get_api()
    logs = dict((i.pop("trace"), i) for i in api.parse_log(logname, blacklist, None))
    results_processed = {}
    exception = None
    for items in results:
        item = items[-1]
        assert item["valid"]
        # Load the results.
        log_dict = logs[item["tracename"]]
        if log_dict.get("exception"):
            exception = exception or log_dict["exception"]
            continue
        trace_result = log_dict["results"]
        if root_dir:
            trace_result = trace_result.strip_root(root_dir)
        results_processed[item["test_case"]] = {
            "trace": trace_result.flatten(),
            "duration": item["duration"],
            "output": item["output"],
            "returncode": item["returncode"],
        }

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(outfile, results_processed, len(results_processed) > 20)
    if exception:
        raise exception[0], exception[1], exception[2]
Example #3
def write_details(logname, outfile, root_dir, blacklist, results):
  """Writes an .test_cases file with all the information about each test
  case.
  """
  api = trace_inputs.get_api()
  logs = dict(
      (i.pop('trace'), i) for i in api.parse_log(logname, blacklist, None))
  results_processed = {}
  exception = None
  for items in results:
    item = items[-1]
    assert item['valid']
    # Load the results.
    log_dict = logs[item['tracename']]
    if log_dict.get('exception'):
      exception = exception or log_dict['exception']
      continue
    trace_result = log_dict['results']
    if root_dir:
      trace_result = trace_result.strip_root(root_dir)
    results_processed[item['test_case']] = {
      'trace': trace_result.flatten(),
      'duration': item['duration'],
      'output': item['output'],
      'returncode': item['returncode'],
    }

  # Make it dense if there are more than 20 results.
  trace_inputs.write_json(
      outfile,
      results_processed,
      len(results_processed) > 20)
  if exception:
    raise exception[0], exception[1], exception[2]
Example #4
def run_test_cases(
    executable, whitelist, blacklist, jobs, timeout, stats_only, no_dump):
  """Traces test cases one by one."""
  test_cases = get_test_cases(executable, whitelist, blacklist)
  if not test_cases:
    return

  progress = worker_pool.Progress(len(test_cases))
  with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
    function = Runner(executable, os.getcwd(), timeout, progress).map
    for test_case in test_cases:
      pool.add_task(function, test_case)
    results = pool.join(progress, 0.1)
    duration = time.time() - progress.start
  results = dict((item[0]['test_case'], item) for item in results)
  if not no_dump:
    trace_inputs.write_json('%s.run_test_cases' % executable, results, False)
  sys.stderr.write('\n')
  total = len(results)
  if not total:
    return 1

  # Classify the results
  success = []
  flaky = []
  fail = []
  nb_runs = 0
  for test_case in sorted(results):
    items = results[test_case]
    nb_runs += len(items)
    if not any(not i['returncode'] for i in items):
      fail.append(test_case)
    elif len(items) > 1 and any(not i['returncode'] for i in items):
      flaky.append(test_case)
    elif len(items) == 1 and items[0]['returncode'] == 0:
      success.append(test_case)
    else:
      assert False, items

  if not stats_only:
    for test_case in sorted(fail):
      # Failed; print the output of the last run.
      items = results[test_case]
      print items[-1]['output']

    for test_case in sorted(flaky):
      items = results[test_case]
      print '%s is flaky (tried %d times)' % (test_case, len(items))

  print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total)
  print 'Flaky:   %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total)
  print 'Fail:    %4d %5.2f%%' % (len(fail), len(fail) * 100. / total)
  print '%.1fs Done running %d tests with %d executions. %.1f test/s' % (
      duration,
      len(results),
      nb_runs,
      nb_runs / duration)
  return 0
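
The classification loop in run_test_cases is the part worth reading twice: a
test case fails when none of its runs returned 0, is flaky when it needed more
than one run and at least one returned 0, and succeeds when its single run
returned 0. Below is a self-contained restatement of that rule, assuming each
value is the list of run records for one test case; the names are toy data and
the defensive assert is dropped.

def classify(results):
  """Splits test cases into (success, flaky, fail) using the rule above."""
  success, flaky, fail = [], [], []
  for test_case in sorted(results):
    items = results[test_case]
    if not any(not i['returncode'] for i in items):
      fail.append(test_case)
    elif len(items) > 1 and any(not i['returncode'] for i in items):
      flaky.append(test_case)
    else:
      success.append(test_case)
  return success, flaky, fail

# Toy data shaped like the results dict built from pool.join().
example = {
    'Suite.Pass': [{'returncode': 0}],
    'Suite.Flaky': [{'returncode': 1}, {'returncode': 0}],
    'Suite.Fail': [{'returncode': 1}, {'returncode': 1}],
}
assert classify(example) == (['Suite.Pass'], ['Suite.Flaky'], ['Suite.Fail'])
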
Example #5
def save_files(self):
    """Saves both self.result and self.saved_state."""
    trace_inputs.write_json(self.result_file, self.result.flatten(), False)
    total_bytes = sum(
        i.get('size', 0) for i in self.result.files.itervalues())
    if total_bytes:
        logging.debug('Total size: %d bytes' % total_bytes)
    trace_inputs.write_json(result_to_state(self.result_file),
                            self.saved_state.flatten(), False)
Example #6
def run_test_cases(executable, whitelist, blacklist, jobs, timeout, stats_only,
                   no_dump):
    """Traces test cases one by one."""
    test_cases = get_test_cases(executable, whitelist, blacklist)
    if not test_cases:
        return

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        function = Runner(executable, os.getcwd(), timeout, progress).map
        for test_case in test_cases:
            pool.add_task(function, test_case)
        results = pool.join(progress, 0.1)
        duration = time.time() - progress.start
    results = dict((item[0]['test_case'], item) for item in results)
    if not no_dump:
        trace_inputs.write_json('%s.run_test_cases' % executable, results,
                                False)
    sys.stderr.write('\n')
    total = len(results)
    if not total:
        return 1

    # Classify the results
    success = []
    flaky = []
    fail = []
    nb_runs = 0
    for test_case in sorted(results):
        items = results[test_case]
        nb_runs += len(items)
        if not any(not i['returncode'] for i in items):
            fail.append(test_case)
        elif len(items) > 1 and any(not i['returncode'] for i in items):
            flaky.append(test_case)
        elif len(items) == 1 and items[0]['returncode'] == 0:
            success.append(test_case)
        else:
            assert False, items

    if not stats_only:
        for test_case in sorted(fail):
            # Failed; print the output of the last run.
            items = results[test_case]
            print items[-1]['output']

        for test_case in sorted(flaky):
            items = results[test_case]
            print '%s is flaky (tried %d times)' % (test_case, len(items))

    print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total)
    print 'Flaky:   %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total)
    print 'Fail:    %4d %5.2f%%' % (len(fail), len(fail) * 100. / total)
    print '%.1fs Done running %d tests with %d executions. %.1f test/s' % (
        duration, len(results), nb_runs, nb_runs / duration)
    return 0
Example #7
def save_files(self):
  """Saves both self.result and self.saved_state."""
  logging.debug('Dumping to %s' % self.result_file)
  trace_inputs.write_json(self.result_file, self.result.flatten(), False)
  total_bytes = sum(i.get('size', 0) for i in self.result.files.itervalues())
  if total_bytes:
    logging.debug('Total size: %d bytes' % total_bytes)
  saved_state_file = result_to_state(self.result_file)
  logging.debug('Dumping to %s' % saved_state_file)
  trace_inputs.write_json(saved_state_file, self.saved_state.flatten(), False)
Example #8
def trace_test_cases(
    executable, root_dir, cwd_dir, variables, test_cases, jobs, output_file):
  """Traces test cases one by one."""
  assert not os.path.isabs(cwd_dir)
  assert os.path.isabs(root_dir) and os.path.isdir(root_dir)
  assert os.path.isfile(executable) and os.path.isabs(executable)

  if not test_cases:
    return 0

  # Resolve any symlink.
  root_dir = os.path.realpath(root_dir)
  full_cwd_dir = os.path.normpath(os.path.join(root_dir, cwd_dir))
  assert os.path.isdir(full_cwd_dir)
  logname = output_file + '.logs'

  progress = worker_pool.Progress(len(test_cases))
  with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
    api = trace_inputs.get_api()
    api.clean_trace(logname)
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, executable, full_cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(function, test_case)

      values = pool.join(progress, 0.1)

  print ''
  print '%.1fs Done post-processing logs. Parsing logs.' % (
      time.time() - progress.start)
  results = api.parse_log(logname, isolate_common.default_blacklist)
  print '%.1fs Done parsing logs.' % (
      time.time() - progress.start)

  # Strip the paths so they are relative to root_dir.
  results_processed = {}
  for item in results:
    if 'results' in item:
      item = item.copy()
      item['results'] = item['results'].strip_root(root_dir)
      results_processed[item['trace']] = item
    else:
      print >> sys.stderr, 'Got exception while tracing %s: %s' % (
          item['trace'], item['exception'])
  print '%.1fs Done stripping root.' % (
      time.time() - progress.start)

  # Flatten.
  flattened = {}
  for item_list in values:
    for item in item_list:
      if item['valid']:
        test_case = item['test_case']
        tracename = test_case.replace('/', '-')
        flattened[test_case] = results_processed[tracename].copy()
        item_results = flattened[test_case]['results']
        tracked, touched = isolate_common.split_touched(item_results.existent)
        flattened[test_case].update({
            'processes': len(list(item_results.process.all)),
            'results': item_results.flatten(),
            'duration': item['duration'],
            'returncode': item['returncode'],
            'valid': item['valid'],
            'variables':
              isolate_common.generate_simplified(
                  tracked,
                  [],
                  touched,
                  root_dir,
                  variables,
                  cwd_dir),
          })
        del flattened[test_case]['trace']
  print '%.1fs Done flattening.' % (
      time.time() - progress.start)

  # Write the aggregated results; the dense form is not used here.
  trace_inputs.write_json(
      output_file,
      flattened,
      False)

  # Also write the .isolate file.
  # First, get all the files from all results. Use a map to remove dupes.
  files = {}
  for item in results_processed.itervalues():
    files.update((f.full_path, f) for f in item['results'].existent)
  # Convert back to a list, discard the keys.
  files = files.values()
  tracked, touched = isolate_common.split_touched(files)
  value = isolate_common.generate_isolate(
      tracked,
      [],
      touched,
      root_dir,
      variables,
      cwd_dir)
  with open('%s.isolate' % output_file, 'wb') as f:
    isolate_common.pretty_print(value, f)
  return 0
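
The "Flatten" step above is effectively a join between two mappings: run
records from the thread pool, keyed by test case name, and parsed traces,
keyed by trace name, matched through the same '/' to '-' substitution. Below
is a reduced sketch of that join with simplified record shapes; it omits the
processes/variables fields and all names in the sample data are invented.

def flatten(run_items, results_processed):
  flattened = {}
  for item in run_items:
    if not item['valid']:
      continue
    test_case = item['test_case']
    # Same '/' -> '-' substitution as in the examples above.
    tracename = test_case.replace('/', '-')
    merged = results_processed[tracename].copy()
    merged.update({
        'duration': item['duration'],
        'returncode': item['returncode'],
    })
    merged.pop('trace', None)
    flattened[test_case] = merged
  return flattened

runs = [
    {'valid': True, 'test_case': 'Fixture/Test.0', 'duration': 0.5,
     'returncode': 0},
]
traces = {
    'Fixture-Test.0': {'trace': 'Fixture-Test.0', 'results': '<parsed>'},
}
assert flatten(runs, traces) == {
    'Fixture/Test.0': {'results': '<parsed>', 'duration': 0.5, 'returncode': 0},
}
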
Example #9
def trace_test_cases(executable, root_dir, cwd_dir, variables, whitelist,
                     blacklist, jobs, index, shards, output_file):
    """Traces test cases one by one."""
    test_cases = get_test_cases(executable, whitelist, blacklist, index,
                                shards)
    if not test_cases:
        return

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + '.logs'

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ''
    print '%.1fs Done post-processing logs. Parsing logs.' % (time.time() -
                                                              progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print '%.1fs Done parsing logs.' % (time.time() - progress.start)

    # Strip the paths so they are relative to root_dir.
    results_processed = {}
    for item in results:
        if 'results' in item:
            item = item.copy()
            item['results'] = item['results'].strip_root(root_dir)
            results_processed[item['trace']] = item
        else:
            print >> sys.stderr, 'Got exception while tracing %s: %s' % (
                item['trace'], item['exception'])
    print '%.1fs Done stripping root.' % (time.time() - progress.start)

    # Flatten.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item['valid']:
                test_case = item['test_case']
                tracename = test_case.replace('/', '-')
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]['results']
                flattened[test_case].update({
                    'processes':
                    len(list(item_results.process.all)),
                    'results':
                    item_results.flatten(),
                    'duration':
                    item['duration'],
                    'returncode':
                    item['returncode'],
                    'valid':
                    item['valid'],
                    'variables':
                    isolate_common.generate_simplified(item_results.existent,
                                                       root_dir, variables,
                                                       cwd_dir),
                })
                del flattened[test_case]['trace']
    print '%.1fs Done flattening.' % (time.time() - progress.start)

    # Write the aggregated results; the dense form is not used here.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item['results'].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables,
                                            cwd_dir)
    with open('%s.isolate' % output_file, 'wb') as f:
        isolate_common.pretty_print(value, f)
    return 0
Example #10
def trace_test_cases(executable, root_dir, cwd_dir, variables, test_cases, jobs, output_file):
    """Traces test cases one by one."""
    if not test_cases:
        return 0

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + ".logs"

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ""
    print "%.1fs Done post-processing logs. Parsing logs." % (time.time() - progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print "%.1fs Done parsing logs." % (time.time() - progress.start)

    # Strip the paths so they are relative to root_dir.
    results_processed = {}
    for item in results:
        if "results" in item:
            item = item.copy()
            item["results"] = item["results"].strip_root(root_dir)
            results_processed[item["trace"]] = item
        else:
            print >> sys.stderr, "Got exception while tracing %s: %s" % (item["trace"], item["exception"])
    print "%.1fs Done stripping root." % (time.time() - progress.start)

    # Flatten.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item["valid"]:
                test_case = item["test_case"]
                tracename = test_case.replace("/", "-")
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]["results"]
                flattened[test_case].update(
                    {
                        "processes": len(list(item_results.process.all)),
                        "results": item_results.flatten(),
                        "duration": item["duration"],
                        "returncode": item["returncode"],
                        "valid": item["valid"],
                        "variables": isolate_common.generate_simplified(
                            item_results.existent, root_dir, variables, cwd_dir
                        ),
                    }
                )
                del flattened[test_case]["trace"]
    print "%.1fs Done flattening." % (time.time() - progress.start)

    # Write the aggregated results; the dense form is not used here.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item["results"].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables, cwd_dir)
    with open("%s.isolate" % output_file, "wb") as f:
        isolate_common.pretty_print(value, f)
    return 0
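
The duplicate removal before generate_isolate leans on dict keys: several
traces can report the same file, and keying by full_path keeps exactly one
entry per path. Below is a standalone illustration with a stand-in File type;
the real objects come from item['results'].existent and this is only a sketch
of the pattern.

import collections

File = collections.namedtuple('File', ['full_path', 'size'])

traces = [
    [File('/src/a.cc', 10), File('/src/b.h', 2)],
    [File('/src/a.cc', 10), File('/src/c.py', 5)],
]
files = {}
for existent in traces:
    # Later occurrences overwrite earlier ones; the path is the dedupe key.
    files.update((f.full_path, f) for f in existent)
# Convert back to a list, discarding the keys, as the examples do.
files = files.values()
assert sorted(f.full_path for f in files) == [
    '/src/a.cc', '/src/b.h', '/src/c.py']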