Example #1
def print_all(comment, data, stream):
    """Prints a complete .isolate file and its top-level file comment into a
  stream.
  """
    if comment:
        stream.write(comment)
    pretty_print(data, stream)
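
For context, a minimal usage sketch of print_all; the comment string and data dictionary here are hypothetical, and pretty_print is assumed to write data as a .isolate-style literal:

import sys

# Hypothetical call: the comment is written verbatim, then data is
# pretty-printed after it.
print_all(
    '# Generated file, do not edit.\n',
    {'variables': {'isolate_dependency_tracked': ['file1.txt']}},
    sys.stdout)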
Example #2
def CMDread(args):
  """Reads the trace file generated with command 'trace'.

  Ignores --outdir.
  """
  parser = OptionParserIsolate(command='read', require_result=False)
  options, _ = parser.parse_args(args)
  complete_state = load_complete_state(options, NO_INFO)

  api = trace_inputs.get_api()
  logfile = complete_state.result_file + '.log'
  if not os.path.isfile(logfile):
    raise ExecutionError(
        'No log file \'%s\' to read, did you forget to \'trace\'?' % logfile)
  try:
    results = trace_inputs.load_trace(
        logfile, complete_state.root_dir, api, isolate_common.default_blacklist)
    value = isolate_common.generate_isolate(
        results.existent,
        complete_state.root_dir,
        complete_state.saved_state.variables,
        complete_state.result.relative_cwd)
    isolate_common.pretty_print(value, sys.stdout)
  except trace_inputs.TracingFailure, e:
    raise ExecutionError(
        'Reading traces failed for: %s\n%s' %
        (' '.join(complete_state.result.command), str(e)))
  return 0
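
Handlers named CMD<command> like this one are normally looked up by name from a main() dispatcher. A minimal sketch of such a dispatcher, assuming module-level handlers that take the remaining argv; this illustrates the pattern and is not the tool's actual entry point:

import sys

def main(argv):
  # Assumption: 'read' maps to CMDread, 'trace' to CMDtrace, and so on.
  if not argv:
    print >> sys.stderr, 'Expected a command, e.g. \'read\' or \'trace\'.'
    return 1
  handler = globals().get('CMD' + argv[0])
  if not handler:
    print >> sys.stderr, 'Unknown command %s.' % argv[0]
    return 1
  return handler(argv[1:])

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))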
Example #3
def print_all(comment, data, stream):
  """Prints a complete .isolate file and its top-level file comment into a
  stream.
  """
  if comment:
    stream.write(comment)
  pretty_print(data, stream)
Example #4
def CMDread(args):
  """Reads the trace file generated with command 'trace'.

  Ignores --outdir.
  """
  parser = OptionParserIsolate(command='read', require_result=False)
  options, _ = parser.parse_args(args)
  complete_state = load_complete_state(options, NO_INFO)
  value = read(complete_state)
  isolate_common.pretty_print(value, sys.stdout)
  return 0
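
The read() helper this variant delegates to is not shown; a plausible sketch, assuming it simply factors out the inline body of Example #2 so that the caller keeps the pretty_print call:

def read(complete_state):
  """Loads the trace log and returns the .isolate data (sketch only)."""
  api = trace_inputs.get_api()
  logfile = complete_state.result_file + '.log'
  if not os.path.isfile(logfile):
    raise ExecutionError(
        'No log file \'%s\' to read, did you forget to \'trace\'?' % logfile)
  try:
    results = trace_inputs.load_trace(
        logfile, complete_state.root_dir, api, isolate_common.default_blacklist)
    return isolate_common.generate_isolate(
        results.existent,
        complete_state.root_dir,
        complete_state.saved_state.variables,
        complete_state.result.relative_cwd)
  except trace_inputs.TracingFailure, e:
    raise ExecutionError(
        'Reading traces failed for: %s\n%s' %
        (' '.join(complete_state.result.command), str(e)))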
Example #5
def MODEread(_outdir, state):
    """Reads the trace file generated with --mode=trace."""
    api = trace_inputs.get_api()
    logfile = state.result_file + '.log'
    if not os.path.isfile(logfile):
        return 1
    try:
        results = trace_inputs.load_trace(logfile, state.root_dir, api,
                                          isolate_common.default_blacklist)
        value = isolate_common.generate_isolate(results.existent,
                                                state.root_dir,
                                                state.saved_state.variables,
                                                state.result.relative_cwd)
        isolate_common.pretty_print(value, sys.stdout)
        return 0
    except trace_inputs.TracingFailure, e:
        print >> sys.stderr, ('\nReading traces failed for: %s' %
                              ' '.join(state.result.command))
        print >> sys.stderr, str(e)
        return 1
Example #6
def trace_test_cases(
    executable, root_dir, cwd_dir, variables, test_cases, jobs, output_file):
  """Traces test cases one by one."""
  assert not os.path.isabs(cwd_dir)
  assert os.path.isabs(root_dir) and os.path.isdir(root_dir)
  assert os.path.isfile(executable) and os.path.isabs(executable)

  if not test_cases:
    return 0

  # Resolve any symlink.
  root_dir = os.path.realpath(root_dir)
  full_cwd_dir = os.path.normpath(os.path.join(root_dir, cwd_dir))
  assert os.path.isdir(full_cwd_dir)
  logname = output_file + '.logs'

  progress = worker_pool.Progress(len(test_cases))
  with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
    api = trace_inputs.get_api()
    api.clean_trace(logname)
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, executable, full_cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(function, test_case)

      values = pool.join(progress, 0.1)

  print ''
  print '%.1fs Done post-processing logs. Parsing logs.' % (
      time.time() - progress.start)
  results = api.parse_log(logname, isolate_common.default_blacklist)
  print '%.1fs Done parsing logs.' % (
      time.time() - progress.start)

  # Strips to root_dir.
  results_processed = {}
  for item in results:
    if 'results' in item:
      item = item.copy()
      item['results'] = item['results'].strip_root(root_dir)
      results_processed[item['trace']] = item
    else:
      print >> sys.stderr, 'Got exception while tracing %s: %s' % (
          item['trace'], item['exception'])
  print '%.1fs Done stripping root.' % (
      time.time() - progress.start)

  # Flatten.
  flattened = {}
  for item_list in values:
    for item in item_list:
      if item['valid']:
        test_case = item['test_case']
        tracename = test_case.replace('/', '-')
        flattened[test_case] = results_processed[tracename].copy()
        item_results = flattened[test_case]['results']
        tracked, touched = isolate_common.split_touched(item_results.existent)
        flattened[test_case].update({
            'processes': len(list(item_results.process.all)),
            'results': item_results.flatten(),
            'duration': item['duration'],
            'returncode': item['returncode'],
            'valid': item['valid'],
            'variables':
              isolate_common.generate_simplified(
                  tracked,
                  [],
                  touched,
                  root_dir,
                  variables,
                  cwd_dir),
          })
        del flattened[test_case]['trace']
  print '%.1fs Done flattening.' % (
      time.time() - progress.start)

  # Make it dense if there are more than 20 results.
  trace_inputs.write_json(
      output_file,
      flattened,
      False)

  # Also write the .isolate file.
  # First, get all the files from all results. Use a map to remove dupes.
  files = {}
  for item in results_processed.itervalues():
    files.update((f.full_path, f) for f in item['results'].existent)
  # Convert back to a list, discard the keys.
  files = files.values()
  tracked, touched = isolate_common.split_touched(files)
  value = isolate_common.generate_isolate(
      tracked,
      [],
      touched,
      root_dir,
      variables,
      cwd_dir)
  with open('%s.isolate' % output_file, 'wb') as f:
    isolate_common.pretty_print(value, f)
  return 0
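
The Tracer class handed to the pool is not shown. A minimal sketch of the interface the code above relies on: map(test_case) runs one test under the tracer and returns a list of result dicts whose keys match what the flattening loop reads. The tracer.trace() signature, the --gtest_filter flag, and the Progress call are all assumptions:

class Tracer(object):
  """Sketch of the worker the pool expects, not the tool's real class."""

  def __init__(self, tracer, executable, cwd_dir, progress):
    self.tracer = tracer
    self.executable = executable
    self.cwd_dir = cwd_dir
    self.progress = progress

  def map(self, test_case):
    # The trace name must match the keys of results_processed; the flattening
    # loop above derives it the same way.
    tracename = test_case.replace('/', '-')
    start = time.time()
    # Assumed signature: trace(cmd, cwd, tracename, output) -> (returncode, output).
    returncode, _ = self.tracer.trace(
        [self.executable, '--gtest_filter=%s' % test_case],
        self.cwd_dir, tracename, True)
    self.progress.update_item(test_case)  # Assumed Progress API.
    return [{
        'test_case': test_case,
        'duration': time.time() - start,
        'returncode': returncode,
        'valid': returncode == 0,
    }]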
Example #7
def trace_test_cases(executable, root_dir, cwd_dir, variables, whitelist,
                     blacklist, jobs, index, shards, output_file):
    """Traces test cases one by one."""
    test_cases = get_test_cases(executable, whitelist, blacklist, index,
                                shards)
    if not test_cases:
        return

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + '.logs'

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ''
    print '%.1fs Done post-processing logs. Parsing logs.' % (time.time() -
                                                              progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print '%.1fs Done parsing logs.' % (time.time() - progress.start)

    # Strips to root_dir.
    results_processed = {}
    for item in results:
        if 'results' in item:
            item = item.copy()
            item['results'] = item['results'].strip_root(root_dir)
            results_processed[item['trace']] = item
        else:
            print >> sys.stderr, 'Got exception while tracing %s: %s' % (
                item['trace'], item['exception'])
    print '%.1fs Done stripping root.' % (time.time() - progress.start)

    # Flatten.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item['valid']:
                test_case = item['test_case']
                tracename = test_case.replace('/', '-')
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]['results']
                flattened[test_case].update({
                    'processes': len(list(item_results.process.all)),
                    'results': item_results.flatten(),
                    'duration': item['duration'],
                    'returncode': item['returncode'],
                    'valid': item['valid'],
                    'variables': isolate_common.generate_simplified(
                        item_results.existent, root_dir, variables, cwd_dir),
                })
                del flattened[test_case]['trace']
    print '%.1fs Done flattening.' % (time.time() - progress.start)

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item['results'].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables,
                                            cwd_dir)
    with open('%s.isolate' % output_file, 'wb') as f:
        isolate_common.pretty_print(value, f)
    return 0
Example #8
def _test(self, value, expected):
    actual = cStringIO.StringIO()
    isolate_common.pretty_print(value, actual)
    self.assertEquals(expected, actual.getvalue())
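
A usage sketch of this helper inside a unittest.TestCase; the expected string is illustrative only, since the exact output depends on pretty_print's formatting:

import cStringIO
import unittest

import isolate_common

class PrettyPrintTest(unittest.TestCase):
  def _test(self, value, expected):
    actual = cStringIO.StringIO()
    isolate_common.pretty_print(value, actual)
    self.assertEquals(expected, actual.getvalue())

  def test_empty(self):
    # Assumption about the format: an empty dict prints as an empty block.
    self._test({}, '{\n}\n')

if __name__ == '__main__':
  unittest.main()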
Example #9
def trace_test_cases(executable, root_dir, cwd_dir, variables, test_cases, jobs, output_file):
    """Traces test cases one by one."""
    if not test_cases:
        return 0

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + ".logs"

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ""
    print "%.1fs Done post-processing logs. Parsing logs." % (time.time() - progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print "%.1fs Done parsing logs." % (time.time() - progress.start)

    # Strips to root_dir.
    results_processed = {}
    for item in results:
        if "results" in item:
            item = item.copy()
            item["results"] = item["results"].strip_root(root_dir)
            results_processed[item["trace"]] = item
        else:
            print >> sys.stderr, "Got exception while tracing %s: %s" % (item["trace"], item["exception"])
    print "%.1fs Done stripping root." % (time.time() - progress.start)

    # Flatten.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item["valid"]:
                test_case = item["test_case"]
                tracename = test_case.replace("/", "-")
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]["results"]
                flattened[test_case].update(
                    {
                        "processes": len(list(item_results.process.all)),
                        "results": item_results.flatten(),
                        "duration": item["duration"],
                        "returncode": item["returncode"],
                        "valid": item["valid"],
                        "variables": isolate_common.generate_simplified(
                            item_results.existent, root_dir, variables, cwd_dir
                        ),
                    }
                )
                del flattened[test_case]["trace"]
    print "%.1fs Done flattening." % (time.time() - progress.start)

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item["results"].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables, cwd_dir)
    with open("%s.isolate" % output_file, "wb") as f:
        isolate_common.pretty_print(value, f)
    return 0
Example #10
def _test(self, value, expected):
  actual = cStringIO.StringIO()
  isolate_common.pretty_print(value, actual)
  self.assertEquals(expected, actual.getvalue())