Example #1
    def test_trace_tricky_filename(self):
        # TODO(maruel): On Windows, it's using the current code page so some
        # characters can't be represented. As a nice North American, hard code the
        # string to something representable in code page 1252. The exact code page
        # depends on the user's system.
        if sys.platform == 'win32':
            filename = u'foo, bar,  ~p#o,,ué^t%t .txt'
        else:
            filename = u'foo, bar,  ~p#o,,ué^t%t 和平.txt'

        exe = os.path.join(self.tempdir, 'tricky_filename.py')
        shutil.copyfile(
            os.path.join(self.cwd, 'trace_inputs', 'tricky_filename.py'), exe)
        expected = {
            'root': {
                'children': [],
                'command': [
                    self.executable,
                    exe,
                ],
                'executable': self.real_executable,
                'files': [
                    {
                        'mode': MODE_W,
                        'path': filename,
                        'size': long(len('Bingo!')),
                    },
                    {
                        'mode': MODE_R,
                        'path': u'tricky_filename.py',
                        'size': self._size(REL_DATA, 'tricky_filename.py'),
                    },
                ],
                'initial_cwd': self.tempdir if sys.platform != 'win32' else None,
            },
        }

        api = trace_inputs.get_api()
        returncode, output = trace_inputs.trace(self.log, [exe], self.tempdir,
                                                api, True)
        self.assertEqual('', output)
        self.assertEqual(0, returncode)
        data = api.parse_log(self.log, lambda _: False, None)
        self.assertEqual(1, len(data))
        if 'exception' in data[0]:
            raise data[0]['exception'][0], \
                data[0]['exception'][1], \
                data[0]['exception'][2]
        actual = data[0]['results'].strip_root(self.tempdir).flatten()
        self.assertTrue(actual['root'].pop('pid'))
        self.assertEqual(expected, actual)
        trace_inputs.get_api().clean_trace(self.log)
        files = sorted(
            unicodedata.normalize('NFC', i)
            for i in os.listdir(unicode(self.tempdir)))
        self.assertEqual([filename, 'tricky_filename.py'], files)
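The three-expression raise used in this test (and in several examples below) is Python 2 syntax for re-raising a (type, value, traceback) triple that parse_log() stored under the 'exception' key. A minimal portable sketch of the same re-raise, assuming the entry layout shown above (six is a third-party helper, not part of this tree):

# Sketch: re-raise the sys.exc_info()-style triple stored by parse_log().
# Assumes entry['exception'] == (exc_type, exc_value, exc_traceback).
import six

def reraise_logged_exception(entry):
  exc = entry.get('exception')
  if exc:
    # On Python 2 this is equivalent to `raise exc[0], exc[1], exc[2]`.
    six.reraise(exc[0], exc[1], exc[2])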
Example #2
  def test_trace_tricky_filename(self):
    # TODO(maruel): On Windows, it's using the current code page so some
    # characters can't be represented. As a nice North American, hard code the
    # string to something representable in code page 1252. The exact code page
    # depends on the user's system.
    if sys.platform == 'win32':
      filename = u'foo, bar,  ~p#o,,ué^t%t .txt'
    else:
      filename = u'foo, bar,  ~p#o,,ué^t%t 和平.txt'

    exe = os.path.join(self.tempdir, 'tricky_filename.py')
    shutil.copyfile(
        os.path.join(self.cwd, 'trace_inputs', 'tricky_filename.py'), exe)
    expected = {
      'root': {
        'children': [],
        'command': [
          self.executable,
          exe,
        ],
        'executable': self.real_executable,
        'files': [
          {
            'mode': MODE_W,
            'path': filename,
            'size': long(len('Bingo!')),
          },
          {
            'mode': MODE_R,
            'path': u'tricky_filename.py',
            'size': self._size(REL_DATA, 'tricky_filename.py'),
          },
        ],
        'initial_cwd': self.tempdir if sys.platform != 'win32' else None,
      },
    }

    api = trace_inputs.get_api()
    returncode, output = trace_inputs.trace(
        self.log, [exe], self.tempdir, api, True)
    self.assertEqual('', output)
    self.assertEqual(0, returncode)
    data = api.parse_log(self.log, lambda _: False, None)
    self.assertEqual(1, len(data))
    if 'exception' in data[0]:
      raise data[0]['exception'][0], \
          data[0]['exception'][1], \
          data[0]['exception'][2]
    actual = data[0]['results'].strip_root(self.tempdir).flatten()
    self.assertTrue(actual['root'].pop('pid'))
    self.assertEqual(expected, actual)
    trace_inputs.get_api().clean_trace(self.log)
    files = sorted(
        unicodedata.normalize('NFC', i)
        for i in os.listdir(unicode(self.tempdir)))
    self.assertEqual([filename, 'tricky_filename.py'], files)
Example #3
def write_details(logname, outfile, root_dir, blacklist, results):
  """Writes an .test_cases file with all the information about each test
  case.
  """
  api = trace_inputs.get_api()
  logs = dict(
      (i.pop('trace'), i) for i in api.parse_log(logname, blacklist, None))
  results_processed = {}
  exception = None
  for items in results:
    item = items[-1]
    assert item['valid']
    # Load the results.
    log_dict = logs[item['tracename']]
    if log_dict.get('exception'):
      exception = exception or log_dict['exception']
      continue
    trace_result = log_dict['results']
    if root_dir:
      trace_result = trace_result.strip_root(root_dir)
    results_processed[item['test_case']] = {
      'trace': trace_result.flatten(),
      'duration': item['duration'],
      'output': item['output'],
      'returncode': item['returncode'],
    }

  # Make it dense if there are more than 20 results.
  tools.write_json(
      outfile,
      results_processed,
      len(results_processed) > 20)
  if exception:
    raise exception[0], exception[1], exception[2]
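The boolean third argument to tools.write_json switches to a dense encoding once there are more than 20 results. If that helper is unavailable, a rough stdlib stand-in could look like this; treating "dense" as "no whitespace" is an assumption, not the documented behavior:

import json

def write_json_sketch(path, data, dense):
  # Dense output strips separator whitespace; otherwise pretty-print.
  kwargs = {'separators': (',', ':')} if dense else {'indent': 2}
  with open(path, 'w') as f:
    json.dump(data, f, **kwargs)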
Example #4
def read_trace(logname, root_dir, cwd_dir, product_dir):
  # Resolve any symlink
  root_dir = os.path.realpath(root_dir)
  api = trace_inputs.get_api()
  _, _, _, _, simplified = trace_inputs.load_trace(logname, root_dir, api)
  variables = trace_inputs.generate_dict(simplified, cwd_dir, product_dir)
  trace_inputs.pretty_print(variables, sys.stdout)
Example #5
def CMDtrace(args):
  """Traces the target using trace_inputs.py.

  It runs the executable without remapping it, and traces all the files it and
  its child processes access. Then the 'read' command can be used to generate an
  updated .isolate file out of it.

  Argument processing stops at the first non-recognized argument and these
  arguments are appended to the command line of the target to run. For example,
  use: isolate.py -r foo.results -- --gtest_filter=Foo.Bar
  """
  parser = OptionParserIsolate(command='trace')
  parser.enable_interspersed_args()
  options, args = parser.parse_args(args)
  complete_state = load_complete_state(options, STATS_ONLY)
  cmd = complete_state.result.command + args
  if not cmd:
    raise ExecutionError('No command to run')
  cmd = trace_inputs.fix_python_path(cmd)
  cwd = os.path.normpath(os.path.join(
      complete_state.root_dir, complete_state.result.relative_cwd))
  logging.info('Running %s, cwd=%s' % (cmd, cwd))
  api = trace_inputs.get_api()
  logfile = complete_state.result_file + '.log'
  api.clean_trace(logfile)
  try:
    with api.get_tracer(logfile) as tracer:
      result, _ = tracer.trace(
          cmd,
          cwd,
          'default',
          True)
  except trace_inputs.TracingFailure, e:
    raise ExecutionError('Tracing failed for: %s\n%s' % (' '.join(cmd), str(e)))
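Most examples on this page share the pattern shown here: get the platform tracer from trace_inputs.get_api(), clean any stale log, then run commands inside the get_tracer() context manager, which finalizes the log on exit. A condensed sketch with placeholder command and paths:

api = trace_inputs.get_api()
api.clean_trace('demo.log')  # remove any stale log first
with api.get_tracer('demo.log') as tracer:
  # tracer.trace(cmd, cwd, tracename, output) -> (returncode, output)
  returncode, _ = tracer.trace(['python', 'hello.py'], '.', 'default', True)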
Example #6
def CMDtrace(args):
  """Traces the target using trace_inputs.py.

  It runs the executable without remapping it, and traces all the files it and
  its child processes access. Then the 'read' command can be used to generate an
  updated .isolate file out of it.
  """
  parser = OptionParserIsolate(command='trace')
  options, _ = parser.parse_args(args)
  complete_state = load_complete_state(options, STATS_ONLY)

  cwd = os.path.join(
      complete_state.root_dir, complete_state.result.relative_cwd)
  logging.info('Running %s, cwd=%s' % (complete_state.result.command, cwd))
  if not complete_state.result.command:
    raise ExecutionError('No command to run')
  api = trace_inputs.get_api()
  logfile = complete_state.result_file + '.log'
  try:
    with api.get_tracer(logfile) as tracer:
      result, _ = tracer.trace(
          complete_state.result.command,
          cwd,
          'default',
          True)
  except trace_inputs.TracingFailure, e:
    raise ExecutionError(
        'Tracing failed for: %s\n%s' %
          (' '.join(complete_state.result.command), str(e)))
Example #7
def CMDread(args):
  """Reads the trace file generated with command 'trace'.

  Ignores --outdir.
  """
  parser = OptionParserIsolate(command='read', require_result=False)
  options, _ = parser.parse_args(args)
  complete_state = load_complete_state(options, NO_INFO)

  api = trace_inputs.get_api()
  logfile = complete_state.result_file + '.log'
  if not os.path.isfile(logfile):
    raise ExecutionError(
        'No log file \'%s\' to read, did you forget to \'trace\'?' % logfile)
  try:
    results = trace_inputs.load_trace(
        logfile, complete_state.root_dir, api, isolate_common.default_blacklist)
    value = isolate_common.generate_isolate(
        results.existent,
        complete_state.root_dir,
        complete_state.saved_state.variables,
        complete_state.result.relative_cwd)
    isolate_common.pretty_print(value, sys.stdout)
  except trace_inputs.TracingFailure, e:
    raise ExecutionError(
        'Reading traces failed for: %s\n%s' %
          (' '.join(complete_state.result.command), str(e)))
Example #8
def trace_test_cases(cmd, cwd_dir, test_cases, jobs, logname):
    """Traces each test cases individually but all in parallel."""
    assert os.path.isabs(cwd_dir) and os.path.isdir(cwd_dir), cwd_dir

    if not test_cases:
        return []

    # Resolve any symlink.
    cwd_dir = os.path.realpath(cwd_dir)
    assert os.path.isdir(cwd_dir)

    api = trace_inputs.get_api()
    api.clean_trace(logname)

    jobs = jobs or multiprocessing.cpu_count()
    # Try to do black magic here by guessing a few of the run_test_cases.py
    # flags. It's cheesy but it works.
    for i, v in enumerate(cmd):
        if v.endswith("run_test_cases.py"):
            # Found it. Process the arguments here.
            _, options, _ = run_test_cases.process_args(cmd[i:])
            # Always override with the lowest value.
            jobs = min(options.jobs, jobs)
            break

    progress = run_test_cases.Progress(len(test_cases))
    with run_test_cases.ThreadPool(progress, jobs, jobs, len(test_cases)) as pool:
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, cmd, cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(0, function, test_case)

            results = pool.join()
    print("")
    return results
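A hypothetical invocation of the function above; the binary path and test names are placeholders, and jobs=None lets it fall back to multiprocessing.cpu_count():

import os
import sys

results = trace_test_cases(
    cmd=[sys.executable, 'run_test_cases.py', 'out/Release/base_unittests'],
    cwd_dir=os.getcwd(),
    test_cases=['Foo.Bar', 'Foo.Baz'],
    jobs=None,
    logname='base_unittests.log')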
Example #9
def write_details(logname, outfile, root_dir, blacklist, results):
    """Writes an .test_cases file with all the information about each test
  case.
  """
    api = trace_inputs.get_api()
    logs = dict((i.pop("trace"), i) for i in api.parse_log(logname, blacklist, None))
    results_processed = {}
    exception = None
    for items in results:
        item = items[-1]
        assert item["valid"]
        # Load the results.
        log_dict = logs[item["tracename"]]
        if log_dict.get("exception"):
            exception = exception or log_dict["exception"]
            continue
        trace_result = log_dict["results"]
        if root_dir:
            trace_result = trace_result.strip_root(root_dir)
        results_processed[item["test_case"]] = {
            "trace": trace_result.flatten(),
            "duration": item["duration"],
            "output": item["output"],
            "returncode": item["returncode"],
        }

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(outfile, results_processed, len(results_processed) > 20)
    if exception:
        raise exception[0], exception[1], exception[2]
Example #10
def isolate_test_cases(
    cmd, test_cases, jobs, isolated_file, isolate_file,
    root_dir, reldir, variables):
  assert os.path.isabs(root_dir) and os.path.isdir(root_dir), root_dir

  logname = isolated_file + '.log'
  basename = isolated_file.rsplit('.', 1)[0]
  cwd_dir = os.path.join(root_dir, reldir)
  # Do the actual tracing.
  results = trace_test_cases.trace_test_cases(
      cmd, cwd_dir, test_cases, jobs, logname)

  api = trace_inputs.get_api()
  logs = dict(
      (i.pop('trace'), i)
      for i in api.parse_log(logname, isolate.chromium_default_blacklist, None))
  exception = None
  try:
    inputs = []
    for items in results:
      item = items[-1]
      assert item['valid']
      # Load the results.
      log_dict = logs[item['tracename']]
      if log_dict.get('exception'):
        exception = exception or log_dict['exception']
        logging.error('Got exception: %s', exception)
        continue
      files = log_dict['results'].strip_root(root_dir).files
      tracked, touched = isolate.split_touched(files)
      value = isolate.generate_isolate(
          tracked,
          [],
          touched,
          root_dir,
          variables,
          reldir)
      # item['test_case'] could be an invalid file name.
      out = basename + '.' + item['tracename'] + '.isolate'
      with open(out, 'w') as f:
        isolate.pretty_print(value, f)
      inputs.append(out)

    # Merges back. Note that it is possible to blow up the command line
    # argument length here but writing the files is still useful. Convert to
    # importing the module instead if necessary.
    merge_cmd = [
      sys.executable,
      os.path.join(BASE_DIR, 'isolate_merge.py'),
      isolate_file,
      '-o', isolate_file,
    ]
    merge_cmd.extend(inputs)
    logging.info(merge_cmd)
    proc = subprocess.Popen(merge_cmd)
    proc.communicate()
    return proc.returncode
  finally:
    if exception:
      raise exception[0], exception[1], exception[2]
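The comment about command line length above suggests importing the module instead of spawning a subprocess. A sketch of that alternative; the assumption that isolate_merge exposes a main(args) entry point accepting the same argument list is mine, not the source's:

import isolate_merge  # hypothetical in-process alternative to subprocess

def merge_in_process(isolate_file, inputs):
  # Same arguments as merge_cmd above, minus the interpreter and script path.
  return isolate_merge.main([isolate_file, '-o', isolate_file] + inputs)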
Example #11
def read_trace(logname, root_dir, cwd_dir, product_dir):
    # Resolve any symlink
    root_dir = os.path.realpath(root_dir)
    api = trace_inputs.get_api()
    _, _, _, _, simplified = trace_inputs.load_trace(logname, root_dir, api)
    variables = trace_inputs.generate_dict(simplified, cwd_dir, product_dir)
    trace_inputs.pretty_print(variables, sys.stdout)
Example #12
def isolate_test_cases(cmd, test_cases, jobs, isolated_file, isolate_file,
                       root_dir, reldir, path_variables, config_variables,
                       extra_variables, trace_blacklist):
    assert os.path.isabs(root_dir) and os.path.isdir(root_dir), root_dir

    logname = isolated_file + '.log'
    basename = isolated_file.rsplit('.', 1)[0]
    cwd_dir = os.path.join(root_dir, reldir)
    # Do the actual tracing.
    results = trace_test_cases.trace_test_cases(cmd, cwd_dir, test_cases, jobs,
                                                logname)
    api = trace_inputs.get_api()
    blacklist = tools.gen_blacklist(trace_blacklist)
    logs = dict(
        (i.pop('trace'), i) for i in api.parse_log(logname, blacklist, None))
    exception = None
    try:
        inputs = []
        for items in results:
            item = items[-1]
            assert item['valid']
            # Load the results.
            log_dict = logs[item['tracename']]
            if log_dict.get('exception'):
                exception = exception or log_dict['exception']
                logging.error('Got exception: %s', exception)
                continue
            files = log_dict['results'].strip_root(root_dir).files
            tracked, touched = isolate.isolate_format.split_touched(files)
            value = isolate.generate_isolate(tracked, [], touched, root_dir,
                                             path_variables, config_variables,
                                             extra_variables, reldir,
                                             blacklist)
            # item['test_case'] could be an invalid file name.
            out = basename + '.' + item['tracename'] + '.isolate'
            with open(out, 'w') as f:
                isolate.isolate_format.pretty_print(value, f)
            inputs.append(out)

        # Merges back. Note that it is possible to blow up the command line
        # argument length here but writing the files is still useful. Convert to
        # importing the module instead if necessary.
        merge_cmd = [
            sys.executable,
            os.path.join(ROOT_DIR, 'isolate_merge.py'),
            isolate_file,
            '-o',
            isolate_file,
        ]
        merge_cmd.extend(inputs)
        logging.info(merge_cmd)
        proc = subprocess.Popen(merge_cmd)
        proc.communicate()
        return proc.returncode
    finally:
        if exception:
            raise exception[0], exception[1], exception[2]
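tools.gen_blacklist() converts the trace_blacklist regexes into the callable that api.parse_log() expects. A plausible reconstruction; matching each path with re.match() is an assumption about the helper's exact rule:

import re

def gen_blacklist_sketch(regexes):
  compiled = [re.compile(r) for r in regexes or []]
  def blacklist(f):
    # True means the file is ignored by the log parser.
    return any(c.match(f) for c in compiled)
  return blacklist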
Example #13
def trace_test_case(test_case, executable, root_dir, cwd_dir, product_dir,
                    leak):
    """Traces a single test case and returns the .isolate compatible variable
  dict.
  """
    # Resolve any symlink
    root_dir = os.path.realpath(root_dir)

    api = trace_inputs.get_api()
    cmd = [executable, '--gtest_filter=%s' % test_case]

    if not leak:
        f, logname = tempfile.mkstemp(prefix='trace')
        os.close(f)
    else:
        logname = '%s.%s.log' % (executable, test_case.replace('/', '-'))
        f = None

    try:
        simplified = None
        processes = 0
        for i in range(10):
            start = time.time()
            returncode, output = trace_inputs.trace(
                logname, cmd, os.path.join(root_dir, cwd_dir), api, True)
            if returncode and i < 5:
                print '\nFailed while running: %s' % ' '.join(cmd)
                continue
            duration = time.time() - start
            try:
                results, simplified = trace_inputs.load_trace(
                    logname, root_dir, api)
                break
            except Exception:
                print '\nFailed loading the trace for: %s' % ' '.join(cmd)
        if simplified:
            variables = trace_inputs.generate_dict(simplified, cwd_dir,
                                                   product_dir)
        else:
            variables = {}
        return {
            'case': test_case,
            'duration': duration,
            'output': output,
            'processes': processes,
            'result': returncode,
            'variables': variables,
            'results': results.flatten(),
        }
    finally:
        if f:
            os.remove(logname)
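A hypothetical call to the function above, with illustrative paths; leak=True would keep a named per-test log on disk instead of a temp file that is removed afterwards:

info = trace_test_case(
    'Foo.Bar', '/src/out/Release/base_unittests',
    '/src', 'out/Release', 'out/Release', leak=False)
print info['result'], info['duration']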
Example #14
def trace_test_case(
    test_case, executable, root_dir, cwd_dir, product_dir, leak):
  """Traces a single test case and returns the .isolate compatible variable
  dict.
  """
  # Resolve any symlink
  root_dir = os.path.realpath(root_dir)

  api = trace_inputs.get_api()
  cmd = [executable, '--gtest_filter=%s' % test_case]

  if not leak:
    f, logname = tempfile.mkstemp(prefix='trace')
    os.close(f)
  else:
    logname = '%s.%s.log' % (executable, test_case.replace('/', '-'))
    f = None

  try:
    simplified = None
    processes = 0
    for i in range(10):
      start = time.time()
      returncode, output = trace_inputs.trace(
          logname, cmd, os.path.join(root_dir, cwd_dir), api, True)
      if returncode and i < 5:
        print '\nFailed while running: %s' % ' '.join(cmd)
        continue
      duration = time.time() - start
      try:
        results, simplified = trace_inputs.load_trace(logname, root_dir, api)
        break
      except Exception:
        print '\nFailed loading the trace for: %s' % ' '.join(cmd)
    if simplified:
      variables = trace_inputs.generate_dict(simplified, cwd_dir, product_dir)
    else:
      variables = {}
    return {
      'case': test_case,
      'duration': duration,
      'output': output,
      'processes': processes,
      'result': returncode,
      'variables': variables,
      'results': results.flatten(),
    }
  finally:
    if f:
      os.remove(logname)
Example #15
  def _execute_trace(self, command):
    # Similar to what trace_test_cases.py does.
    api = trace_inputs.get_api()
    _, _ = trace_inputs.trace(self.log, command, self.cwd, api, True)
    # TODO(maruel): Check
    #self.assertEqual(0, returncode)
    #self.assertEqual('', output)
    def blacklist(f):
      return f.endswith(('.pyc', '.svn', 'do_not_care.txt'))
    data = api.parse_log(self.log, blacklist, None)
    self.assertEqual(1, len(data))
    if 'exception' in data[0]:
      raise data[0]['exception'][0], \
          data[0]['exception'][1], \
          data[0]['exception'][2]

    return data[0]['results'].strip_root(unicode(ROOT_DIR))
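The tail of this helper is the post-processing pattern most tests here rely on: parse the log, re-raise any stored exception, then re-root and flatten the results for comparison. In isolation, assuming a single trace in the log (names as in the method above):

data = api.parse_log(self.log, blacklist, None)   # one dict per trace
results = data[0]['results'].strip_root(unicode(ROOT_DIR))
flat = results.flatten()  # plain nested dicts, as compared in the tests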
Example #17
def MODEread(_outdir, state):
    """Reads the trace file generated with --mode=trace."""
    api = trace_inputs.get_api()
    logfile = state.result_file + '.log'
    if not os.path.isfile(logfile):
        return 1
    try:
        results = trace_inputs.load_trace(logfile, state.root_dir, api,
                                          isolate_common.default_blacklist)
        value = isolate_common.generate_isolate(results.existent,
                                                state.root_dir,
                                                state.saved_state.variables,
                                                state.result.relative_cwd)
        isolate_common.pretty_print(value, sys.stdout)
        return 0
    except trace_inputs.TracingFailure, e:
        print >> sys.stderr, ('\nReading traces failed for: %s' %
                              ' '.join(state.result.command))
        print >> sys.stderr, str(e)
        return 1
Example #18
def read(complete_state):
  """Reads a trace and returns the .isolate dictionary."""
  api = trace_inputs.get_api()
  logfile = complete_state.result_file + '.log'
  if not os.path.isfile(logfile):
    raise ExecutionError(
        'No log file \'%s\' to read, did you forget to \'trace\'?' % logfile)
  try:
    results = trace_inputs.load_trace(
        logfile, complete_state.root_dir, api, isolate_common.default_blacklist)
    tracked, touched = isolate_common.split_touched(results.existent)
    value = isolate_common.generate_isolate(
        tracked,
        [],
        touched,
        complete_state.root_dir,
        complete_state.saved_state.variables,
        complete_state.result.relative_cwd)
    return value
  except trace_inputs.TracingFailure, e:
    raise ExecutionError(
        'Reading traces failed for: %s\n%s' %
          (' '.join(complete_state.result.command), str(e)))
Example #19
def MODEtrace(_outdir, state):
    """Traces the target using trace_inputs.py."""
    logging.info('Running %s, cwd=%s' %
                 (state.result.command,
                  os.path.join(state.root_dir, state.result.relative_cwd)))
    if not state.result.command:
        print 'No command to run'
        return 1
    api = trace_inputs.get_api()
    logfile = state.result_file + '.log'
    try:
        result = 0
        with api.get_tracer(logfile) as tracer:
            result, _ = tracer.trace(
                state.result.command,
                os.path.join(state.root_dir, state.result.relative_cwd),
                'default', True)
        return result
    except trace_inputs.TracingFailure, e:
        print >> sys.stderr, ('\nTracing failed for: %s' %
                              ' '.join(state.result.command))
        print >> sys.stderr, str(e)
        return 1
Example #20
def trace_test_cases(cmd, cwd_dir, test_cases, jobs, logname):
  """Traces test cases one by one."""
  assert os.path.isabs(cwd_dir) and os.path.isdir(cwd_dir)

  if not test_cases:
    return 0

  # Resolve any symlink.
  cwd_dir = os.path.realpath(cwd_dir)
  assert os.path.isdir(cwd_dir)

  progress = run_test_cases.Progress(len(test_cases))
  with run_test_cases.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
    api = trace_inputs.get_api()
    api.clean_trace(logname)
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, cmd, cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(function, test_case)

      pool.join(progress, 0.1)
  print('')
  return 0
Example #21
def trace_test_cases(cmd, cwd_dir, test_cases, jobs, logname):
  """Traces each test cases individually but all in parallel."""
  assert os.path.isabs(cwd_dir) and os.path.isdir(cwd_dir), cwd_dir

  if not test_cases:
    return []

  # Resolve any symlink.
  cwd_dir = os.path.realpath(cwd_dir)
  assert os.path.isdir(cwd_dir)

  api = trace_inputs.get_api()
  api.clean_trace(logname)

  jobs = jobs or multiprocessing.cpu_count()
  # Try to do black magic here by guessing a few of the run_test_cases.py
  # flags. It's cheesy but it works.
  for i, v in enumerate(cmd):
    if v.endswith('run_test_cases.py'):
      # Found it. Process the arguments here.
      _, options, _ = run_test_cases.process_args(cmd[i:])
      # Always override with the lowest value.
      jobs = min(options.jobs, jobs)
      break

  columns = [('index', 0), ('size', len(test_cases))]
  progress = threading_utils.Progress(columns)
  with threading_utils.ThreadPoolWithProgress(
      progress, jobs, jobs, len(test_cases)) as pool:
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, cmd, cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(0, function, test_case)

      results = pool.join()
  print('')
  return results
Example #22
def trace_test_cases(cmd, cwd_dir, test_cases, jobs, logname):
  """Traces each test cases individually but all in parallel."""
  assert os.path.isabs(cwd_dir) and os.path.isdir(cwd_dir), cwd_dir

  if not test_cases:
    return []

  # Resolve any symlink.
  cwd_dir = os.path.realpath(cwd_dir)
  assert os.path.isdir(cwd_dir)

  api = trace_inputs.get_api()
  api.clean_trace(logname)

  threads = jobs or multiprocessing.cpu_count()
  with run_test_cases.ThreadPool(threads, len(test_cases)) as pool:
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, cmd, cwd_dir, pool.tasks.progress).map
      for test_case in test_cases:
        pool.add_task(function, test_case)

      results = pool.join()
  print('')
  return results
Example #23
    def test_trace_multiple(self):
        # Starts parallel threads and traces parallel child processes simultaneously.
        # Some are started from the 'tests' directory, others from this script's
        # directory. One trace fails. Verify everything still goes on.
        parallel = 8

        def trace(tracer, cmd, cwd, tracename):
            resultcode, output = tracer.trace(cmd, cwd, tracename, True)
            return (tracename, resultcode, output)

        with threading_utils.ThreadPool(parallel, parallel, 0) as pool:
            api = trace_inputs.get_api()
            with api.get_tracer(self.log) as tracer:
                pool.add_task(0, trace, tracer, self.get_child_command(False),
                              ROOT_DIR, 'trace1')
                pool.add_task(0, trace, tracer, self.get_child_command(True),
                              self.cwd, 'trace2')
                pool.add_task(0, trace, tracer, self.get_child_command(False),
                              ROOT_DIR, 'trace3')
                pool.add_task(0, trace, tracer, self.get_child_command(True),
                              self.cwd, 'trace4')
                # Have this one fail since it's started from the wrong directory.
                pool.add_task(0, trace, tracer, self.get_child_command(False),
                              self.cwd, 'trace5')
                pool.add_task(0, trace, tracer, self.get_child_command(True),
                              self.cwd, 'trace6')
                pool.add_task(0, trace, tracer, self.get_child_command(False),
                              ROOT_DIR, 'trace7')
                pool.add_task(0, trace, tracer, self.get_child_command(True),
                              self.cwd, 'trace8')
                trace_results = pool.join()

        def blacklist(f):
            return f.endswith(('.pyc', 'do_not_care.txt', '.git', '.svn'))

        actual_results = api.parse_log(self.log, blacklist, None)
        self.assertEqual(8, len(trace_results))
        self.assertEqual(8, len(actual_results))

        # Convert to dict keyed on the trace name, simpler to verify.
        trace_results = dict((i[0], i[1:]) for i in trace_results)
        actual_results = dict((x.pop('trace'), x) for x in actual_results)
        self.assertEqual(sorted(trace_results), sorted(actual_results))

        # It'd be nice to start different kinds of processes.
        expected_results = [
            self._gen_dict_full(),
            self._gen_dict_full_gyp(),
            self._gen_dict_full(),
            self._gen_dict_full_gyp(),
            self._gen_dict_wrong_path(),
            self._gen_dict_full_gyp(),
            self._gen_dict_full(),
            self._gen_dict_full_gyp(),
        ]
        self.assertEqual(len(expected_results), len(trace_results))

        # See the comment above about the trace that fails because it's started from
        # the wrong directory.
        busted = 4
        for index, key in enumerate(sorted(actual_results)):
            self.assertEqual('trace%d' % (index + 1), key)
            self.assertEqual(2, len(trace_results[key]))
            # returncode
            self.assertEqual(0 if index != busted else 2,
                             trace_results[key][0])
            # output
            self.assertEqual(actual_results[key]['output'],
                             trace_results[key][1])

            self.assertEqual(['output', 'results'],
                             sorted(actual_results[key]))
            results = actual_results[key]['results']
            results = results.strip_root(unicode(ROOT_DIR))
            actual = results.flatten()
            self.assertTrue(actual['root'].pop('pid'))
            if index != busted:
                self.assertTrue(actual['root']['children'][0].pop('pid'))
            self.assertEqual(expected_results[index], actual)
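The test above funnels eight traces into one log, each under a distinct tracename; parse_log() later returns one entry per name. A reduced two-trace sketch of that fan-out, with placeholder commands:

with threading_utils.ThreadPool(2, 2, 0) as pool:
  api = trace_inputs.get_api()
  with api.get_tracer('multi.log') as tracer:
    pool.add_task(0, tracer.trace, ['python', 'child.py'], '.', 'a', True)
    pool.add_task(0, tracer.trace, ['python', 'child.py'], '.', 'b', True)
    outcomes = pool.join()  # [(returncode, output), ...]
entries = api.parse_log('multi.log', lambda _: False, None)  # two entries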
Example #24
def trace_test_cases(
    executable, root_dir, cwd_dir, variables, test_cases, jobs, output_file):
  """Traces test cases one by one."""
  assert not os.path.isabs(cwd_dir)
  assert os.path.isabs(root_dir) and os.path.isdir(root_dir)
  assert os.path.isfile(executable) and os.path.isabs(executable)

  if not test_cases:
    return 0

  # Resolve any symlink.
  root_dir = os.path.realpath(root_dir)
  full_cwd_dir = os.path.normpath(os.path.join(root_dir, cwd_dir))
  assert os.path.isdir(full_cwd_dir)
  logname = output_file + '.logs'

  progress = worker_pool.Progress(len(test_cases))
  with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
    api = trace_inputs.get_api()
    api.clean_trace(logname)
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, executable, full_cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(function, test_case)

      values = pool.join(progress, 0.1)

  print ''
  print '%.1fs Done post-processing logs. Parsing logs.' % (
      time.time() - progress.start)
  results = api.parse_log(logname, isolate_common.default_blacklist)
  print '%.1fs Done parsing logs.' % (
      time.time() - progress.start)

  # Strips to root_dir.
  results_processed = {}
  for item in results:
    if 'results' in item:
      item = item.copy()
      item['results'] = item['results'].strip_root(root_dir)
      results_processed[item['trace']] = item
    else:
      print >> sys.stderr, 'Got exception while tracing %s: %s' % (
          item['trace'], item['exception'])
  print '%.1fs Done stripping root.' % (
      time.time() - progress.start)

  # Flatten.
  flattened = {}
  for item_list in values:
    for item in item_list:
      if item['valid']:
        test_case = item['test_case']
        tracename = test_case.replace('/', '-')
        flattened[test_case] = results_processed[tracename].copy()
        item_results = flattened[test_case]['results']
        tracked, touched = isolate_common.split_touched(item_results.existent)
        flattened[test_case].update({
            'processes': len(list(item_results.process.all)),
            'results': item_results.flatten(),
            'duration': item['duration'],
            'returncode': item['returncode'],
            'valid': item['valid'],
            'variables':
              isolate_common.generate_simplified(
                  tracked,
                  [],
                  touched,
                  root_dir,
                  variables,
                  cwd_dir),
          })
        del flattened[test_case]['trace']
  print '%.1fs Done flattening.' % (
      time.time() - progress.start)

  # Make it dense if there are more than 20 results.
  trace_inputs.write_json(
      output_file,
      flattened,
      False)

  # Also write the .isolate file.
  # First, get all the files from all results. Use a map to remove dupes.
  files = {}
  for item in results_processed.itervalues():
    files.update((f.full_path, f) for f in item['results'].existent)
  # Convert back to a list, discard the keys.
  files = files.values()
  tracked, touched = isolate_common.split_touched(files)
  value = isolate_common.generate_isolate(
      tracked,
      [],
      touched,
      root_dir,
      variables,
      cwd_dir)
  with open('%s.isolate' % output_file, 'wb') as f:
    isolate_common.pretty_print(value, f)
  return 0
Example #25
  def test_trace_multiple(self):
    # Starts parallel threads and traces parallel child processes simultaneously.
    # Some are started from the 'tests' directory, others from this script's
    # directory. One trace fails. Verify everything still goes on.
    parallel = 8

    def trace(tracer, cmd, cwd, tracename):
      resultcode, output = tracer.trace(cmd, cwd, tracename, True)
      return (tracename, resultcode, output)

    with threading_utils.ThreadPool(parallel, parallel, 0) as pool:
      api = trace_inputs.get_api()
      with api.get_tracer(self.log) as tracer:
        pool.add_task(
            0, trace, tracer, self.get_child_command(False), ROOT_DIR, 'trace1')
        pool.add_task(
            0, trace, tracer, self.get_child_command(True), self.cwd, 'trace2')
        pool.add_task(
            0, trace, tracer, self.get_child_command(False), ROOT_DIR, 'trace3')
        pool.add_task(
            0, trace, tracer, self.get_child_command(True), self.cwd, 'trace4')
        # Have this one fail since it's started from the wrong directory.
        pool.add_task(
            0, trace, tracer, self.get_child_command(False), self.cwd, 'trace5')
        pool.add_task(
            0, trace, tracer, self.get_child_command(True), self.cwd, 'trace6')
        pool.add_task(
            0, trace, tracer, self.get_child_command(False), ROOT_DIR, 'trace7')
        pool.add_task(
            0, trace, tracer, self.get_child_command(True), self.cwd, 'trace8')
        trace_results = pool.join()
    def blacklist(f):
      return f.endswith(('.pyc', 'do_not_care.txt', '.git', '.svn'))
    actual_results = api.parse_log(self.log, blacklist, None)
    self.assertEqual(8, len(trace_results))
    self.assertEqual(8, len(actual_results))

    # Convert to dict keyed on the trace name, simpler to verify.
    trace_results = dict((i[0], i[1:]) for i in trace_results)
    actual_results = dict((x.pop('trace'), x) for x in actual_results)
    self.assertEqual(sorted(trace_results), sorted(actual_results))

    # It'd be nice to start different kinds of processes.
    expected_results = [
      self._gen_dict_full(),
      self._gen_dict_full_gyp(),
      self._gen_dict_full(),
      self._gen_dict_full_gyp(),
      self._gen_dict_wrong_path(),
      self._gen_dict_full_gyp(),
      self._gen_dict_full(),
      self._gen_dict_full_gyp(),
    ]
    self.assertEqual(len(expected_results), len(trace_results))

    # See the comment above about the trace that fails because it's started from
    # the wrong directory.
    busted = 4
    for index, key in enumerate(sorted(actual_results)):
      self.assertEqual('trace%d' % (index + 1), key)
      self.assertEqual(2, len(trace_results[key]))
      # returncode
      self.assertEqual(0 if index != busted else 2, trace_results[key][0])
      # output
      self.assertEqual(actual_results[key]['output'], trace_results[key][1])

      self.assertEqual(['output', 'results'], sorted(actual_results[key]))
      results = actual_results[key]['results']
      results = results.strip_root(unicode(ROOT_DIR))
      actual = results.flatten()
      self.assertTrue(actual['root'].pop('pid'))
      if index != busted:
        self.assertTrue(actual['root']['children'][0].pop('pid'))
      self.assertEqual(expected_results[index], actual)
Example #26
def trace_test_cases(executable, root_dir, cwd_dir, variables, whitelist,
                     blacklist, jobs, index, shards, output_file):
    """Traces test cases one by one."""
    test_cases = get_test_cases(executable, whitelist, blacklist, index,
                                shards)
    if not test_cases:
        return

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + '.logs'

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ''
    print '%.1fs Done post-processing logs. Parsing logs.' % (time.time() -
                                                              progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print '%.1fs Done parsing logs.' % (time.time() - progress.start)

    # Strips to root_dir.
    results_processed = {}
    for item in results:
        if 'results' in item:
            item = item.copy()
            item['results'] = item['results'].strip_root(root_dir)
            results_processed[item['trace']] = item
        else:
            print >> sys.stderr, 'Got exception while tracing %s: %s' % (
                item['trace'], item['exception'])
    print '%.1fs Done stripping root.' % (time.time() - progress.start)

    # Flatten.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item['valid']:
                test_case = item['test_case']
                tracename = test_case.replace('/', '-')
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]['results']
                flattened[test_case].update({
                    'processes': len(list(item_results.process.all)),
                    'results': item_results.flatten(),
                    'duration': item['duration'],
                    'returncode': item['returncode'],
                    'valid': item['valid'],
                    'variables': isolate_common.generate_simplified(
                        item_results.existent, root_dir, variables, cwd_dir),
                })
                del flattened[test_case]['trace']
    print '%.1fs Done flattening.' % (time.time() - progress.start)

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item['results'].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables,
                                            cwd_dir)
    with open('%s.isolate' % output_file, 'wb') as f:
        isolate_common.pretty_print(value, f)
    return 0
Example #27
def trace_test_cases(executable, root_dir, cwd_dir, variables, test_cases, jobs, output_file):
    """Traces test cases one by one."""
    if not test_cases:
        return 0

    # Resolve any symlink.
    root_dir = os.path.realpath(root_dir)
    full_cwd_dir = os.path.join(root_dir, cwd_dir)
    logname = output_file + ".logs"

    progress = worker_pool.Progress(len(test_cases))
    with worker_pool.ThreadPool(jobs or multiprocessing.cpu_count()) as pool:
        api = trace_inputs.get_api()
        api.clean_trace(logname)
        with api.get_tracer(logname) as tracer:
            function = Tracer(tracer, executable, full_cwd_dir, progress).map
            for test_case in test_cases:
                pool.add_task(function, test_case)

            values = pool.join(progress, 0.1)

    print ""
    print "%.1fs Done post-processing logs. Parsing logs." % (time.time() - progress.start)
    results = api.parse_log(logname, isolate_common.default_blacklist)
    print "%.1fs Done parsing logs." % (time.time() - progress.start)

    # Strips to root_dir.
    results_processed = {}
    for item in results:
        if "results" in item:
            item = item.copy()
            item["results"] = item["results"].strip_root(root_dir)
            results_processed[item["trace"]] = item
        else:
            print >> sys.stderr, "Got exception while tracing %s: %s" % (item["trace"], item["exception"])
    print "%.1fs Done stripping root." % (time.time() - progress.start)

    # Flatten.
    flattened = {}
    for item_list in values:
        for item in item_list:
            if item["valid"]:
                test_case = item["test_case"]
                tracename = test_case.replace("/", "-")
                flattened[test_case] = results_processed[tracename].copy()
                item_results = flattened[test_case]["results"]
                flattened[test_case].update(
                    {
                        "processes": len(list(item_results.process.all)),
                        "results": item_results.flatten(),
                        "duration": item["duration"],
                        "returncode": item["returncode"],
                        "valid": item["valid"],
                        "variables": isolate_common.generate_simplified(
                            item_results.existent, root_dir, variables, cwd_dir
                        ),
                    }
                )
                del flattened[test_case]["trace"]
    print "%.1fs Done flattening." % (time.time() - progress.start)

    # Make it dense if there are more than 20 results.
    trace_inputs.write_json(output_file, flattened, False)

    # Also write the .isolate file.
    # First, get all the files from all results. Use a map to remove dupes.
    files = {}
    for item in results_processed.itervalues():
        files.update((f.full_path, f) for f in item["results"].existent)
    # Convert back to a list, discard the keys.
    files = files.values()

    value = isolate_common.generate_isolate(files, root_dir, variables, cwd_dir)
    with open("%s.isolate" % output_file, "wb") as f:
        isolate_common.pretty_print(value, f)
    return 0