Example #1
File: util.py Project: axitkhurana/stem
def python3_run_tests():
  println()
  println()

  python3_runner = os.path.join(get_python3_destination(), "run_tests.py")
  exit_status = os.system("python3 %s %s" % (python3_runner, " ".join(sys.argv[1:])))
  sys.exit(exit_status)
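
Note: every snippet in this collection writes through stem's test-output helper println() with attribute flags such as STATUS, ERROR and NO_NL. As a rough mental model only (not stem's actual implementation), a minimal stand-in could look like this, treating the attribute constants as ANSI escape codes:

import sys

# assumed stand-ins; stem's real constants and signature differ
STATUS, SUCCESS, ERROR, SUBSTATUS = '\x1b[34m', '\x1b[32m', '\x1b[31m', '\x1b[36m'
NO_NL = 'NO_NL'

def println(msg = '', *attr):
  color = next((a for a in attr if a.startswith('\x1b[')), None)

  if color:
    msg = '%s%s\x1b[0m' % (color, msg)

  if NO_NL not in attr:
    msg += '\n'  # NO_NL suppresses the newline so a result can be appended later

  sys.stdout.write(msg)
  sys.stdout.flush()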
Example #2
File: util.py Project: axitkhurana/stem
  def run(self):
    println("  %s..." % self.label, STATUS, NO_NL)

    padding = 50 - len(self.label)
    println(" " * padding, NO_NL)

    try:
      if self.args:
        result = self.runner(*self.args)
      else:
        result = self.runner()

      output_msg = "done"

      if isinstance(result, str):
        output_msg = result

      println(output_msg, STATUS)

      if isinstance(result, (list, tuple)):
        for line in result:
          println("    %s" % line, STATUS)
    except Exception as exc:
      output_msg = str(exc)

      if not output_msg or self.is_required:
        output_msg = "failed"

      println(output_msg, ERROR)
      self.error = exc
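
Task.run() above pairs a label with a runner callable, records any exception on task.error, and prints the outcome into a 50-column gutter. A minimal invocation sketch, assuming the Task(label, runner, args) construction that the main() functions later in this collection use:

import sys

def check_example():
  return 'everything looks fine'  # a string result replaces the default 'done'

task = Task('checking example', check_example)  # Task as defined above
task.run()

if task.error:
  sys.exit(1)  # how callers react when a required task fails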
Example #3
  def run(self):
    println('  %s...' % self.label, STATUS, NO_NL)

    padding = 50 - len(self.label)
    println(' ' * padding, NO_NL)

    try:
      if self.args:
        self.result = self.runner(*self.args)
      else:
        self.result = self.runner()

      self.is_successful = True
      output_msg = 'done'

      if self.print_result and isinstance(self.result, str):
        output_msg = self.result

      println(output_msg, STATUS)

      if self.print_result and isinstance(self.result, (list, tuple)):
        for line in self.result:
          println('    %s' % line, STATUS)
    except Exception as exc:
      output_msg = str(exc)

      if not output_msg or self.is_required:
        output_msg = 'failed'

      println(output_msg, ERROR)
      self.error = exc
Example #4
File: runner.py Project: sree-dev/stem
  def _start_tor(self, tor_cmd):
    """
    Initializes a tor process. This blocks until initialization completes or we
    error out.

    :param str tor_cmd: command to start tor with

    :raises: OSError if we either fail to create the tor process or reach a timeout without success
    """

    println("Starting tor...\n", STATUS)
    start_time = time.time()

    try:
      # wait to fully complete if we're running tests with network activity,
      # otherwise finish after local bootstrapping
      complete_percent = 100 if Target.ONLINE in self.attribute_targets else 5

      # prints output from tor's stdout while it starts up
      print_init_line = lambda line: println("  %s" % line, SUBSTATUS)

      torrc_dst = os.path.join(self._test_dir, "torrc")
      self._tor_process = stem.process.launch_tor(tor_cmd, None, torrc_dst, complete_percent, print_init_line)

      runtime = time.time() - start_time
      println("  done (%i seconds)\n" % runtime, STATUS)
    except OSError as exc:
      test.output.print_error("  failed to start tor: %s\n" % exc)
      raise exc
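
The launch_tor() call above is positional; Example #9 later in this collection makes the same call with keywords, which reads better. Mapping this call onto those keyword names, reusing the variables from the example above (naming the second positional None 'args', tor's extra command-line arguments, is an assumption based on stem's documented signature, so verify it against your stem version):

tor_process = stem.process.launch_tor(
  tor_cmd = tor_cmd,
  args = None,  # assumed name for the second positional parameter
  torrc_path = torrc_dst,
  completion_percent = complete_percent,
  init_msg_handler = print_init_line,
)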
Example #5
def main():
  start_time = time.time()

  try:
    stem.prereq.check_requirements()
  except ImportError as exc:
    println("%s\n" % exc)
    sys.exit(1)
Example #6
def _run_test(test_class, output_filters, logging_buffer):
  test.output.print_divider(test_class.__module__)
  suite = unittest.TestLoader().loadTestsFromTestCase(test_class)

  test_results = StringIO.StringIO()
  run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)

  sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
  println()
  test.output.print_logging(logging_buffer)

  return run_result
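
Example #6 runs the suite with unittest's chatter captured in a string buffer so the filters can rewrite it before it reaches stdout. The capture pattern in isolation, using plain unittest (io.StringIO is the Python 3 spelling of the StringIO.StringIO buffer above):

import io
import unittest

class TestExample(unittest.TestCase):
  def test_addition(self):
    self.assertEqual(2 + 2, 4)

suite = unittest.TestLoader().loadTestsFromTestCase(TestExample)
buffer = io.StringIO()
run_result = unittest.TextTestRunner(buffer, verbosity = 2).run(suite)

print(buffer.getvalue())           # the 'test_addition ... ok' lines unittest produced
print(run_result.wasSuccessful())  # True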
Example #7
File: runner.py Project: patrickod/stem
  def stop(self):
    """
    Stops our tor test instance and cleans up any temporary resources.
    """

    with self._runner_lock:
      println('Shutting down tor... ', STATUS, NO_NL)

      if self._owner_controller:
        self._owner_controller.close()
        self._owner_controller = None

      if self._tor_process:
        # if the tor process has stopped on its own then the following raises
        # an OSError ([Errno 3] No such process)

        try:
          self._tor_process.kill()
        except OSError:
          pass

        self._tor_process.wait()  # blocks until the process is done

      # if we've made a temporary data directory then clean it up
      if self._test_dir and CONFIG['integ.test_directory'] == '':
        shutil.rmtree(self._test_dir, ignore_errors = True)

      # reverts any mocking of stem.socket.recv_message
      if self._original_recv_message:
        stem.socket.recv_message = self._original_recv_message
        self._original_recv_message = None

      # clean up our socket directory if we made one
      socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)

      if os.path.exists(socket_dir):
        shutil.rmtree(socket_dir, ignore_errors = True)

      self._test_dir = ''
      self._tor_cmd = None
      self._tor_cwd = ''
      self._torrc_contents = ''
      self._custom_opts = None
      self._tor_process = None

      println('done', STATUS)
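
The shutdown sequence above (kill, swallow the error if tor already exited, then wait so the process is reaped) is the standard pattern for any subprocess.Popen. Standalone, with a throwaway process (Unix, for the sleep command):

import subprocess

proc = subprocess.Popen(['sleep', '60'])

try:
  proc.kill()
except OSError:
  pass  # the process already exited on its own

proc.wait()  # blocks until the process is reaped, so no zombie lingers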
Example #8
File: runner.py Project: soult/stem
    def is_running(self):
        """
    Checks if we're running a tor test instance and that it's alive.

    :returns: True if we have a running tor test instance, False otherwise
    """

        with self._runner_lock:
            # Check for an unexpected shutdown by calling subprocess.Popen.poll(),
            # which returns the exit code or None if we're still running.

            if self._tor_process and self._tor_process.poll() is not None:
                # clean up the temporary resources and note the unexpected shutdown
                self.stop()
                println("tor shut down unexpectedly", ERROR)

            return bool(self._tor_process)
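
The check above leans on standard subprocess behavior: poll() returns None while the child runs and its exit status afterwards. A quick demonstration (Unix, for the sleep command):

import subprocess
import time

proc = subprocess.Popen(['sleep', '1'])
print(proc.poll())  # None, still running

time.sleep(2)
print(proc.poll())  # 0, the exit status now that it has finished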
Example #9
File: runner.py Project: patrickod/stem
  def _start_tor(self, tor_cmd):
    """
    Initializes a tor process. This blocks until initialization completes or we
    error out.

    :param str tor_cmd: command to start tor with

    :raises: OSError if we either fail to create the tor process or reach a timeout without success
    """

    println('Starting %s...\n' % tor_cmd, STATUS)
    start_time = time.time()

    try:
      self._tor_process = stem.process.launch_tor(
        tor_cmd = tor_cmd,
        torrc_path = os.path.join(self._test_dir, 'torrc'),
        completion_percent = 100 if test.Target.ONLINE in self.attribute_targets else 5,
        init_msg_handler = lambda line: println('  %s' % line, SUBSTATUS),
        take_ownership = True,
      )

      runtime = time.time() - start_time
      println('  done (%i seconds)\n' % runtime, STATUS)
    except OSError as exc:
      println('  failed to start tor: %s\n' % exc, ERROR)
      raise exc
Example #10
File: runner.py Project: jt-wang/stem
    def stop(self):
        """
    Stops our tor test instance and cleans up any temporary resources.
    """

        with self._runner_lock:
            println("Shutting down tor... ", STATUS, NO_NL)

            if self._tor_process:
                # if the tor process has stopped on its own then the following raises
                # an OSError ([Errno 3] No such process)

                try:
                    self._tor_process.kill()
                except OSError:
                    pass

                self._tor_process.communicate()  # blocks until the process is done

            # if we've made a temporary data directory then clean it up
            if self._test_dir and CONFIG["integ.test_directory"] == "":
                shutil.rmtree(self._test_dir, ignore_errors=True)

            # reverts any mocking of stem.socket.recv_message
            if self._original_recv_message:
                stem.socket.recv_message = self._original_recv_message
                self._original_recv_message = None

            # clean up our socket directory if we made one
            socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)

            if os.path.exists(socket_dir):
                shutil.rmtree(socket_dir, ignore_errors=True)

            self._test_dir = ""
            self._tor_cmd = None
            self._tor_cwd = ""
            self._torrc_contents = ""
            self._custom_opts = None
            self._tor_process = None

            println("done", STATUS)
Example #11
File: util.py Project: 5l1v3r1/stem
def run_tasks(category, *tasks):
  """
  Runs a series of :class:`test.util.Task` instances. This simply prints 'done'
  or 'failed' for each unless we fail one that is marked as being required. If
  that happens then we print its error message and call sys.exit().

  :param str category: label for the series of tasks
  :param list tasks: **Task** instances to be run
  """

  test.output.print_divider(category, True)

  for task in tasks:
    task.run()

    if task.is_required and task.error:
      println("\n%s\n" % task.error, ERROR)
      sys.exit(1)

  println()
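
For reference, this is how the main() functions later in this collection drive run_tasks(); a required task that errors terminates the whole run via sys.exit(1):

test.util.run_tasks(
  'INITIALISING',
  Task('checking stem version', test.util.check_stem_version),
  Task('checking python version', test.util.check_python_version),
)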
Example #12
File: task.py Project: patrickod/stem
  def run(self):
    start_time = time.time()
    println('  %s...' % self.label, STATUS, NO_NL)

    padding = 50 - len(self.label)
    println(' ' * padding, NO_NL)

    try:
      if self._is_background_task:
        def _run_wrapper(conn, runner, args):
          os.nice(15)
          conn.send(runner(*args) if args else runner())
          conn.close()

        self._background_pipe, child_pipe = multiprocessing.Pipe()
        self._background_process = multiprocessing.Process(target = _run_wrapper, args = (child_pipe, self.runner, self.args))
        self._background_process.start()
      else:
        self.result = self.runner(*self.args) if self.args else self.runner()

      self.is_successful = True
      output_msg = 'running' if self._is_background_task else 'done'

      if self.result and self.print_result and isinstance(self.result, str):
        output_msg = self.result
      elif self.print_runtime:
        output_msg += ' (%0.1fs)' % (time.time() - start_time)

      println(output_msg, STATUS)

      if self.print_result and isinstance(self.result, (list, tuple)):
        for line in self.result:
          println('    %s' % line, STATUS)
    except Exception as exc:
      output_msg = str(exc)

      if not output_msg or self.is_required:
        output_msg = 'failed'

      println(output_msg, ERROR)
      self.error = exc
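
The background branch above ships the runner's result back to the parent over a multiprocessing.Pipe after os.nice() lowers the child's scheduling priority. The pattern in isolation (os.nice is Unix only):

import multiprocessing
import os

def _run_wrapper(conn, runner, args):
  os.nice(15)  # deprioritize the background work
  conn.send(runner(*args) if args else runner())
  conn.close()

def slow_square(x):
  return x * x

if __name__ == '__main__':
  parent_conn, child_conn = multiprocessing.Pipe()
  proc = multiprocessing.Process(target = _run_wrapper, args = (child_conn, slow_square, (7,)))
  proc.start()

  print(parent_conn.recv())  # 49, once the child finishes
  proc.join()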
Example #13
File: runner.py Project: sammyshj/stem
  def _start_tor(self, tor_cmd):
    """
    Initializes a tor process. This blocks until initialization completes or we
    error out.

    :param str tor_cmd: command to start tor with

    :raises: OSError if we either fail to create the tor process or reach a timeout without success
    """

    println('Starting %s...\n' % tor_cmd, STATUS)
    start_time = time.time()

    try:
      # wait to fully complete if we're running tests with network activity,
      # otherwise finish after local bootstrapping

      complete_percent = 100 if Target.ONLINE in self.attribute_targets else 5

      def print_init_line(line):
        println('  %s' % line, SUBSTATUS)

      torrc_dst = os.path.join(self._test_dir, 'torrc')
      self._tor_process = stem.process.launch_tor(tor_cmd, None, torrc_dst, complete_percent, print_init_line, take_ownership = True)

      runtime = time.time() - start_time
      println('  done (%i seconds)\n' % runtime, STATUS)
    except OSError as exc:
      println('  failed to start tor: %s\n' % exc, ERROR)
      raise exc
Example #14
File: task.py Project: patrickod/stem
  def run(self):
    if self.is_available:
      return super(StaticCheckTask, self).run()
    else:
      println('  %s...' % self.label, STATUS, NO_NL)
      println(' ' * (50 - len(self.label)), NO_NL)
      println('unavailable', STATUS)
Example #15
    def _start_tor(self, tor_cmd):
        """
    Initializes a tor process. This blocks until initialization completes or we
    error out.

    :param str tor_cmd: command to start tor with

    :raises: OSError if we either fail to create the tor process or reached a timeout without success
    """

        println('Starting %s...\n' % tor_cmd, STATUS)
        start_time = time.time()

        try:
            # wait to fully complete if we're running tests with network activity,
            # otherwise finish after local bootstrapping

            complete_percent = 100 if Target.ONLINE in self.attribute_targets else 5

            def print_init_line(line):
                println('  %s' % line, SUBSTATUS)

            torrc_dst = os.path.join(self._test_dir, 'torrc')
            self._tor_process = stem.process.launch_tor(tor_cmd,
                                                        None,
                                                        torrc_dst,
                                                        complete_percent,
                                                        print_init_line,
                                                        take_ownership=True)

            runtime = time.time() - start_time
            println('  done (%i seconds)\n' % runtime, STATUS)
        except OSError as exc:
            println('  failed to start tor: %s\n' % exc, ERROR)
            raise exc
Example #16
    def assert_tor_is_running(self):
        """
    Checks if our tor process is running. If not, this prints an error and
    provides **False**.
    """

        if not self._tor_process:
            println('Tor process failed to initialize', ERROR)
            return False

        process_status = self._tor_process.poll()  # None if running

        if process_status is None:
            return True
        else:
            process_output = stem.util.str_tools._to_unicode(
                self._tor_process.stdout.read() + b'\n\n' +
                self._tor_process.stderr.read()).strip()
            println(
                '\n%s\nOur tor process ended prematurely with exit status %s\n%s\n\n%s'
                % ('=' * 60, process_status, '=' * 60, process_output), ERROR)
            return False
Example #17
    def run(self):
        start_time = time.time()
        println('  %s...' % self.label, STATUS, NO_NL)

        padding = 50 - len(self.label)
        println(' ' * padding, NO_NL)

        try:
            if self._is_background_task:
                self._background_process = stem.util.system.DaemonTask(
                    self.runner, self.args, start=True)
            else:
                self.result = self.runner(
                    *self.args) if self.args else self.runner()

            self.is_successful = True
            output_msg = 'running' if self._is_background_task else 'done'

            if self.result and self.print_result and isinstance(
                    self.result, str):
                output_msg = self.result
            elif self.print_runtime:
                output_msg += ' (%0.1fs)' % (time.time() - start_time)

            println(output_msg, STATUS)

            if self.print_result and isinstance(self.result, (list, tuple)):
                for line in self.result:
                    println('    %s' % line, STATUS)
        except Exception as exc:
            output_msg = str(exc)

            if not output_msg or self.is_required:
                output_msg = 'failed'

            println(output_msg, ERROR)
            self.error = exc
Example #18
File: runner.py Project: sree-dev/stem
  def stop(self):
    """
    Stops our tor test instance and cleans up any temporary resources.
    """

    with self._runner_lock:
      println("Shutting down tor... ", STATUS, NO_NL)

      if self._tor_process:
        # if the tor process has stopped on its own then the following raises
        # an OSError ([Errno 3] No such process)

        try:
          self._tor_process.kill()
        except OSError:
          pass

        self._tor_process.communicate()  # blocks until the process is done

      # if we've made a temporary data directory then clean it up
      if self._test_dir and CONFIG["integ.test_directory"] == "":
        shutil.rmtree(self._test_dir, ignore_errors = True)

      # reverts any mocking of stem.socket.recv_message
      if self._original_recv_message:
        stem.socket.recv_message = self._original_recv_message
        self._original_recv_message = None

      self._test_dir = ""
      self._tor_cmd = None
      self._tor_cwd = ""
      self._torrc_contents = ""
      self._custom_opts = None
      self._tor_process = None

      println("done", STATUS)
Example #19
def _print_static_issues(static_check_issues):
  if static_check_issues:
    println('STATIC CHECKS', STATUS)

    for file_path in static_check_issues:
      println('* %s' % file_path, STATUS)

      # Make a dict of line numbers to their issues. This lets us both sort
      # by line number and drop any duplicate messages.

      line_to_issues = {}

      for issue in static_check_issues[file_path]:
        line_to_issues.setdefault(issue.line_number, set()).add((issue.message, issue.line))

      for line_number in sorted(line_to_issues.keys()):
        for msg, line in line_to_issues[line_number]:
          line_count = '%-4s' % line_number
          println('  line %s - %-40s %s' % (line_count, msg, line.strip()))

      println()
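
The setdefault(...).add(...) idiom above gives grouping and de-duplication in one step, since each value is a set. In isolation:

line_to_issues = {}

for line_number, msg in [(3, 'unused import'), (3, 'unused import'), (9, 'line too long')]:
  line_to_issues.setdefault(line_number, set()).add(msg)

for line_number in sorted(line_to_issues):
  print(line_number, line_to_issues[line_number])  # 3 {'unused import'} then 9 {'line too long'}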
Example #20
def _print_static_issues(static_check_issues):
    if static_check_issues:
        println("STATIC CHECKS", STATUS)

        for file_path in static_check_issues:
            println("* %s" % file_path, STATUS)

            # Make a dict of line numbers to their issues. This lets us both sort
            # by line number and drop any duplicate messages.

            line_to_issues = {}

            for line_number, msg in static_check_issues[file_path]:
                line_to_issues.setdefault(line_number, set()).add(msg)

            for line_number in sorted(line_to_issues.keys()):
                for msg in line_to_issues[line_number]:
                    line_count = "%-4s" % line_number
                    println("  line %s - %s" % (line_count, msg))

            println()
Example #21
def _run_test(args, test_class, output_filters, logging_buffer):
    start_time = time.time()

    if args.verbose:
        test.output.print_divider(test_class.__module__)
    else:
        label = test_class.__module__

        if label.startswith('test.unit.'):
            label = label[10:]
        elif label.startswith('test.integ.'):
            label = label[11:]

        label = "  %s..." % label
        label = "%-54s" % label

        println(label, STATUS, NO_NL)

    suite = unittest.TestLoader().loadTestsFromTestCase(test_class)

    test_results = StringIO.StringIO()
    run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)

    if args.verbose:
        sys.stdout.write(
            test.output.apply_filters(test_results.getvalue(),
                                      *output_filters))
        println()
    elif not run_result.failures and not run_result.errors:
        println(" success (%0.2fs)" % (time.time() - start_time), SUCCESS)
    else:
        println(" failed (%0.2fs)" % (time.time() - start_time), ERROR)
        sys.stdout.write(
            test.output.apply_filters(test_results.getvalue(),
                                      *output_filters))

    test.output.print_logging(logging_buffer)

    return run_result
Example #22
def _print_static_issues(static_check_issues):
  if static_check_issues:
    println('STATIC CHECKS', STATUS)

    for file_path in static_check_issues:
      println('* %s' % file_path, STATUS)

      # Make a dict of line numbers to their issues. This lets us both sort
      # by line number and drop any duplicate messages.

      line_to_issues = {}

      for issue in static_check_issues[file_path]:
        line_to_issues.setdefault(issue.line_number, set()).add((issue.message, issue.line))

      for line_number in sorted(line_to_issues.keys()):
        for msg, line in line_to_issues[line_number]:
          line_count = '%-4s' % line_number
          content = ' | %s' % line.strip() if line.strip() else ''
          println('  line %s - %-40s%s' % (line_count, msg, content))

      println()
Example #23
  def _run_setup(self):
    """
    Makes temporary runtime resources for our integration test instance.

    :raises: OSError if unsuccessful
    """

    # makes a temporary data directory if needed
    try:
      println("  making test directory (%s)... " % self._test_dir, STATUS, NO_NL)

      if os.path.exists(self._test_dir):
        println("skipped", STATUS)
      else:
        os.makedirs(self._test_dir)
        println("done", STATUS)
    except OSError as exc:
      test.output.print_error("failed (%s)" % exc)
      raise exc
Example #24
File: run_tests.py Project: tkitki/stem
def _run_test(args, test_class, output_filters):
    start_time = time.time()

    # Test classes look like...
    #
    #   test.unit.util.conf.TestConf.test_parse_enum_csv
    #
    # We want to strip the 'test.unit.' or 'test.integ.' prefix since it's
    # redundant. We also want to drop the test class name. The individual test
    # name at the end is optional (only present if we used the '--test'
    # argument).

    label_comp = test_class.split('.')[2:]
    del label_comp[-1 if label_comp[-1][0].isupper() else -2]
    test_label = '  %-52s' % ('.'.join(label_comp) + '...')

    if args.verbose:
        test.output.print_divider(test_class)
    else:
        println(test_label, STATUS, NO_NL)

    try:
        suite = unittest.TestLoader().loadTestsFromName(test_class)
    except AttributeError:
        if args.specific_test:
            # should only come up if user provided '--test' for something that doesn't exist
            println(' no such test', ERROR)
            return None
        else:
            raise
    except Exception as exc:
        println(' failed', ERROR)
        traceback.print_exc()
        return None

    test_results = StringIO()
    run_result = stem.util.test_tools.TimedTestRunner(test_results,
                                                      verbosity=2).run(suite)

    if args.verbose:
        println(
            test.output.apply_filters(test_results.getvalue(),
                                      *output_filters))
    elif not run_result.failures and not run_result.errors:
        println(' success (%0.2fs)' % (time.time() - start_time), SUCCESS)
    else:
        if args.quiet:
            println(test_label, STATUS, NO_NL, STDERR)
            println(' failed (%0.2fs)' % (time.time() - start_time), ERROR,
                    STDERR)
            println(
                test.output.apply_filters(test_results.getvalue(),
                                          *output_filters), STDERR)
        else:
            println(' failed (%0.2fs)' % (time.time() - start_time), ERROR)
            println(
                test.output.apply_filters(test_results.getvalue(),
                                          *output_filters), NO_NL)

    return run_result
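
The label surgery at the top of this example is easiest to follow on a concrete name. Working through the module path from its comment:

test_class = 'test.unit.util.conf.TestConf.test_parse_enum_csv'

label_comp = test_class.split('.')[2:]  # ['util', 'conf', 'TestConf', 'test_parse_enum_csv']
del label_comp[-1 if label_comp[-1][0].isupper() else -2]  # drops the class name 'TestConf'
test_label = '  %-52s' % ('.'.join(label_comp) + '...')

print(test_label)  # '  util.conf.test_parse_enum_csv...' padded out to 52 columns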
Example #25
def main():
    start_time = time.time()

    try:
        stem.prereq.check_requirements()
    except ImportError as exc:
        println("%s\n" % exc)
        sys.exit(1)

    test_config = stem.util.conf.get_config("test")
    test_config.load(os.path.join(STEM_BASE, "test", "settings.cfg"))

    try:
        args = _get_args(sys.argv[1:])
    except getopt.GetoptError as exc:
        println("%s (for usage provide --help)" % exc)
        sys.exit(1)
    except ValueError as exc:
        println(str(exc))
        sys.exit(1)

    if args.print_help:
        println(test.util.get_help_message())
        sys.exit()
    elif not args.run_unit and not args.run_integ:
        println("Nothing to run (for usage provide --help)\n")
        sys.exit()

    if not stem.prereq.is_mock_available():
        try:
            try:
                import unittest.mock
            except ImportError:
                import mock

            println(MOCK_OUT_OF_DATE_MSG % mock.__version__)
        except ImportError:
            println(MOCK_UNAVAILABLE_MSG)

        if stem.util.system.is_available('pip'):
            println("You can get it by running 'sudo pip install mock'.")
        elif stem.util.system.is_available('apt-get'):
            println(
                "You can get it by running 'sudo apt-get install python-mock'."
            )

        sys.exit(1)

    pyflakes_task, pep8_task = None, None

    if not stem.prereq.is_python_3() and not args.specific_test:
        if stem.util.test_tools.is_pyflakes_available():
            pyflakes_task = PYFLAKES_TASK

        if stem.util.test_tools.is_pep8_available():
            pep8_task = PEP8_TASK

    test.util.run_tasks(
        "INITIALISING",
        Task("checking stem version", test.util.check_stem_version),
        Task("checking python version", test.util.check_python_version),
        Task("checking pycrypto version", test.util.check_pycrypto_version),
        Task("checking mock version", test.util.check_mock_version),
        Task("checking pyflakes version", test.util.check_pyflakes_version),
        Task("checking pep8 version", test.util.check_pep8_version),
        Task("checking for orphaned .pyc files", test.util.clean_orphaned_pyc,
             (SRC_PATHS, )),
        Task("checking for unused tests", test.util.check_for_unused_tests,
             ((os.path.join(STEM_BASE, 'test'), ), )),
        pyflakes_task,
        pep8_task,
    )

    if args.run_python3 and sys.version_info[0] != 3:
        test.util.run_tasks(
            "EXPORTING TO PYTHON 3",
            Task("checking requirements", test.util.python3_prereq),
            Task("cleaning prior export", test.util.python3_clean,
                 (not args.run_python3_clean, )),
            Task("exporting python 3 copy", test.util.python3_copy_stem),
            Task("running tests", test.util.python3_run_tests),
        )

        println("BUG: python3_run_tests() should have terminated our process",
                ERROR)
        sys.exit(1)

    # buffer that we log messages into so they can be printed after a test has finished

    logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
    stem.util.log.get_logger().addHandler(logging_buffer)

    # filters for how testing output is displayed

    error_tracker = test.output.ErrorTracker()

    output_filters = (
        error_tracker.get_filter(),
        test.output.strip_module,
        test.output.align_results,
        test.output.colorize,
    )

    # Number of tests that we have skipped. This is only available with python
    # 2.7 or later because before that test results didn't have a 'skipped'
    # attribute.

    skipped_tests = 0

    if args.run_unit:
        test.output.print_divider("UNIT TESTS", True)
        error_tracker.set_category("UNIT TEST")

        for test_class in test.util.get_unit_tests(args.specific_test):
            run_result = _run_test(args, test_class, output_filters,
                                   logging_buffer)
            skipped_tests += len(getattr(run_result, 'skipped', []))

        println()

    if args.run_integ:
        test.output.print_divider("INTEGRATION TESTS", True)
        integ_runner = test.runner.get_runner()

        # Determine targets we don't meet the prereqs for. Warnings are given about
        # these at the end of the test run so they're more noticeable.

        our_version = stem.version.get_system_tor_version(args.tor_path)
        skipped_targets = []

        for target in args.run_targets:
            # check if we meet this target's tor version prerequisites

            target_prereq = test.util.get_prereq(target)

            if target_prereq and our_version < target_prereq:
                skipped_targets.append(target)
                continue

            error_tracker.set_category(target)

            try:
                integ_runner.start(
                    target,
                    args.attribute_targets,
                    args.tor_path,
                    extra_torrc_opts=test.util.get_torrc_entries(target))

                println("Running tests...\n", STATUS)

                owner = None
                if integ_runner.is_accessible():
                    owner = integ_runner.get_tor_controller(
                        True)  # controller to own our main Tor process

                for test_class in test.util.get_integ_tests(
                        args.specific_test):
                    run_result = _run_test(args, test_class, output_filters,
                                           logging_buffer)
                    skipped_tests += len(getattr(run_result, 'skipped', []))

                if owner:
                    owner.close()

                # We should have joined on all threads. If not then that indicates a
                # leak that is likely both a bug and a disruption to further targets.

                active_threads = threading.enumerate()

                if len(active_threads) > 1:
                    println("Threads lingering after test run:", ERROR)

                    for lingering_thread in active_threads:
                        println("  %s" % lingering_thread, ERROR)

                    break
            except KeyboardInterrupt:
                println("  aborted starting tor: keyboard interrupt\n", ERROR)
                break
            except ValueError as exc:
                # can arise if get_torrc_entries() runs into bad settings.cfg data

                println(str(exc), ERROR)
                break
            except OSError:
                error_tracker.register_error()
            finally:
                integ_runner.stop()

        if skipped_targets:
            println()

            for target in skipped_targets:
                req_version = test.util.get_prereq(target)
                println(
                    "Unable to run target %s, this requires tor version %s" %
                    (target, req_version), ERROR)

            println()

    if not stem.prereq.is_python_3():
        static_check_issues = {}

        if pyflakes_task and pyflakes_task.is_successful:
            for path, issues in pyflakes_task.result.items():
                for issue in issues:
                    static_check_issues.setdefault(path, []).append(issue)
        elif not stem.util.test_tools.is_pyflakes_available():
            println(
                "Static error checking requires pyflakes version 0.7.3 or later. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n",
                ERROR)

        if pep8_task and pep8_task.is_successful:
            for path, issues in pep8_task.result.items():
                for issue in issues:
                    static_check_issues.setdefault(path, []).append(issue)
        elif not stem.util.test_tools.is_pep8_available():
            println(
                "Style checks require pep8 version 1.4.2 or later. Please install it from...\n  http://pypi.python.org/pypi/pep8\n",
                ERROR)

        _print_static_issues(static_check_issues)

    runtime_label = "(%i seconds)" % (time.time() - start_time)

    if error_tracker.has_errors_occured():
        println("TESTING FAILED %s" % runtime_label, ERROR)

        for line in error_tracker:
            println("  %s" % line, ERROR)
    else:
        if skipped_tests > 0:
            println("%i TESTS WERE SKIPPED" % skipped_tests, STATUS)

        println("TESTING PASSED %s\n" % runtime_label, SUCCESS)

    sys.exit(1 if error_tracker.has_errors_occured() else 0)
Example #26
File: runner.py Project: tlyu/stem
  def start(self, config_target, attribute_targets, tor_cmd):
    """
    Makes temporary testing resources and starts tor, blocking until it
    completes.

    :param str config_target: **Target** for this test run's torrc settings
    :param list attribute_targets: **Targets** for our non-configuration attributes
    :param str tor_cmd: command to start tor with

    :raises: OSError if unable to run test preparations or start tor
    """

    with self._runner_lock:
      self.attribute_targets = attribute_targets

      # if we're holding on to a tor process (running or not) then clean up after
      # it so we can start a fresh instance

      if self._tor_process:
        self.stop()

      println('Setting up a test instance...', STATUS)

      # if 'test_directory' is unset then we make a new data directory in /tmp
      # and clean it up when we're done

      config_test_dir = CONFIG['integ.test_directory']

      if config_test_dir:
        self._test_dir = stem.util.system.expand_path(config_test_dir, test.STEM_BASE)
      else:
        self._test_dir = tempfile.mktemp('-stem-integ')

      original_cwd, data_dir_path = os.getcwd(), self._test_dir
      self._tor_cmd = stem.util.system.expand_path(tor_cmd) if os.path.sep in tor_cmd else tor_cmd

      if test.Target.RELATIVE in self.attribute_targets:
        tor_cwd = os.path.dirname(self._test_dir)

        if not os.path.exists(tor_cwd):
          os.makedirs(tor_cwd)

        os.chdir(tor_cwd)
        data_dir_path = './%s' % os.path.basename(self._test_dir)

      config_csv = CONFIG['target.torrc'].get(config_target)
      extra_torrc_opts = []

      if config_csv:
        for opt in config_csv.split(','):
          opt = opt.strip()

          if opt in Torrc.keys():
            extra_torrc_opts.append(Torrc[opt])
          else:
            raise ValueError("'%s' isn't a test.runner.Torrc enumeration" % opt)

      self._custom_opts = extra_torrc_opts
      self._torrc_contents = BASE_TORRC % (data_dir_path, data_dir_path)

      if extra_torrc_opts:
        self._torrc_contents += '\n'.join(extra_torrc_opts) + '\n'

      try:
        self._tor_cwd = os.getcwd()
        self._run_setup()
        self._start_tor(self._tor_cmd)

        # strip the testing directory from recv_message responses if we're
        # simulating a chroot setup

        if test.Target.CHROOT in self.attribute_targets and not self._original_recv_message:
          # TODO: when we have a function for telling stem the chroot we'll
          # need to set that too

          self._original_recv_message = stem.socket.recv_message
          self._chroot_path = data_dir_path

          def _chroot_recv_message(control_file):
            return self._original_recv_message(_MockChrootFile(control_file, data_dir_path))

          stem.socket.recv_message = _chroot_recv_message

        if self.is_accessible():
          self._owner_controller = self.get_tor_controller(True)

        if test.Target.RELATIVE in self.attribute_targets:
          os.chdir(original_cwd)  # revert our cwd back to normal
      except OSError as exc:
        raise exc
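
The chroot simulation above swaps stem.socket.recv_message for a wrapper and stashes the original so stop() can restore it. The same save-and-restore monkeypatching shape, reduced to a sketch (assumes stem is installed; the wrapper body is a stand-in for the real path rewriting):

import stem.socket

original_recv_message = stem.socket.recv_message  # keep a handle so we can revert

def wrapped_recv_message(control_file):
  # a real wrapper would strip the chroot prefix from responses before returning them
  return original_recv_message(control_file)

stem.socket.recv_message = wrapped_recv_message

try:
  pass  # ... exercise the patched module here ...
finally:
  stem.socket.recv_message = original_recv_message  # revert, as stop() does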
Example #27
File: runner.py Project: soult/stem
    def start(self, run_target, attribute_targets, tor_cmd, extra_torrc_opts):
        """
    Makes temporary testing resources and starts tor, blocking until it
    completes.

    :param Target run_target: configuration we're running with
    :param list attribute_targets: **Targets** for our non-configuration attributes
    :param str tor_cmd: command to start tor with
    :param list extra_torrc_opts: additional torrc options for our test instance

    :raises: OSError if unable to run test preparations or start tor
    """

        with self._runner_lock:
            self.run_target = run_target
            self.attribute_targets = attribute_targets

            # if we're holding on to a tor process (running or not) then clean up after
            # it so we can start a fresh instance

            if self._tor_process:
                self.stop()

            println("Setting up a test instance...", STATUS)

            # if 'test_directory' is unset then we make a new data directory in /tmp
            # and clean it up when we're done

            config_test_dir = CONFIG["integ.test_directory"]

            if config_test_dir:
                self._test_dir = stem.util.system.expand_path(config_test_dir, STEM_BASE)
            else:
                self._test_dir = tempfile.mktemp("-stem-integ")

            original_cwd, data_dir_path = os.getcwd(), self._test_dir

            if Target.RELATIVE in self.attribute_targets:
                tor_cwd = os.path.dirname(self._test_dir)

                if not os.path.exists(tor_cwd):
                    os.makedirs(tor_cwd)

                os.chdir(tor_cwd)
                data_dir_path = "./%s" % os.path.basename(self._test_dir)

            self._tor_cmd = tor_cmd
            self._custom_opts = extra_torrc_opts
            self._torrc_contents = BASE_TORRC % data_dir_path

            if extra_torrc_opts:
                self._torrc_contents += "\n".join(extra_torrc_opts) + "\n"

            try:
                self._tor_cwd = os.getcwd()
                self._run_setup()
                self._start_tor(tor_cmd)

                # strip the testing directory from recv_message responses if we're
                # simulating a chroot setup

                if Target.CHROOT in self.attribute_targets and not self._original_recv_message:
                    # TODO: when we have a function for telling stem the chroot we'll
                    # need to set that too

                    self._original_recv_message = stem.socket.recv_message
                    self._chroot_path = data_dir_path

                    def _chroot_recv_message(control_file):
                        return self._original_recv_message(_MockChrootFile(control_file, data_dir_path))

                    stem.socket.recv_message = _chroot_recv_message

                # revert our cwd back to normal
                if Target.RELATIVE in self.attribute_targets:
                    os.chdir(original_cwd)
            except OSError as exc:
                raise exc
Example #28
  def start(self, config_target, attribute_targets, tor_cmd):
    """
    Makes temporary testing resources and starts tor, blocking until it
    completes.

    :param str config_target: **Target** for this test run's torrc settings
    :param list attribute_targets: **Targets** for our non-configuration attributes
    :param str tor_cmd: command to start tor with

    :raises: OSError if unable to run test preparations or start tor
    """

    with self._runner_lock:
      self.attribute_targets = attribute_targets

      # if we're holding on to a tor process (running or not) then clean up after
      # it so we can start a fresh instance

      if self._tor_process:
        self.stop()

      println('Setting up a test instance...', STATUS)

      # if 'test_directory' is unset then we make a new data directory in /tmp
      # and clean it up when we're done

      config_test_dir = CONFIG['integ.test_directory']

      if config_test_dir:
        self._test_dir = stem.util.system.expand_path(config_test_dir, test.STEM_BASE)
      else:
        self._test_dir = tempfile.mktemp('-stem-integ')

      original_cwd, data_dir_path = os.getcwd(), self._test_dir
      self._tor_cmd = stem.util.system.expand_path(tor_cmd) if os.path.sep in tor_cmd else tor_cmd

      if test.Target.RELATIVE in self.attribute_targets:
        tor_cwd = os.path.dirname(self._test_dir)

        if not os.path.exists(tor_cwd):
          os.makedirs(tor_cwd)

        os.chdir(tor_cwd)
        data_dir_path = './%s' % os.path.basename(self._test_dir)

      config_csv = CONFIG['target.torrc'].get(config_target)
      target_torrc_opts = []

      if config_csv:
        for opt in config_csv.split(','):
          opt = opt.strip()

          if opt in Torrc.keys():
            target_torrc_opts.append(Torrc[opt])
          else:
            raise ValueError("'%s' isn't a test.runner.Torrc enumeration" % opt)

      self._custom_opts = target_torrc_opts

      self._torrc_contents = CONFIG['integ.torrc']

      if target_torrc_opts:
        self._torrc_contents += '\n\n# Torrc options for the %s target\n\n' % config_target
        self._torrc_contents += '\n'.join(target_torrc_opts)

      if CONFIG['integ.extra_torrc']:
        self._torrc_contents += '\n\n# Torrc options from %s\n\n' % os.environ['STEM_TEST_CONFIG']
        self._torrc_contents += CONFIG['integ.extra_torrc']

      self._torrc_contents = self._torrc_contents.replace('[DATA_DIR]', data_dir_path)
      self._torrc_contents = self._torrc_contents.replace('[SOCKS_PORT]', str(SOCKS_PORT))
      self._torrc_contents = self._torrc_contents.replace('[OR_PORT]', str(ORPORT))

      try:
        self._tor_cwd = os.getcwd()
        self._run_setup()
        self._start_tor(self._tor_cmd)

        # strip the testing directory from recv_message responses if we're
        # simulating a chroot setup

        if test.Target.CHROOT in self.attribute_targets and not self._original_recv_message:
          # TODO: when we have a function for telling stem the chroot we'll
          # need to set that too

          self._original_recv_message = stem.socket.recv_message
          self._chroot_path = data_dir_path

          async def _chroot_recv_message(control_file):
            return await self._original_recv_message(_MockChrootFile(control_file, data_dir_path))

          stem.socket.recv_message = _chroot_recv_message

        if self.is_accessible():
          # TODO: refactor so owner controller is less convoluted

          loop = asyncio.new_event_loop()

          self._owner_controller_thread = threading.Thread(
            name = 'owning_controller',
            target = loop.run_forever,
            daemon = True,
          )

          self._owner_controller_thread.start()

          self._owner_controller = asyncio.run_coroutine_threadsafe(self.get_tor_controller(True), loop).result()

        if test.Target.RELATIVE in self.attribute_targets:
          os.chdir(original_cwd)  # revert our cwd back to normal
      except OSError as exc:
        raise exc
Example #29
def _run_test(args, test_class, exclude, output_filters):
    # When logging to a file we don't have stdout's test delimiters to correlate
    # logs with the test that generated them.

    if args.logging_path:
        stem.util.log.notice('Beginning test %s' % test_class)

    start_time = time.time()

    # Test classes look like...
    #
    #   test.unit.util.conf.TestConf.test_parse_enum_csv
    #
    # We want to strip the 'test.unit.' or 'test.integ.' prefix since it's
    # redundant. We also want to drop the test class name. The individual test
    # name at the end is optional (only present if we used the '--test'
    # argument).

    label_comp = test_class.split('.')[2:]
    del label_comp[-1 if label_comp[-1][0].isupper() else -2]
    test_label = '  %-52s' % ('.'.join(label_comp) + '...')

    if args.verbose:
        test.output.print_divider(test_class)
    else:
        println(test_label, STATUS, NO_NL)

    try:
        suite = unittest.TestLoader().loadTestsFromName(test_class)
    except AttributeError:
        if args.specific_test:
            # should only come up if user provided '--test' for something that doesn't exist
            println(' no such test', ERROR)
            return None
        else:
            raise
    except Exception as exc:
        println(' failed', ERROR)
        traceback.print_exc()
        return None

    # check if we should skip any individual tests within this module

    if exclude:
        cropped_name = test.arguments.crop_module_name(test_class)
        cropped_name = cropped_name.rsplit('.', 1)[0]  # exclude the class name

        for prefix in exclude:
            if prefix.startswith(cropped_name):
                test_name = prefix.split('.')[-1]

                suite._tests = list(
                    filter(lambda test: test.id().split('.')[-1] != test_name,
                           suite._tests))

    test_results = io.StringIO()
    run_result = stem.util.test_tools.TimedTestRunner(test_results,
                                                      verbosity=2).run(suite)

    if args.verbose:
        println(
            test.output.apply_filters(test_results.getvalue(),
                                      *output_filters))
    elif not run_result.failures and not run_result.errors:
        println(' success (%0.2fs)' % (time.time() - start_time), SUCCESS)
    else:
        if args.quiet:
            println(test_label, STATUS, NO_NL, STDERR)
            println(' failed (%0.2fs)' % (time.time() - start_time), ERROR,
                    STDERR)
            println(
                test.output.apply_filters(test_results.getvalue(),
                                          *output_filters), STDERR)
        else:
            println(' failed (%0.2fs)' % (time.time() - start_time), ERROR)
            println(
                test.output.apply_filters(test_results.getvalue(),
                                          *output_filters), NO_NL)

    if args.logging_path:
        stem.util.log.notice('Finished test %s' % test_class)

    return run_result
Example #30
File: runner.py Project: soult/stem
    def _run_setup(self):
        """
    Makes a temporary runtime resources of our integration test instance.

    :raises: OSError if unsuccessful
    """

        # makes a temporary data directory if needed
        try:
            println("  making test directory (%s)... " % self._test_dir, STATUS, NO_NL)

            if os.path.exists(self._test_dir):
                println("skipped", STATUS)
            else:
                os.makedirs(self._test_dir)
                println("done", STATUS)
        except OSError as exc:
            println("failed (%s)" % exc, ERROR)
            raise exc

        # Tor checks during startup that the directory a control socket resides in
        # is only accessible by the tor user (and refuses to finish starting if it
        # isn't).

        if Torrc.SOCKET in self._custom_opts:
            try:
                socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)
                println("  making control socket directory (%s)... " % socket_dir, STATUS, NO_NL)

                if os.path.exists(socket_dir) and stat.S_IMODE(os.stat(socket_dir).st_mode) == 0o700:
                    println("skipped", STATUS)
                else:
                    if not os.path.exists(socket_dir):
                        os.makedirs(socket_dir)

                    os.chmod(socket_dir, 0o700)
                    println("done", STATUS)
            except OSError as exc:
                println("failed (%s)" % exc, ERROR)
                raise exc

        # configures logging
        logging_path = CONFIG["integ.log"]

        if logging_path:
            logging_path = stem.util.system.expand_path(logging_path, STEM_BASE)
            println("  configuring logger (%s)... " % logging_path, STATUS, NO_NL)

            # delete the old log
            if os.path.exists(logging_path):
                os.remove(logging_path)

            logging.basicConfig(
                filename=logging_path,
                level=logging.DEBUG,
                format="%(asctime)s [%(levelname)s] %(message)s",
                datefmt="%D %H:%M:%S",
            )

            println("done", STATUS)
        else:
            println("  configuring logger... skipped", STATUS)

        # writes our testing torrc
        torrc_dst = os.path.join(self._test_dir, "torrc")
        try:
            println("  writing torrc (%s)... " % torrc_dst, STATUS, NO_NL)

            torrc_file = open(torrc_dst, "w")
            torrc_file.write(self._torrc_contents)
            torrc_file.close()

            println("done", STATUS)

            for line in self._torrc_contents.strip().splitlines():
                println("    %s" % line.strip(), SUBSTATUS)

            println()
        except Exception as exc:
            println("failed (%s)\n" % exc, ERROR)
            raise OSError(exc)
Example #31
def main():
    start_time = time.time()

    try:
        stem.prereq.check_requirements()
    except ImportError as exc:
        println('%s\n' % exc)
        sys.exit(1)

    test_config = stem.util.conf.get_config('test')
    test_config.load(os.path.join(test.STEM_BASE, 'test', 'settings.cfg'))

    try:
        args = test.arguments.parse(sys.argv[1:])
        test.task.TOR_VERSION.args = (args.tor_path, )
        test.output.SUPPRESS_STDOUT = args.quiet
    except ValueError as exc:
        println(str(exc))
        sys.exit(1)

    if args.print_help:
        println(test.arguments.get_help())
        sys.exit()
    elif not args.run_unit and not args.run_integ:
        println('Nothing to run (for usage provide --help)\n')
        sys.exit()

    if not stem.prereq.is_mock_available():
        try:
            import mock
            println(MOCK_OUT_OF_DATE_MSG % mock.__version__)
        except ImportError:
            println(MOCK_UNAVAILABLE_MSG)

        if stem.util.system.is_available('pip'):
            println("You can get it by running 'sudo pip install mock'.")
        elif stem.util.system.is_available('apt-get'):
            println(
                "You can get it by running 'sudo apt-get install python-mock'."
            )

        sys.exit(1)

    test.task.run(
        'INITIALISING',
        test.task.STEM_VERSION,
        test.task.TOR_VERSION if args.run_integ else None,
        test.task.PYTHON_VERSION,
        test.task.CRYPTO_VERSION,
        test.task.PYNACL_VERSION,
        test.task.MOCK_VERSION,
        test.task.PYFLAKES_VERSION,
        test.task.PYCODESTYLE_VERSION,
        test.task.CLEAN_PYC,
        test.task.UNUSED_TESTS,
        test.task.IMPORT_TESTS,
        test.task.PYFLAKES_TASK if not args.specific_test else None,
        test.task.PYCODESTYLE_TASK if not args.specific_test else None,
    )

    # buffer that we log messages into so they can be printed after a test has finished

    logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
    stem.util.log.get_logger().addHandler(logging_buffer)

    # filters for how testing output is displayed

    error_tracker = test.output.ErrorTracker()

    output_filters = (
        error_tracker.get_filter(),
        test.output.runtimes,
        test.output.strip_module,
        test.output.align_results,
        test.output.colorize,
    )

    # Number of tests that we have skipped. This is only available with python
    # 2.7 or later because before that test results didn't have a 'skipped'
    # attribute.

    skipped_tests = 0

    if args.run_integ:
        default_test_dir = stem.util.system.expand_path(
            CONFIG['integ.test_directory'], test.STEM_BASE)
        async_args = test.AsyncTestArgs(default_test_dir, args.tor_path)

        for module_str in stem.util.test_tools.ASYNC_TESTS:
            if not args.specific_test or module_str.startswith(
                    args.specific_test):
                module = importlib.import_module(module_str.rsplit('.', 1)[0])
                test_classes = [
                    v for k, v in module.__dict__.items()
                    if k.startswith('Test')
                ]

                if len(test_classes) != 1:
                    print('BUG: Detected multiple tests for %s: %s' %
                          (module_str, ', '.join(str(c) for c in test_classes)))
                    sys.exit(1)

                test_classes[0].run_tests(async_args)

    if args.run_unit:
        test.output.print_divider('UNIT TESTS', True)
        error_tracker.set_category('UNIT TEST')

        for test_class in get_unit_tests(args.specific_test):
            run_result = _run_test(args, test_class, output_filters)
            test.output.print_logging(logging_buffer)
            skipped_tests += len(getattr(run_result, 'skipped', []))

        println()

    if args.run_integ:
        test.output.print_divider('INTEGRATION TESTS', True)
        integ_runner = test.runner.get_runner()

        for target in args.run_targets:
            error_tracker.set_category(target)

            try:
                integ_runner.start(target, args.attribute_targets,
                                   args.tor_path)

                println('Running tests...\n', STATUS)

                for test_class in get_integ_tests(args.specific_test):
                    run_result = _run_test(args, test_class, output_filters)
                    test.output.print_logging(logging_buffer)
                    skipped_tests += len(getattr(run_result, 'skipped', []))
            except KeyboardInterrupt:
                println('  aborted starting tor: keyboard interrupt\n', ERROR)
                break
            except ValueError as exc:
                println(str(exc),
                        ERROR)  # can arise if there's bad settings.cfg data
                break
            except OSError:
                error_tracker.register_error()
            finally:
                println()
                integ_runner.stop()
                println()

                # We should have joined on all threads. If not then that indicates a
                # leak that is likely both a bug and a disruption to further targets.

                active_threads = threading.enumerate()

                if len(active_threads) > 1:
                    println('Threads lingering after test run:', ERROR)

                    for lingering_thread in active_threads:
                        println('  %s' % lingering_thread, ERROR)

                    break

    static_check_issues = {}

    for task in (test.task.PYFLAKES_TASK, test.task.PYCODESTYLE_TASK):
        if not task.is_available and task.unavailable_msg:
            println(task.unavailable_msg, ERROR)
        else:
            task.join()  # no-op if these haven't been run

            if task.result:
                for path, issues in task.result.items():
                    for issue in issues:
                        static_check_issues.setdefault(path, []).append(issue)

    _print_static_issues(static_check_issues)

    if error_tracker.has_errors_occured():
        println('TESTING FAILED (%i seconds)' % (time.time() - start_time),
                ERROR, STDERR)

        for line in error_tracker:
            println('  %s' % line, ERROR, STDERR)

        error_modules = error_tracker.get_modules()

        if len(error_modules) < 10 and not args.specific_test:
            println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

            for module in error_modules:
                println('  %s --test %s' % (' '.join(sys.argv), module), ERROR,
                        STDERR)
    else:
        if skipped_tests > 0:
            println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

        println('TESTING PASSED (%i seconds)\n' % (time.time() - start_time),
                SUCCESS)

    new_capabilities = test.get_new_capabilities()

    if new_capabilities:
        println(NEW_CAPABILITIES_FOUND, ERROR)

        for capability_type, msg in new_capabilities:
            println('  [%s] %s' % (capability_type, msg), ERROR)

    sys.exit(1 if error_tracker.has_errors_occured() else 0)
Example #32
def main():
  start_time = time.time()

  try:
    stem.prereq.check_requirements()
  except ImportError as exc:
    println('%s\n' % exc)
    sys.exit(1)

  test_config = stem.util.conf.get_config('test')
  test_config.load(os.path.join(STEM_BASE, 'test', 'settings.cfg'))

  try:
    args = test.arguments.parse(sys.argv[1:])
  except ValueError as exc:
    println(str(exc))
    sys.exit(1)

  if args.quiet:
    test.output.SUPPRESS_STDOUT = True

  if args.print_help:
    println(test.arguments.get_help())
    sys.exit()
  elif not args.run_unit and not args.run_integ:
    println('Nothing to run (for usage provide --help)\n')
    sys.exit()

  if not stem.prereq.is_mock_available():
    try:
      try:
        import unittest.mock
      except ImportError:
        import mock

      println(MOCK_OUT_OF_DATE_MSG % mock.__version__)
    except ImportError:
      println(MOCK_UNAVAILABLE_MSG)

    if stem.util.system.is_available('pip'):
      println("You can get it by running 'sudo pip install mock'.")
    elif stem.util.system.is_available('apt-get'):
      println("You can get it by running 'sudo apt-get install python-mock'.")

    sys.exit(1)

  pyflakes_task, pep8_task = None, None

  if not args.specific_test:
    if stem.util.test_tools.is_pyflakes_available():
      pyflakes_task = PYFLAKES_TASK

    if stem.util.test_tools.is_pep8_available():
      pep8_task = PEP8_TASK

  test.util.run_tasks(
    'INITIALISING',
    Task('checking stem version', test.util.check_stem_version),
    Task('checking python version', test.util.check_python_version),
    Task('checking pycrypto version', test.util.check_pycrypto_version),
    Task('checking mock version', test.util.check_mock_version),
    Task('checking pyflakes version', test.util.check_pyflakes_version),
    Task('checking pep8 version', test.util.check_pep8_version),
    Task('checking for orphaned .pyc files', test.util.clean_orphaned_pyc, (SRC_PATHS,)),
    Task('checking for unused tests', test.util.check_for_unused_tests, ((os.path.join(STEM_BASE, 'test'),),)),
    pyflakes_task,
    pep8_task,
  )

  # buffer that we log messages into so they can be printed after a test has finished

  logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
  stem.util.log.get_logger().addHandler(logging_buffer)

  # filters for how testing output is displayed

  error_tracker = test.output.ErrorTracker()

  output_filters = (
    error_tracker.get_filter(),
    test.output.strip_module,
    test.output.align_results,
    test.output.colorize,
  )

  # Number of tests that we have skipped. This is only available with python
  # 2.7 or later because before that test results didn't have a 'skipped'
  # attribute.

  skipped_tests = 0

  if args.run_unit:
    test.output.print_divider('UNIT TESTS', True)
    error_tracker.set_category('UNIT TEST')

    for test_class in test.util.get_unit_tests(args.specific_test):
      run_result = _run_test(args, test_class, output_filters, logging_buffer)
      skipped_tests += len(getattr(run_result, 'skipped', []))

    println()

  if args.run_integ:
    test.output.print_divider('INTEGRATION TESTS', True)
    integ_runner = test.runner.get_runner()

    # Determine targets we don't meet the prereqs for. Warnings are given about
    # these at the end of the test run so they're more noticeable.

    our_version = stem.version.get_system_tor_version(args.tor_path)
    skipped_targets = []

    for target in args.run_targets:
      # check if we meet this target's tor version prerequisites

      target_prereq = test.util.get_prereq(target)

      if target_prereq and our_version < target_prereq:
        skipped_targets.append(target)
        continue

      error_tracker.set_category(target)

      try:
        integ_runner.start(target, args.attribute_targets, args.tor_path, extra_torrc_opts = test.util.get_torrc_entries(target))

        println('Running tests...\n', STATUS)

        owner = None
        if integ_runner.is_accessible():
          owner = integ_runner.get_tor_controller(True)  # controller to own our main Tor process

        for test_class in test.util.get_integ_tests(args.specific_test):
          run_result = _run_test(args, test_class, output_filters, logging_buffer)
          skipped_tests += len(getattr(run_result, 'skipped', []))

        if owner:
          owner.close()

        # We should have joined on all threads. If not, that indicates a
        # leak that is likely a bug and could disrupt further targets.

        active_threads = threading.enumerate()

        if len(active_threads) > 1:
          println('Threads lingering after test run:', ERROR)

          for lingering_thread in active_threads:
            println('  %s' % lingering_thread, ERROR)

          break
      except KeyboardInterrupt:
        println('  aborted starting tor: keyboard interrupt\n', ERROR)
        break
      except ValueError as exc:
        # can arise if get_torrc_entries() runs into bad settings.cfg data

        println(str(exc), ERROR)
        break
      except OSError:
        error_tracker.register_error()
      finally:
        println()
        integ_runner.stop()
        println()

    if skipped_targets:
      println()

      for target in skipped_targets:
        req_version = test.util.get_prereq(target)
        println('Unable to run target %s, which requires tor version %s' % (target, req_version), ERROR)

      println()

  static_check_issues = {}

  if pyflakes_task and pyflakes_task.is_successful:
    for path, issues in pyflakes_task.result.items():
      for issue in issues:
        static_check_issues.setdefault(path, []).append(issue)
  elif not stem.util.test_tools.is_pyflakes_available():
    println('Static error checking requires pyflakes version 0.7.3 or later. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n', ERROR)

  if pep8_task and pep8_task.is_successful:
    for path, issues in pep8_task.result.items():
      for issue in issues:
        static_check_issues.setdefault(path, []).append(issue)
  elif not stem.util.test_tools.is_pep8_available():
    println('Style checks require pep8 version 1.4.2 or later. Please install it from...\n  http://pypi.python.org/pypi/pep8\n', ERROR)

  _print_static_issues(static_check_issues)

  runtime_label = '(%i seconds)' % (time.time() - start_time)

  if error_tracker.has_errors_occured():
    println('TESTING FAILED %s' % runtime_label, ERROR, STDERR)

    for line in error_tracker:
      println('  %s' % line, ERROR, STDERR)

    error_modules = error_tracker.get_modules()

    if len(error_modules) < 10 and not args.specific_test:
      println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

      for module in error_modules:
        println('  %s --test %s' % (' '.join(sys.argv), module), ERROR, STDERR)
  else:
    if skipped_tests > 0:
      println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

    println('TESTING PASSED %s\n' % runtime_label, SUCCESS)

  new_capabilities = test.util.get_new_capabilities()

  if new_capabilities:
    println(NEW_CAPABILITIES_FOUND, ERROR)

    for capability_type, msg in new_capabilities:
      println('  [%s] %s' % (capability_type, msg), ERROR)

  sys.exit(1 if error_tracker.has_errors_occured() else 0)
Example #35
def main():
  start_time = time.time()

  try:
    stem.prereq.check_requirements()
  except ImportError as exc:
    println('%s\n' % exc)
    sys.exit(1)

  test_config = stem.util.conf.get_config('test')
  test_config.load(os.path.join(STEM_BASE, 'test', 'settings.cfg'))

  try:
    args = test.arguments.parse(sys.argv[1:])
  except ValueError as exc:
    println(str(exc))
    sys.exit(1)

  if args.quiet:
    test.output.SUPPRESS_STDOUT = True

  if args.print_help:
    println(test.arguments.get_help())
    sys.exit()
  elif not args.run_unit and not args.run_integ:
    println('Nothing to run (for usage provide --help)\n')
    sys.exit()

  if not stem.prereq.is_mock_available():
    try:
      try:
        import unittest.mock as mock
      except ImportError:
        import mock

      println(MOCK_OUT_OF_DATE_MSG % mock.__version__)
    except ImportError:
      println(MOCK_UNAVAILABLE_MSG)

    if stem.util.system.is_available('pip'):
      println("You can get it by running 'sudo pip install mock'.")
    elif stem.util.system.is_available('apt-get'):
      println("You can get it by running 'sudo apt-get install python-mock'.")

    sys.exit(1)

  tor_version_check, pyflakes_task, pycodestyle_task = None, None, None

  if not args.specific_test:
    if stem.util.test_tools.is_pyflakes_available():
      pyflakes_task = PYFLAKES_TASK

    if stem.util.test_tools.is_pycodestyle_available():
      pycodestyle_task = PYCODESTYLE_TASK

  if args.run_integ:
    tor_version_check = Task('checking tor version', test.util.check_tor_version, (args.tor_path,))

  test.util.run_tasks(
    'INITIALISING',
    Task('checking stem version', test.util.check_stem_version),
    tor_version_check,
    Task('checking python version', test.util.check_python_version),
    Task('checking cryptography version', test.util.check_cryptography_version),
    Task('checking mock version', test.util.check_mock_version),
    Task('checking pyflakes version', test.util.check_pyflakes_version),
    Task('checking pycodestyle version', test.util.check_pycodestyle_version),
    Task('checking for orphaned .pyc files', test.util.clean_orphaned_pyc, (SRC_PATHS,)),
    Task('checking for unused tests', test.util.check_for_unused_tests, [(
      os.path.join(STEM_BASE, 'test', 'unit'),
      os.path.join(STEM_BASE, 'test', 'integ'),
    )]),
    pyflakes_task,
    pycodestyle_task,
  )

  # buffer that we log messages into so they can be printed after a test has finished

  logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
  stem.util.log.get_logger().addHandler(logging_buffer)

  # filters for how testing output is displayed

  error_tracker = test.output.ErrorTracker()

  output_filters = (
    error_tracker.get_filter(),
    test.output.runtimes,
    test.output.strip_module,
    test.output.align_results,
    test.output.colorize,
  )

  # Number of tests that we have skipped. This is only available with python
  # 2.7 or later because before that test results didn't have a 'skipped'
  # attribute.

  skipped_tests = 0

  if args.run_unit:
    test.output.print_divider('UNIT TESTS', True)
    error_tracker.set_category('UNIT TEST')

    for test_class in test.util.get_unit_tests(args.specific_test):
      run_result = _run_test(args, test_class, output_filters, logging_buffer)
      skipped_tests += len(getattr(run_result, 'skipped', []))

    println()

  if args.run_integ:
    test.output.print_divider('INTEGRATION TESTS', True)
    integ_runner = test.runner.get_runner()

    # Determine targets we don't meet the prereqs for. Warnings are given about
    # these at the end of the test run so they're more noticeable.

    our_version = stem.version.get_system_tor_version(args.tor_path)
    skipped_targets = []
    integ_setup_thread = None

    if not args.specific_test or 'test.integ.installation'.startswith(args.specific_test):
      integ_setup_thread = test.integ.installation.setup()

    for target in args.run_targets:
      # check if we meet this target's tor version prerequisites

      target_prereq = test.util.get_prereq(target)

      if target_prereq and our_version < target_prereq:
        skipped_targets.append(target)
        continue

      error_tracker.set_category(target)

      try:
        integ_runner.start(target, args.attribute_targets, args.tor_path, extra_torrc_opts = test.util.get_torrc_entries(target))

        println('Running tests...\n', STATUS)

        owner = None
        if integ_runner.is_accessible():
          owner = integ_runner.get_tor_controller(True)  # controller to own our main Tor process

        for test_class in test.util.get_integ_tests(args.specific_test):
          run_result = _run_test(args, test_class, output_filters, logging_buffer)
          skipped_tests += len(getattr(run_result, 'skipped', []))

        if owner:
          owner.close()

        # We should have joined on all threads. If not, that indicates a
        # leak that is likely a bug and could disrupt further targets.

        if integ_setup_thread:
          integ_setup_thread.join()

        active_threads = threading.enumerate()

        if len(active_threads) > 1:
          println('Threads lingering after test run:', ERROR)

          for lingering_thread in active_threads:
            println('  %s' % lingering_thread, ERROR)

          break
      except KeyboardInterrupt:
        println('  aborted starting tor: keyboard interrupt\n', ERROR)
        break
      except ValueError as exc:
        # can arise if get_torrc_entries() runs into bad settings.cfg data

        println(str(exc), ERROR)
        break
      except OSError:
        error_tracker.register_error()
      finally:
        if integ_setup_thread:
          test.integ.installation.clean()

        println()
        integ_runner.stop()
        println()

    if skipped_targets:
      println()

      for target in skipped_targets:
        req_version = test.util.get_prereq(target)
        println('Unable to run target %s, which requires tor version %s' % (target, req_version), ERROR)

      println()

  static_check_issues = {}

  if pyflakes_task and pyflakes_task.is_successful:
    for path, issues in pyflakes_task.result.items():
      for issue in issues:
        static_check_issues.setdefault(path, []).append(issue)
  elif not stem.util.test_tools.is_pyflakes_available():
    println('Static error checking requires pyflakes version 0.7.3 or later. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n', ERROR)

  if pycodestyle_task and pycodestyle_task.is_successful:
    for path, issues in pycodestyle_task.result.items():
      for issue in issues:
        static_check_issues.setdefault(path, []).append(issue)
  elif not stem.util.test_tools.is_pycodestyle_available():
    println('Style checks require pycodestyle version 1.4.2 or later. Please install it from...\n  http://pypi.python.org/pypi/pycodestyle\n', ERROR)

  _print_static_issues(static_check_issues)

  runtime_label = '(%i seconds)' % (time.time() - start_time)

  if error_tracker.has_errors_occured():
    println('TESTING FAILED %s' % runtime_label, ERROR, STDERR)

    for line in error_tracker:
      println('  %s' % line, ERROR, STDERR)

    error_modules = error_tracker.get_modules()

    if len(error_modules) < 10 and not args.specific_test:
      println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

      for module in error_modules:
        println('  %s --test %s' % (' '.join(sys.argv), module), ERROR, STDERR)
  else:
    if skipped_tests > 0:
      println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

    println('TESTING PASSED %s\n' % runtime_label, SUCCESS)

  new_capabilities = test.util.get_new_capabilities()

  if new_capabilities:
    println(NEW_CAPABILITIES_FOUND, ERROR)

    for capability_type, msg in new_capabilities:
      println('  [%s] %s' % (capability_type, msg), ERROR)

  sys.exit(1 if error_tracker.has_errors_occured() else 0)
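
The prerequisite filtering above leans on stem's version objects, which support ordinary comparison operators. A minimal sketch of the same check, assuming stem is installed; the target name and the requirement mapping are illustrative placeholders, not stem's real tables:

import stem.version

# hypothetical mapping of test targets to the tor version they require
TARGET_PREREQS = {
  'RUN_COOKIE': stem.version.Version('0.2.3.16'),
}

def runnable_targets(targets, tor_path = 'tor'):
  our_version = stem.version.get_system_tor_version(tor_path)
  return [t for t in targets if t not in TARGET_PREREQS or our_version >= TARGET_PREREQS[t]]
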
Example #36
def _print_static_issues(args):
    static_check_issues = {}

    # If we're doing some sort of testing (unit or integ) and pyflakes is
    # available then use it. Its static checks are pretty quick so there's not
    # much overhead in including it with all tests.

    if args.run_unit or args.run_integ:
        if stem.util.system.is_available("pyflakes"):
            static_check_issues.update(
                test.util.get_pyflakes_issues(SRC_PATHS))
        else:
            println(
                "Static error checking requires pyflakes. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n",
                ERROR)

    if args.run_style:
        if stem.util.system.is_available("pep8"):
            static_check_issues.update(
                test.util.get_stylistic_issues(SRC_PATHS))
        else:
            println(
                "Style checks require pep8. Please install it from...\n  http://pypi.python.org/pypi/pep8\n",
                ERROR)

    if static_check_issues:
        println("STATIC CHECKS", STATUS)

        for file_path in static_check_issues:
            println("* %s" % file_path, STATUS)

            for line_number, msg in static_check_issues[file_path]:
                line_count = "%-4s" % line_number
                println("  line %s - %s" % (line_count, msg))

            println()
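
For reference, the static_check_issues mapping that _print_static_issues() consumes is keyed by file path, with each value a list of (line number, message) tuples. A hand-written example of the expected shape; the path and messages are made up for illustration, while in practice the dict is populated from the pyflakes and pep8 results:

static_check_issues = {
    'stem/control.py': [
        (147, "'socket' imported but unused"),
        (309, 'E501 line too long'),
    ],
}
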
Example #37
def main():
  start_time = time.time()

  try:
    stem.prereq.check_requirements()
  except ImportError, exc:
    println("%s\n" % exc)
    sys.exit(1)

  test_config = stem.util.conf.get_config("test")
  test_config.load(os.path.join(STEM_BASE, "test", "settings.cfg"))

  try:
    args = _get_args(sys.argv[1:])
  except getopt.GetoptError, exc:
    println("%s (for usage provide --help)" % exc)
    sys.exit(1)
  except ValueError, exc:
    println(str(exc))
    sys.exit(1)

  if args.print_help:
    println(test.util.get_help_message())
    sys.exit()
  elif not args.run_unit and not args.run_integ and not args.run_style:
    println("Nothing to run (for usage provide --help)\n")
    sys.exit()

  test.util.run_tasks(
    "INITIALISING",
    Task("checking stem version", test.util.check_stem_version),
Example #38
def main():
    start_time = time.time()

    try:
        stem.prereq.check_requirements()
    except ImportError, exc:
        println("%s\n" % exc)
        sys.exit(1)

    test_config = stem.util.conf.get_config("test")
    test_config.load(os.path.join(STEM_BASE, "test", "settings.cfg"))

    try:
        args = _get_args(sys.argv[1:])
    except getopt.GetoptError, exc:
        println("%s (for usage provide --help)" % exc)
        sys.exit(1)
    except ValueError, exc:
        println(str(exc))
        sys.exit(1)

    if args.print_help:
        println(test.util.get_help_message())
        sys.exit()
    elif not args.run_unit and not args.run_integ and not args.run_style:
        println("Nothing to run (for usage provide --help)\n")
        sys.exit()

    test.util.run_tasks(
        "INITIALISING",
        Task("checking stem version", test.util.check_stem_version),
Example #39
File: run_tests.py Project: soult/stem
def main():
  start_time = time.time()

  try:
    stem.prereq.check_requirements()
  except ImportError as exc:
    println("%s\n" % exc)
    sys.exit(1)

  test_config = stem.util.conf.get_config("test")
  test_config.load(os.path.join(STEM_BASE, "test", "settings.cfg"))

  try:
    args = _get_args(sys.argv[1:])
  except getopt.GetoptError as exc:
    println("%s (for usage provide --help)" % exc)
    sys.exit(1)
  except ValueError as exc:
    println(str(exc))
    sys.exit(1)

  if args.print_help:
    println(test.util.get_help_message())
    sys.exit()
  elif not args.run_unit and not args.run_integ and not args.run_style:
    println("Nothing to run (for usage provide --help)\n")
    sys.exit()

  if not stem.prereq.is_mock_available():
    try:
      import mock
      println(MOCK_OUT_OF_DATE_MSG % mock.__version__)
    except ImportError:
      println(MOCK_UNAVAILABLE_MSG)

    if stem.util.system.is_available('pip'):
      println("You can get it by running 'sudo pip install mock'.")
    elif stem.util.system.is_available('apt-get'):
      println("You can get it by running 'sudo apt-get install python-mock'.")

    sys.exit(1)

  test.util.run_tasks(
    "INITIALISING",
    Task("checking stem version", test.util.check_stem_version),
    Task("checking python version", test.util.check_python_version),
    Task("checking pycrypto version", test.util.check_pycrypto_version),
    Task("checking mock version", test.util.check_mock_version),
    Task("checking pyflakes version", test.util.check_pyflakes_version),
    Task("checking pep8 version", test.util.check_pep8_version),
    Task("checking for orphaned .pyc files", test.util.clean_orphaned_pyc, (SRC_PATHS,)),
    Task("checking for unused tests", test.util.check_for_unused_tests, ((os.path.join(STEM_BASE, 'test'),),)),
  )

  if args.run_python3 and sys.version_info[0] != 3:
    test.util.run_tasks(
      "EXPORTING TO PYTHON 3",
      Task("checking requirements", test.util.python3_prereq),
      Task("cleaning prior export", test.util.python3_clean, (not args.run_python3_clean,)),
      Task("exporting python 3 copy", test.util.python3_copy_stem),
      Task("running tests", test.util.python3_run_tests),
    )

    println("BUG: python3_run_tests() should have terminated our process", ERROR)
    sys.exit(1)

  # buffer that we log messages into so they can be printed after a test has finished

  logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
  stem.util.log.get_logger().addHandler(logging_buffer)

  # filters for how testing output is displayed

  error_tracker = test.output.ErrorTracker()

  output_filters = (
    error_tracker.get_filter(),
    test.output.strip_module,
    test.output.align_results,
    test.output.colorize,
  )

  # Number of tests that we have skipped. This is only available with python
  # 2.7 or later because before that test results didn't have a 'skipped'
  # attribute.

  skipped_tests = 0

  if args.run_unit:
    test.output.print_divider("UNIT TESTS", True)
    error_tracker.set_category("UNIT TEST")

    for test_class in test.util.get_unit_tests(args.test_prefix):
      run_result = _run_test(test_class, output_filters, logging_buffer)
      skipped_tests += len(getattr(run_result, 'skipped', []))

    println()

  if args.run_integ:
    test.output.print_divider("INTEGRATION TESTS", True)
    integ_runner = test.runner.get_runner()

    # Determine targets we don't meet the prereqs for. Warnings are given about
    # these at the end of the test run so they're more noticeable.

    our_version = stem.version.get_system_tor_version(args.tor_path)
    skipped_targets = []

    for target in args.run_targets:
      # check if we meet this target's tor version prerequisites

      target_prereq = test.util.get_prereq(target)

      if target_prereq and our_version < target_prereq:
        skipped_targets.append(target)
        continue

      error_tracker.set_category(target)

      try:
        integ_runner.start(target, args.attribute_targets, args.tor_path, extra_torrc_opts = test.util.get_torrc_entries(target))

        println("Running tests...\n", STATUS)

        owner = None
        if integ_runner.is_accessible():
          owner = integ_runner.get_tor_controller(True)  # controller to own our main Tor process

        for test_class in test.util.get_integ_tests(args.test_prefix):
          run_result = _run_test(test_class, output_filters, logging_buffer)
          skipped_tests += len(getattr(run_result, 'skipped', []))

        if owner:
          owner.close()

        # We should have joined on all threads. If not, that indicates a
        # leak that is likely a bug and could disrupt further targets.

        active_threads = threading.enumerate()

        if len(active_threads) > 1:
          println("Threads lingering after test run:", ERROR)

          for lingering_thread in active_threads:
            println("  %s" % lingering_thread, ERROR)

          error_tracker.register_error()
          break
      except KeyboardInterrupt:
        println("  aborted starting tor: keyboard interrupt\n", ERROR)
        break
      except ValueError as exc:
        # can arise if get_torrc_entries() runs into bad settings.cfg data

        println(str(exc), ERROR)
        break
      except OSError:
        error_tracker.register_error()
      finally:
        integ_runner.stop()

    if skipped_targets:
      println()

      for target in skipped_targets:
        req_version = test.util.get_prereq(target)
        println("Unable to run target %s, which requires tor version %s" % (target, req_version), ERROR)

      println()

  if not stem.prereq.is_python_3():
    _print_static_issues(args)

  runtime_label = "(%i seconds)" % (time.time() - start_time)

  if error_tracker.has_errors_occured():
    println("TESTING FAILED %s" % runtime_label, ERROR)

    for line in error_tracker:
      println("  %s" % line, ERROR)
  else:
    if skipped_tests > 0:
      println("%i TESTS WERE SKIPPED" % skipped_tests, STATUS)

    println("TESTING PASSED %s\n" % runtime_label, SUCCESS)

  sys.exit(1 if error_tracker.has_errors_occured() else 0)
Example #40
File: runner.py Project: sammyshj/stem
def print_init_line(line):
  println('  %s' % line, SUBSTATUS)
Example #41
        println("skipped", STATUS)
      else:
        os.makedirs(self._test_dir)
        println("done", STATUS)
    except OSError, exc:
      test.output.print_error("failed (%s)" % exc)
      raise exc

    # Tor checks during startup that the directory a control socket resides in
    # is only accessible by the tor user (and refuses to finish starting if it
    # isn't).

    if Torrc.SOCKET in self._custom_opts:
      try:
        socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)
        println("  making control socket directory (%s)... " % socket_dir, STATUS, NO_NL)

        if os.path.exists(socket_dir) and stat.S_IMODE(os.stat(socket_dir).st_mode) == 0700:
          println("skipped", STATUS)
        else:
          if not os.path.exists(socket_dir):
            os.makedirs(socket_dir)

          os.chmod(socket_dir, 0700)
          println("done", STATUS)
      except OSError, exc:
        test.output.print_error("failed (%s)" % exc)
        raise exc

    # configures logging
    logging_path = CONFIG["integ.log"]
Example #42
def main():
    start_time = time.time()

    major_version, minor_version = sys.version_info[0:2]

    if major_version < 3 or (major_version == 3 and minor_version < 6):
        println('stem requires python version 3.6 or greater\n')
        sys.exit(1)

    signal.signal(signal.SIGABRT, log_traceback)
    signal.signal(signal.SIGUSR1, log_traceback)

    test_config = stem.util.conf.get_config('test')
    test_config.load(os.path.join(test.STEM_BASE, 'test', 'settings.cfg'))

    if 'STEM_TEST_CONFIG' in os.environ:
        test_config.load(os.environ['STEM_TEST_CONFIG'])

    try:
        args = test.arguments.parse(sys.argv[1:])
        test.task.TOR_VERSION.args = (args.tor_path, )
        test.output.SUPPRESS_STDOUT = args.quiet
    except ValueError as exc:
        println(str(exc))
        sys.exit(1)

    if args.print_help:
        println(test.arguments.get_help())
        sys.exit()
    elif not args.run_unit and not args.run_integ:
        println('Nothing to run (for usage provide --help)\n')
        sys.exit()

    test.task.run(
        'INITIALISING',
        test.task.STEM_VERSION,
        test.task.TOR_VERSION if args.run_integ else None,
        test.task.PYTHON_VERSION,
        test.task.PLATFORM_VERSION,
        test.task.CRYPTO_VERSION,
        test.task.PYFLAKES_VERSION,
        test.task.PYCODESTYLE_VERSION,
        test.task.CLEAN_PYC,
        test.task.UNUSED_TESTS,
        test.task.IMPORT_TESTS,
        test.task.REMOVE_TOR_DATA_DIR if args.run_integ else None,
        test.task.PYFLAKES_TASK if not args.specific_test else None,
        test.task.PYCODESTYLE_TASK if not args.specific_test else None,
    )

    # Test logging. If '--log-file' is provided we log to that location,
    # otherwise we buffer messages and log to stdout after each test completes.

    logging_buffer = queue.Queue()

    if args.logging_runlevel:
        if args.logging_path:
            handler = logging.FileHandler(args.logging_path, mode='w')
            handler.setLevel(stem.util.log.logging_level(
                args.logging_runlevel))
            handler.setFormatter(stem.util.log.FORMATTER)
        else:
            handler = logging.handlers.QueueHandler(logging_buffer)
            handler.setLevel(stem.util.log.logging_level(
                args.logging_runlevel))

        stem.util.log.get_logger().addHandler(handler)

    # filters for how testing output is displayed

    error_tracker = test.output.ErrorTracker()

    output_filters = (
        error_tracker.get_filter(),
        test.output.runtimes,
        test.output.strip_module,
        test.output.align_results,
        test.output.colorize,
    )

    # Number of tests that we have skipped. This is only available with python
    # 2.7 or later because before that test results didn't have a 'skipped'
    # attribute.

    skipped_tests = 0

    if args.run_integ:
        default_test_dir = stem.util.system.expand_path(
            CONFIG['integ.test_directory'], test.STEM_BASE)
        async_args = test.AsyncTestArgs(default_test_dir, args.tor_path)

        for module_str in stem.util.test_tools.ASYNC_TESTS:
            module = importlib.import_module(module_str.rsplit('.', 1)[0])
            test_classes = [
                v for k, v in module.__dict__.items() if k.startswith('Test')
            ]

            if len(test_classes) != 1:
                print('BUG: Detected multiple tests for %s: %s' %
                      (module_str, ', '.join(c.__name__ for c in test_classes)))
                sys.exit(1)

            test_classes[0].run_tests(async_args)

    if args.run_unit:
        test.output.print_divider('UNIT TESTS', True)
        error_tracker.set_category('UNIT TEST')

        for test_class in get_unit_tests(args.specific_test,
                                         args.exclude_test):
            run_result = _run_test(args, test_class, args.exclude_test,
                                   output_filters)
            test.output.print_logging(logging_buffer)
            skipped_tests += len(getattr(run_result, 'skipped', []))

        println()

    if args.run_integ:
        test.output.print_divider('INTEGRATION TESTS', True)
        integ_runner = test.runner.get_runner()

        for target in args.run_targets:
            error_tracker.set_category(target)

            try:
                integ_runner.start(target, args.attribute_targets,
                                   args.tor_path)

                println('Running tests...\n', STATUS)

                for test_class in get_integ_tests(args.specific_test,
                                                  args.exclude_test):
                    run_result = _run_test(args, test_class, args.exclude_test,
                                           output_filters)
                    test.output.print_logging(logging_buffer)
                    skipped_tests += len(getattr(run_result, 'skipped', []))

                    if not integ_runner.assert_tor_is_running():
                        # our tor process died

                        error_tracker.register_error()
                        break
            except KeyboardInterrupt:
                println('  aborted starting tor: keyboard interrupt\n', ERROR)
                break
            except ValueError as exc:
                println(str(exc),
                        ERROR)  # can arise if there's bad settings.cfg data
                break
            except OSError:
                error_tracker.register_error()
            finally:
                println()
                integ_runner.stop()
                println()

                # We should have joined on all threads. If not, that indicates a
                # leak that is likely a bug and could disrupt further targets.

                active_threads = threading.enumerate()

                if len(active_threads) > 1:
                    println('Threads lingering after test run:', ERROR)

                    for lingering_thread in active_threads:
                        println('  %s' % lingering_thread, ERROR)

                    break

    static_check_issues = {}

    for task in (test.task.PYFLAKES_TASK, test.task.PYCODESTYLE_TASK):
        if not task.is_available and task.unavailable_msg:
            println(task.unavailable_msg, ERROR)
        else:
            task.join()  # no-op if these haven't been run

            if task.result:
                for path, issues in task.result.items():
                    for issue in issues:
                        static_check_issues.setdefault(path, []).append(issue)

    _print_static_issues(static_check_issues)

    if error_tracker.has_errors_occured():
        println('TESTING FAILED (%i seconds)' % (time.time() - start_time),
                ERROR, STDERR)

        for line in error_tracker:
            println('  %s' % line, ERROR, STDERR)

        error_modules = error_tracker.get_modules()

        if len(error_modules) < 10 and not args.specific_test:
            println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

            for module in error_modules:
                println(
                    '  %s --test %s' % (' '.join(
                        sys.argv), test.arguments.crop_module_name(module)),
                    ERROR, STDERR)
    else:
        if skipped_tests > 0:
            println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

        println('TESTING PASSED (%i seconds)\n' % (time.time() - start_time),
                SUCCESS)

    new_capabilities = test.get_new_capabilities()

    if new_capabilities:
        println(NEW_CAPABILITIES_FOUND, ERROR)

        for capability_type, msg in sorted(new_capabilities,
                                           key=lambda x: x[1]):
            println('  [%s] %s' % (capability_type, msg), ERROR)

    sys.exit(1 if error_tracker.has_errors_occured() else 0)
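
The QueueHandler branch above is what defers log output until a test finishes: records accumulate in a queue.Queue and get drained afterwards. A minimal sketch of that buffer-then-flush wiring using only the standard library (the logger name and runlevel are illustrative):

import logging
import logging.handlers
import queue

logging_buffer = queue.Queue()

logger = logging.getLogger('stem')
logger.setLevel(logging.DEBUG)

handler = logging.handlers.QueueHandler(logging_buffer)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

logger.debug('buffered until the test finishes')

# ... after the test completes, drain and print what was buffered

while not logging_buffer.empty():
  print(logging_buffer.get_nowait().getMessage())
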
Example #43
def print_init_line(line):
  println('  %s' % line, SUBSTATUS)
Example #44
File: runner.py Project: jt-wang/stem
    def start(self, run_target, attribute_targets, tor_cmd, extra_torrc_opts):
        """
    Makes temporary testing resources and starts tor, blocking until it
    completes.

    :param Target run_target: configuration we're running with
    :param list attribute_targets: **Targets** for our non-configuration attributes
    :param str tor_cmd: command to start tor with
    :param list extra_torrc_opts: additional torrc options for our test instance

    :raises: OSError if unable to run test preparations or start tor
    """

        with self._runner_lock:
            self.run_target = run_target
            self.attribute_targets = attribute_targets

            # if we're holding on to a tor process (running or not) then clean up after
            # it so we can start a fresh instance

            if self._tor_process:
                self.stop()

            println("Setting up a test instance...", STATUS)

            # if 'test_directory' is unset then we make a new data directory in /tmp
            # and clean it up when we're done

            config_test_dir = CONFIG["integ.test_directory"]

            if config_test_dir:
                self._test_dir = stem.util.system.expand_path(
                    config_test_dir, STEM_BASE)
            else:
                self._test_dir = tempfile.mktemp("-stem-integ")

            original_cwd, data_dir_path = os.getcwd(), self._test_dir

            if Target.RELATIVE in self.attribute_targets:
                tor_cwd = os.path.dirname(self._test_dir)

                if not os.path.exists(tor_cwd):
                    os.makedirs(tor_cwd)

                os.chdir(tor_cwd)
                data_dir_path = "./%s" % os.path.basename(self._test_dir)

            self._tor_cmd = tor_cmd
            self._custom_opts = extra_torrc_opts
            self._torrc_contents = BASE_TORRC % data_dir_path

            if extra_torrc_opts:
                self._torrc_contents += "\n".join(extra_torrc_opts) + "\n"

            try:
                self._tor_cwd = os.getcwd()
                self._run_setup()
                self._start_tor(tor_cmd)

                # strip the testing directory from recv_message responses if we're
                # simulating a chroot setup

                if Target.CHROOT in self.attribute_targets and not self._original_recv_message:
                    # TODO: when we have a function for telling stem the chroot we'll
                    # need to set that too

                    self._original_recv_message = stem.socket.recv_message
                    self._chroot_path = data_dir_path

                    def _chroot_recv_message(control_file):
                        return self._original_recv_message(
                            _MockChrootFile(control_file, data_dir_path))

                    stem.socket.recv_message = _chroot_recv_message

                # revert our cwd back to normal
                if Target.RELATIVE in self.attribute_targets:
                    os.chdir(original_cwd)
            except OSError as exc:
                raise exc
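
For orientation, a sketch of how a harness might drive this runner, based only on the methods shown in these snippets; the run target constant and empty option lists are placeholders, and error handling is elided:

runner = test.runner.get_runner()

try:
    runner.start(Target.RUN_OPEN, [], 'tor', extra_torrc_opts = [])  # Target.RUN_OPEN is a hypothetical run target
    controller = runner.get_tor_controller(True)  # controller owning the test's tor process

    # ... exercise integration tests against the controller ...
finally:
    runner.stop()
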
Example #45
def _run_test(args, test_class, output_filters, logging_buffer):
  start_time = time.time()

  if args.verbose:
    test.output.print_divider(test_class)
  else:
    # Test classes look like...
    #
    #   test.unit.util.conf.TestConf.test_parse_enum_csv
    #
    # We want to strip the 'test.unit.' or 'test.integ.' prefix since it's
    # redundant. We also want to drop the test class name. The individual test
    # name at the end is optional (only present if we used the '--test'
    # argument).

    label_comp = test_class.split('.')[2:]
    del label_comp[-1 if label_comp[-1][0].isupper() else -2]
    label = '.'.join(label_comp)

    label = '  %s...' % label
    label = '%-54s' % label

    println(label, STATUS, NO_NL)

  try:
    suite = unittest.TestLoader().loadTestsFromName(test_class)
  except AttributeError:
    # should only come up if user provided '--test' for something that doesn't exist
    println(' no such test', ERROR)
    return None
  except Exception as exc:
    println(' failed', ERROR)
    traceback.print_exc()
    return None

  test_results = StringIO()
  run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)

  if args.verbose:
    println(test.output.apply_filters(test_results.getvalue(), *output_filters))
  elif not run_result.failures and not run_result.errors:
    println(' success (%0.2fs)' % (time.time() - start_time), SUCCESS)
  else:
    if args.quiet:
      println(label, STATUS, NO_NL, STDERR)
      println(' failed (%0.2fs)' % (time.time() - start_time), ERROR, STDERR)
      println(test.output.apply_filters(test_results.getvalue(), *output_filters), STDERR)
    else:
      println(' failed (%0.2fs)' % (time.time() - start_time), ERROR)
      println(test.output.apply_filters(test_results.getvalue(), *output_filters), NO_NL)

  test.output.print_logging(logging_buffer)

  return run_result
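
To make the label derivation concrete, this is what the comment in the else branch describes, traced by hand:

test_class = 'test.unit.util.conf.TestConf.test_parse_enum_csv'

label_comp = test_class.split('.')[2:]  # ['util', 'conf', 'TestConf', 'test_parse_enum_csv']

# The last component starts lowercase (an individual test), so the class
# name one slot earlier is dropped; otherwise the last component itself
# (the class) would be.
del label_comp[-1 if label_comp[-1][0].isupper() else -2]

print('.'.join(label_comp))  # util.conf.test_parse_enum_csv
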
Example #46
File: runner.py Project: jt-wang/stem
    def _run_setup(self):
        """
    Makes a temporary runtime resources of our integration test instance.

    :raises: OSError if unsuccessful
    """

        # makes a temporary data directory if needed
        try:
            println("  making test directory (%s)... " % self._test_dir,
                    STATUS, NO_NL)

            if os.path.exists(self._test_dir):
                println("skipped", STATUS)
            else:
                os.makedirs(self._test_dir)
                println("done", STATUS)
        except OSError as exc:
            println("failed (%s)" % exc, ERROR)
            raise exc

        # Tor checks during startup that the directory a control socket resides in
        # is only accessible by the tor user (and refuses to finish starting if it
        # isn't).

        if Torrc.SOCKET in self._custom_opts:
            try:
                socket_dir = os.path.dirname(CONTROL_SOCKET_PATH)
                println(
                    "  making control socket directory (%s)... " % socket_dir,
                    STATUS, NO_NL)

                if os.path.exists(socket_dir) and stat.S_IMODE(
                        os.stat(socket_dir).st_mode) == 0o700:
                    println("skipped", STATUS)
                else:
                    if not os.path.exists(socket_dir):
                        os.makedirs(socket_dir)

                    os.chmod(socket_dir, 0o700)
                    println("done", STATUS)
            except OSError as exc:
                println("failed (%s)" % exc, ERROR)
                raise exc

        # configures logging
        logging_path = CONFIG["integ.log"]

        if logging_path:
            logging_path = stem.util.system.expand_path(
                logging_path, STEM_BASE)
            println("  configuring logger (%s)... " % logging_path, STATUS,
                    NO_NL)

            # delete the old log
            if os.path.exists(logging_path):
                os.remove(logging_path)

            logging.basicConfig(
                filename=logging_path,
                level=logging.DEBUG,
                format='%(asctime)s [%(levelname)s] %(message)s',
                datefmt='%D %H:%M:%S',
            )

            println("done", STATUS)
        else:
            println("  configuring logger... skipped", STATUS)

        # writes our testing torrc
        torrc_dst = os.path.join(self._test_dir, "torrc")
        try:
            println("  writing torrc (%s)... " % torrc_dst, STATUS, NO_NL)

            torrc_file = open(torrc_dst, "w")
            torrc_file.write(self._torrc_contents)
            torrc_file.close()

            println("done", STATUS)

            for line in self._torrc_contents.strip().splitlines():
                println("    %s" % line.strip(), SUBSTATUS)

            println()
        except Exception as exc:
            println("failed (%s)\n" % exc, ERROR)
            raise OSError(exc)
Example #47
def _print_static_issues(args):
  static_check_issues = {}

  # If we're doing some sort of testing (unit or integ) and pyflakes is
  # available then use it. Its static checks are pretty quick so there's not
  # much overhead in including it with all tests.

  if args.run_unit or args.run_integ:
    if stem.util.system.is_available("pyflakes"):
      static_check_issues.update(test.util.get_pyflakes_issues(SRC_PATHS))
    else:
      println("Static error checking requires pyflakes. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n", ERROR)

  if args.run_style:
    if stem.util.system.is_available("pep8"):
      static_check_issues.update(test.util.get_stylistic_issues(SRC_PATHS))
    else:
      println("Style checks require pep8. Please install it from...\n  http://pypi.python.org/pypi/pep8\n", ERROR)

  if static_check_issues:
    println("STATIC CHECKS", STATUS)

    for file_path in static_check_issues:
      println("* %s" % file_path, STATUS)

      for line_number, msg in static_check_issues[file_path]:
        line_count = "%-4s" % line_number
        println("  line %s - %s" % (line_count, msg))

      println()