示例#1
0
class TestCaseTest(TestCase):
    """Exercises the xUnit framework pieces: WasRun, TestResult, TestSuite."""

    def setUp(self):
        # Fresh collaborators for every test so state never leaks across cases.
        self.result = TestResult()
        self.test = WasRun("testMethod")
        self.suite = TestSuite()

    def testRunning(self):
        self.test.run(self.result)
        assert self.test.wasRun

    def testSetUp(self):
        self.test.run(self.result)
        assert self.test.wasSetUp

    def testTemplateMethod(self):
        # run() must invoke setUp, the test method, then tearDown, in order.
        self.test.run(self.result)
        assert self.test.log == "setUp testMethod tearDown"

    def testResult(self):
        self.test.run(self.result)
        assert self.result.summary() == "1 run, 0 failed"

    def testFailedResult(self):
        broken = WasRun("testBrokenMethod")
        broken.run(self.result)
        assert self.result.summary() == "1 run, 1 failed"

    def testSuite(self):
        self.suite.add(WasRun("testMethod"))
        self.suite.add(WasRun("testBrokenMethod"))
        self.suite.run(self.result)
        assert self.result.summary() == "2 run, 1 failed"
示例#2
0
    def __enter_class_context_managers(self, fixture_methods, callback):
        """Transform each fixture_method into a context manager with contextlib.contextmanager, enter them recursively, and call callback

        Each fixture's enter (setup) and exit (teardown) half is timed in
        its own TestResult and bracketed by class setup/teardown events.
        """
        if fixture_methods:
            # Peel off the first fixture; the rest are handled by the
            # recursive call below so the managers nest like a `with` stack.
            fixture_method = fixture_methods[0]
            ctm = contextmanager(fixture_method)()

            # Setup half: run __enter__, recording success/failure in its
            # own TestResult between the class-setup events.
            enter_result = TestResult(fixture_method)
            enter_result.start()
            self.fire_event(self.EVENT_ON_RUN_CLASS_SETUP_METHOD, enter_result)
            if self.__execute_block_recording_exceptions(ctm.__enter__, enter_result, is_class_level=True):
                enter_result.end_in_success()
            self.fire_event(self.EVENT_ON_COMPLETE_CLASS_SETUP_METHOD, enter_result)

            # NOTE(review): this calls __enter_context_managers, not
            # __enter_class_context_managers — presumably a sibling helper
            # defined elsewhere in the class; confirm it is not a typo for
            # self-recursion.
            self.__enter_context_managers(fixture_methods[1:], callback)

            # Teardown half: run __exit__ with no exception info
            # (None, None, None), again timed and bracketed by events.
            exit_result = TestResult(fixture_method)
            exit_result.start()
            self.fire_event(self.EVENT_ON_RUN_CLASS_TEARDOWN_METHOD, exit_result)
            if self.__execute_block_recording_exceptions(
                lambda: ctm.__exit__(None, None, None), exit_result, is_class_level=True
            ):
                exit_result.end_in_success()
            self.fire_event(self.EVENT_ON_COMPLETE_CLASS_TEARDOWN_METHOD, exit_result)
        else:
            # Base case: every fixture has been entered; run the real work.
            callback()
示例#3
0
    def run(self):
        """Delegator method encapsulating the flow for executing a TestCase instance.

        This method tracks its progress in a TestResult with test_method 'run'.
        This TestResult is used as a signal when running in client/server mode:
        when the client is done running a TestCase and its fixtures, it sends
        this TestResult to the server during the EVENT_ON_COMPLETE_TEST_CASE
        phase.

        This could be handled better. See
        https://github.com/Yelp/Testify/issues/121.
        """

        # The TestResult constructor wants an actual method, which it inspects
        # to determine the method name (and class name, so it must be a method
        # and not a function!). self.run is as good a method as any.
        test_case_result = TestResult(self.run)
        test_case_result.start()
        self.fire_event(self.EVENT_ON_RUN_TEST_CASE, test_case_result)

        # Class-level setup, then the test methods nested inside the
        # class_setup_teardown context managers, then class-level teardown.
        self.__run_class_setup_fixtures()
        self.__enter_class_context_managers(self.class_setup_teardown_fixtures, self.__run_test_methods)
        self.__run_class_teardown_fixtures()

        # NOTE(review): reported as success unconditionally — this result is
        # a client/server progress signal, not a pass/fail verdict.
        test_case_result.end_in_success()
        self.fire_event(self.EVENT_ON_COMPLETE_TEST_CASE, test_case_result)
class TestCaseTest(TestCase):
    """Tests for the small xUnit framework (TestResult/TestSuite/WasRun).

    Bug fixed: testFailedResultFormatting previously called methods on an
    undefined local ``result`` instead of the ``self.result`` fixture,
    which raised NameError when the test ran.
    """

    def setUp(self):
        # Shared TestResult collaborator, recreated before every test.
        self.result = TestResult()

    def testTemplateMethod(self):
        test = WasRun("testMethod")
        test.run(self.result)
        assert "setUp testMethod tearDown" == test.log

    def testResult(self):
        test = WasRun("testMethod")
        test.run(self.result)
        assert "1 run, 0 failed" == self.result.summary()

    def testFailedResult(self):
        test = WasRun("testBrokenMethod")
        test.run(self.result)
        assert "1 run, 1 failed" == self.result.summary()

    def testFailedResultFormatting(self):
        # Was: result.testStarted() / result.testFailed() — NameError.
        self.result.testStarted()
        self.result.testFailed()
        assert "1 run, 1 failed" == self.result.summary()

    def testSuite(self):
        suite = TestSuite()
        suite.add(WasRun("testMethod"))
        suite.add(WasRun("testBrokenMethod"))
        suite.run(self.result)
        assert "2 run, 1 failed" == self.result.summary()
示例#5
0
 def post_result(self, url, user, psswd):
     """Post this test's outcome to *url*, authenticating as user/psswd.

     Builds the result payload from this object's fields and returns
     whatever TestResult.post returns.

     Bug fixed: the original used a Python 2 print statement, which is a
     SyntaxError under Python 3; the parenthesized single-argument form
     below behaves identically on both interpreters.
     """
     print("Posting result to  %s" % url)
     conf = {'result': int(self.result),
             'message': self.summary,
             'action': self.project_file,
             'test_id': int(self.test_id)}
     tr = TestResult(conf)
     return tr.post(url, user, psswd)
 def post_result(self, url, user, psswd):
     """Build the result payload for this run and POST it to *url*."""
     self.logger.debug("Posting result to  %s" % url)
     payload = {
         'result': int(self.result),
         'message': self.summary,
         'action': self.script_name,
         'test_id': int(self.test_id),
     }
     # Delegate the actual HTTP work to TestResult.
     return TestResult(payload).post(url, user, psswd)
示例#7
0
def run(fs):
    """Check that kernel support for filesystem *fs* is disabled.

    Returns a TestResult worth 1 point. The point is awarded when
    `modprobe -n -v <fs>` shows the module aliased to /bin/true AND no
    matching module appears in lsmod's output; on missing tools the
    result is returned flagged as an error instead.
    """
    mountingTestResult = TestResult()
    mountingTestResult.set_total_points(1)
    passedTest = True

    print("Validating that {} support is disabled...".format(fs))

    # A try/except guards against the required commands being absent.
    try:
        # Expected: `modprobe -n -v <fs>` prints "install /bin/true".
        # universal_newlines=True decodes output to str — without it the
        # `in` check below raises TypeError on Python 3 (str vs bytes).
        fsTest1 = subprocess.check_output(('modprobe', '-n', '-v', fs),
                                          universal_newlines=True)
        if "install /bin/true" not in fsTest1:
            report.report("(X)...Support for mounting {} is not disabled.".format(fs))
            passedTest = False

        # Expected: `lsmod | grep <fs>` produces no output.
        fsTest2 = subprocess.Popen(('lsmod'), stdout=subprocess.PIPE)

        # grep exits non-zero when nothing matches — which is the *passing*
        # case here — so CalledProcessError with status 1 is expected.
        try:
            subprocess.check_output(('grep', fs), stdin=fsTest2.stdout)
            passedTest = False
            # Bug fix: the original printed the literal "{}" because the
            # .format(fs) call was missing.
            print("(X) ... A module exists in /proc/modules for {}.".format(fs))
        except subprocess.CalledProcessError as e:
            # Exit status 1 means "no match" (pass); anything else is a real
            # grep failure. Checking returncode is sturdier than the
            # original's string comparison against str(e).
            if e.returncode != 1:
                passedTest = False

    except OSError as e:  # any required command missing entirely
        report.error("(!)...Tools do not support running a scan for {}\n".format(fs))
        mountingTestResult.set_error(True)
        mountingTestResult.set_error_status("      {}".format(e))
        return mountingTestResult

    # If any check above cleared passedTest, the test fails.
    if passedTest == True:
        report.report("......Passed!")
        mountingTestResult.set_points(1)
    else:
        report.mitigation("      Mitigation: run install {} /bin/true".format(fs))
        report.report("......Failed!")

    # Send up the result
    return mountingTestResult
 def post_result(self, url, user, psswd):
     """Assemble this run's outcome and post it to *url* as user/psswd."""
     self.logger.debug("Posting result to  %s" % url)
     result_fields = {'result': int(self.result),
                      'message': self.summary,
                      'action': self.script_name,
                      'test_id': int(self.test_id)}
     reporter = TestResult(result_fields)
     return reporter.post(url, user, psswd)
示例#9
0
def run(d, dname, full):
    """Verify that directory *d* is mounted on a separate partition.

    When *full* is true the check is worth 4 points (1 for the partition
    existing, plus up to 3 more from output_verification of the mount
    options); otherwise 1 point. Returns a TestResult with earned points.
    """
    partitionTestResult = TestResult()
    # Same if/else as the original, condensed into a conditional expression.
    partitionTestResult.set_total_points(4 if full == True else 1)
    partitionScore = 0
    print("Validating that {} has a separate partition...".format(d))
    try:
        # Expected: `mount | grep <d>` prints a line such as
        #   tmpfs on <d> type tmpfs (rw,nosuid,nodev,noexec,relatime)
        fsTest1 = subprocess.Popen(('mount'), stdout=subprocess.PIPE)
        try:
            fstTest1Output = subprocess.check_output(('grep', d), stdin=fsTest1.stdout)
            partitionScore += 1
            print("......Passed!")
            if full == True:
                partitionScore += output_verification(fstTest1Output, d, dname)
            # Bug fix: this was a Python 2 print statement
            # (`print partitionScore`), a SyntaxError under Python 3.
            print(partitionScore)
        except subprocess.CalledProcessError:
            # grep found no match: d is not on its own partition.
            report.report("(X)...{} does not exist in a separate partition.".format(d))
            mit(d, dname)

    except OSError:
        # Bug fix: the original passed the undefined name `fs` to .format()
        # on a placeholder-free string, raising NameError in this handler.
        report.report("(!)...Tools do not support the use of the mount command.")

    partitionTestResult.set_points(partitionScore)
    return partitionTestResult
示例#10
0
    def run(self):
        """Delegator method encapsulating the flow for executing a TestCase instance.

        This method tracks its progress in a TestResult with test_method 'run'.
        This TestResult is used as a signal when running in client/server mode:
        when the client is done running a TestCase and its fixtures, it sends
        this TestResult to the server during the EVENT_ON_COMPLETE_TEST_CASE
        phase.

        This could be handled better. See
        https://github.com/Yelp/Testify/issues/121.
        """

        # The TestResult constructor wants an actual method, which it inspects
        # to determine the method name (and class name, so it must be a method
        # and not a function!). self.run is as good a method as any.
        test_case_result = TestResult(self.run)
        test_case_result.start()
        self.fire_event(self.EVENT_ON_RUN_TEST_CASE, test_case_result)

        # Class fixtures run as a context manager: setup on enter, teardown
        # on exit, with the paired events fired around each phase.
        self._stage = self.STAGE_CLASS_SETUP
        with self.__test_fixtures.class_context(
                setup_callbacks=[
                    functools.partial(self.fire_event, self.EVENT_ON_RUN_CLASS_SETUP_METHOD),
                    functools.partial(self.fire_event, self.EVENT_ON_COMPLETE_CLASS_SETUP_METHOD),
                ],
                teardown_callbacks=[
                    functools.partial(self.fire_event, self.EVENT_ON_RUN_CLASS_TEARDOWN_METHOD),
                    functools.partial(self.fire_event, self.EVENT_ON_COMPLETE_CLASS_TEARDOWN_METHOD),
                ],
        ) as class_fixture_failures:
            # if we have class fixture failures, we're not going to bother
            # running tests, but we need to generate bogus results for them all
            # and mark them as failed.
            self.__run_test_methods(class_fixture_failures)
            self._stage = self.STAGE_CLASS_TEARDOWN

        # class fixture failures count towards our total
        self.failure_count += len(class_fixture_failures)

        # you might think that we would want to do this... but this is a
        # bogus test result used for reporting to the server. we always
        # have it report success, i guess.
        # for exc_info in fixture_failures:
        #     test_case_result.end_in_failure(exc_info)

        if not test_case_result.complete:
            test_case_result.end_in_success()

        self.fire_event(self.EVENT_ON_COMPLETE_TEST_CASE, test_case_result)
示例#11
0
    def __run_class_fixture(self,
                            fixture_method,
                            function_to_call,
                            stage,
                            callback_on_run_event,
                            callback_on_complete_event,
                            fire_events=True):
        """Run one class-level fixture, timing it in its own TestResult.

        Fires callback_on_run_event before and callback_on_complete_event
        after the call (unless fire_events is False); a fixture that raises
        increments self.failure_count. KeyboardInterrupt/SystemExit are
        recorded as an interruption and re-raised.
        """
        self._stage = stage

        result = TestResult(fixture_method)
        try:
            result.start()
            if fire_events:
                self.fire_event(callback_on_run_event, result)
            # Truthy return means the fixture completed without raising; on
            # failure the helper records the exception into `result` itself.
            if self.__execute_block_recording_exceptions(function_to_call,
                                                         result,
                                                         is_class_level=True):
                result.end_in_success()
            else:
                self.failure_count += 1
        except (KeyboardInterrupt, SystemExit):
            result.end_in_interruption(sys.exc_info())
            raise
        finally:
            # The completion event always fires, even on interruption.
            if fire_events:
                self.fire_event(callback_on_complete_event, result)
示例#12
0
    def run(self):
        """Run every algorithm against each data point in the test data set.

        Builds one TestResult per data point, stores the collection via
        set_results, flags all_passed when every result's status is "pass"
        (otherwise prints the results), and returns the list of TestResults.

        Cleanup: removed an unused `results = self.get_results()` local and
        collapsed the get+del of the 'name' key into a single pop().
        """
        data_set_test_results = []

        for data_point_raw in self.data:
            # Copy so the stored data set is not mutated by the pop below.
            data_point = data_point_raw.copy()
            test_name = data_point.pop("name")
            test_results = [algorithm.use(**data_point)
                            for algorithm in self.algorithms]

            data_set_test_results.append(
                TestResult(test_name, self.algorithms, test_results, data_point))

        self.set_results(data_set_test_results)

        if all(result.get_status() == "pass" for result in data_set_test_results):
            self.set_all_passed(True)
        else:
            self.print_results()

        return data_set_test_results
示例#13
0
    def __run_class_setup_fixtures(self):
        """Running the class's class_setup method chain."""
        self._stage = self.STAGE_CLASS_SETUP

        for fixture_method in self.class_setup_fixtures:
            # One TestResult per fixture for reporting.
            result = TestResult(fixture_method)

            try:
                # Notify listeners before the fixture runs.
                for callback in self.__on_run_test_method_callbacks:
                    callback(self, fixture_method)

                result.start()

                # Truthy return means the fixture ran without raising.
                if self.__execute_block_recording_exceptions(fixture_method, result, is_class_level=True):
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                # NOTE(review): sibling fixture runners call
                # end_in_interruption here; confirm end_in_incomplete is
                # intentional for class_setup.
                result.end_in_incomplete(sys.exc_info())
                for callback in self.__on_complete_test_method_callbacks:
                    callback(self, result)
                raise
            else:
                # Completion callbacks fire on the success path too.
                for callback in self.__on_complete_test_method_callbacks:
                    callback(self, result)

        # Legacy camelCase fixture support.
        self.__run_deprecated_fixture_method('classSetUp')
  def test_test_timings_trie(self):
    """Timings trie nests tests by path components, times in milliseconds."""
    timings = [
        [TestResult('foo/bar/baz.html', status='SUCCESS', elapsed_time=1.2)],
        [TestResult('bar.html', status='SUCCESS', elapsed_time=0.0001)],
    ]
    trie = generate_test_timings_trie(timings)

    # 1.2s becomes 1200ms; 0.0001s truncates to 0ms.
    expected_trie = {
      'bar.html': 0,
      'foo': {
        'bar': {
          'baz.html': 1200,
        }
      }
    }

    # Compare serialized forms so key ordering is normalized identically.
    self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
示例#15
0
class TestCaseTest(TestCase):
    """Self-tests for the xUnit framework (TestResult/TestSuite/WasRun)."""

    def setUp(self):
        self.result = TestResult()

    def testTemplateMethod(self):
        test = WasRun("testMethod")
        test.run(self.result)
        assert test.log == "setUp testMethod tearDown "

    def testResult(self):
        test = WasRun("testMethod")
        test.run(self.result)
        assert self.result.summary() == "1 run, 0 failed"

    def testFailedResult(self):
        test = WasRun("testBrokenMethod")
        test.run(self.result)
        assert self.result.summary() == "1 run, 1 failed"

    def testFailedResultFormatting(self):
        self.result.testStarted()
        self.result.testFailed()
        assert self.result.summary() == "1 run, 1 failed"

    def setupFailed(self):
        # NOTE: not collected as a test (name lacks the "test" prefix);
        # expects run() to raise when setUp is broken.
        test = BrokenSetup("testMethod")
        try:
            test.run(self.result)
            assert False
        except:
            pass

    def testSuiteContainsFailingSetup(self):
        suite = TestSuite()
        suite.add(BrokenSetup("testMethod"))
        suite.run(self.result)
        assert self.result.summary() == "1 run, 1 failed"

    def testSuite(self):
        suite = TestSuite()
        suite.add(WasRun("testMethod"))
        suite.add(WasRun("testBrokenMethod"))
        suite.run(self.result)
        assert self.result.summary() == "2 run, 1 failed"

    def tearDownIfFailed(self):
        # NOTE: not collected as a test (name lacks the "test" prefix).
        test = WasRun("testBrokenMethod")
        test.run(self.result)
    def run(self, duration=3):
        """Run stress-test iterations until *duration* seconds have elapsed.

        Each iteration builds fresh test data, runs every algorithm on it,
        and stops early (printing the failing result) on the first failure.
        """
        M = self.max_value
        n = self.data_size

        start_time = time.time()
        end_time = start_time + duration
        iterations = 0
        print(f"Running stress test iterations for {duration} seconds ...")

        while True:
            now = time.time()
            iterations += 1

            # Time budget exhausted: every completed iteration passed.
            if now >= end_time:
                print(f"\n------ ALL ITERATIONS PASSED ------")
                print(
                    f"------ ELAPSED TIME - {round(now - start_time, 2)} SECONDS ------"
                )
                print(f"------ ITERATIONS - {iterations} ------\n")
                break

            # Fresh randomized inputs for this iteration.
            test_data = self.build_data()
            test_name = f"Iteration_{iterations}"

            # Run every algorithm against the same inputs.
            iteration_results = [algo.use(**test_data) for algo in self.algorithms]

            result = TestResult(test_name, self.algorithms, iteration_results, test_data)

            if result.get_status() == "fail":
                self.print_fail(result)
                break
示例#17
0
def partition(d, dname):
    """Verify that *d* is mounted as a separate partition (worth 1 point).

    Greps the output of `mount` for *d*; on a miss, reports mitigation
    steps (systemctl unmask/enable of the corresponding mount unit).
    Returns a TestResult carrying the earned points.
    """
    partitionTestResult = TestResult()
    partitionTestResult.set_total_points(1)
    partitionScore = 0
    print("Validating that {} has a separate partition...".format(d))
    try:
        fsTest1 = subprocess.Popen(('mount'), stdout=subprocess.PIPE)
        try:
            # grep exits non-zero (CalledProcessError) when nothing matches.
            subprocess.check_output(('grep', d), stdin=fsTest1.stdout)
            partitionScore += 1
            print("......Passed!")
        except subprocess.CalledProcessError:
            report.report(
                "(X)...{} does not exist in a separate partition.".format(d))
            report.mitigation(
                "      Mitigation: run systemctl unmask {}.mount".format(
                    dname))
            report.mitigation(
                "                      systemctl enable {}.mount".format(
                    dname))
            print("......Failed!")

    except OSError:
        # Bug fix: the original passed the undefined name `fs` to .format()
        # on a placeholder-free string, raising NameError in this handler.
        report.report(
            "(!)...Tools do not support the use of the mount command.")

    partitionTestResult.set_points(partitionScore)
    return partitionTestResult
示例#18
0
    def run(self):
        """Delegator method encapsulating the flow for executing a TestCase instance.

        This method tracks its progress in a TestResult with test_method 'run'.
        This TestResult is used as a signal when running in client/server mode:
        when the client is done running a TestCase and its fixtures, it sends
        this TestResult to the server during the EVENT_ON_COMPLETE_TEST_CASE
        phase.

        This could be handled better. See
        https://github.com/Yelp/Testify/issues/121.
        """

        # The TestResult constructor wants an actual method, which it inspects
        # to determine the method name (and class name, so it must be a method
        # and not a function!). self.run is as good a method as any.
        test_case_result = TestResult(self.run)
        test_case_result.start()
        self.fire_event(self.EVENT_ON_RUN_TEST_CASE, test_case_result)

        # Normalize every class-level fixture into the class_setup_teardown
        # form so they can be entered/exited uniformly, in sorted order.
        fixtures = []
        all_class_fixtures = self.class_setup_fixtures + self.class_setup_teardown_fixtures + self.class_teardown_fixtures
        for fixture in sorted(all_class_fixtures, key=make_sortable_fixture_key):
            # We convert all class-level fixtures to
            # class_setup_teardown fixtures a) to handle all
            # class-level fixtures the same and b) to make the
            # behavior more predictable when a TestCase has different
            # fixtures interacting.
            if fixture._fixture_type == 'class_teardown':
                fixture = self.__convert_class_teardown_to_class_setup_teardown(fixture)
            elif fixture._fixture_type == 'class_setup':
                fixture = self.__convert_class_setup_to_class_setup_teardown(fixture)
            fixtures.append(fixture)

        # Test methods run innermost, wrapped by all class fixtures.
        self.__enter_class_context_managers(fixtures, self.__run_test_methods)

        # NOTE(review): reported as success unconditionally — this result is
        # a client/server progress signal, not a pass/fail verdict.
        test_case_result.end_in_success()
        self.fire_event(self.EVENT_ON_COMPLETE_TEST_CASE, test_case_result)
示例#19
0
    def __run_class_fixtures(self, stage, fixtures, callback_on_run_event,
                             callback_on_complete_event):
        """Set the current _stage, run a set of fixtures, calling callbacks before and after each."""
        self._stage = stage

        for fixture_method in fixtures:
            # One TestResult per fixture for reporting.
            result = TestResult(fixture_method)

            try:
                # Pre-run callbacks receive the result as a plain dict.
                for callback in self.__callbacks[callback_on_run_event]:
                    callback(result.to_dict())

                result.start()

                # Truthy return means the fixture ran without raising; on
                # failure the helper records the exception into `result`.
                if self.__execute_block_recording_exceptions(
                        fixture_method, result, is_class_level=True):
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise
            finally:
                # Completion callbacks always fire, even on interruption.
                for callback in self.__callbacks[callback_on_complete_event]:
                    callback(result.to_dict())
示例#20
0
def report_test_results_for_run(client, run_name, suite_id, case_name,
                                case_status):
    """Create a test run containing a single case and attach its result."""
    case = client.get_case_by_name(suite_id, case_name)
    milestone_id = client.get_milestone_by_name("8.0")['id']
    run_struct = client.test_run_struct(
        name=run_name,
        suite_id=int(suite_id),
        milestone_id=milestone_id,
        description=run_name,
        config_ids=None,
        include_all=True,
        assignedto=None,
        case_ids=[case['id']])
    the_run = client.add_run(run_struct)
    case_result = TestResult(case_name, None, case_status, 0)
    client.add_results_for_cases(the_run['id'], suite_id, [case_result])
示例#21
0
    def run(self):
        """Delegator method encapsulating the flow for executing a TestCase instance.

        This method tracks its progress in a TestResult with test_method 'run'.
        This TestResult is used as a signal when running in client/server mode:
        when the client is done running a TestCase and its fixtures, it sends
        this TestResult to the server during the EVENT_ON_COMPLETE_TEST_CASE
        phase.

        This could be handled better. See
        https://github.com/Yelp/Testify/issues/121.
        """

        # The TestResult constructor wants an actual method, which it inspects
        # to determine the method name (and class name, so it must be a method
        # and not a function!). self.run is as good a method as any.
        test_case_result = TestResult(self.run)
        test_case_result.start()
        self.fire_event(self.EVENT_ON_RUN_TEST_CASE, test_case_result)

        # Class fixtures run as a context manager: setup on enter, teardown
        # on exit, with the paired events fired around each phase.
        self._stage = self.STAGE_CLASS_SETUP
        with self.__test_fixtures.class_context(
                setup_callbacks=[
                    functools.partial(self.fire_event,
                                      self.EVENT_ON_RUN_CLASS_SETUP_METHOD),
                    functools.partial(
                        self.fire_event,
                        self.EVENT_ON_COMPLETE_CLASS_SETUP_METHOD),
                ],
                teardown_callbacks=[
                    functools.partial(self.fire_event,
                                      self.EVENT_ON_RUN_CLASS_TEARDOWN_METHOD),
                    functools.partial(
                        self.fire_event,
                        self.EVENT_ON_COMPLETE_CLASS_TEARDOWN_METHOD),
                ],
        ) as class_fixture_failures:
            # if we have class fixture failures, we're not going to bother
            # running tests, but we need to generate bogus results for them all
            # and mark them as failed.
            self.__run_test_methods(class_fixture_failures)
            self._stage = self.STAGE_CLASS_TEARDOWN

        # class fixture failures count towards our total
        self.failure_count += len(class_fixture_failures)

        # you might think that we would want to do this... but this is a
        # bogus test result used for reporting to the server. we always
        # have it report success, i guess.
        # for exc_info in fixture_failures:
        #     test_case_result.end_in_failure(exc_info)

        if not test_case_result.complete:
            test_case_result.end_in_success()

        self.fire_event(self.EVENT_ON_COMPLETE_TEST_CASE, test_case_result)
示例#22
0
    def __run_class_fixtures(self, stage, fixtures, callback_on_run_event, callback_on_complete_event):
        """Set the current _stage, run a set of fixtures, calling callbacks before and after each."""
        self._stage = stage

        for fixture_method in fixtures:
            # One TestResult per fixture for reporting.
            result = TestResult(fixture_method)

            try:
                # Pre-run callbacks receive the result as a plain dict.
                for callback in self.__callbacks[callback_on_run_event]:
                    callback(result.to_dict())

                result.start()

                # Truthy return means the fixture ran without raising; on
                # failure the helper records the exception into `result`.
                if self.__execute_block_recording_exceptions(fixture_method, result, is_class_level=True):
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise
            finally:
                # Completion callbacks always fire, even on interruption.
                for callback in self.__callbacks[callback_on_complete_event]:
                    callback(result.to_dict())
示例#23
0
 def run(self, result):
     """Run this test case, recording start/failure into *result*.

     Bug fixed: the passed-in result was previously shadowed by a fresh
     local TestResult, so the caller's result never recorded the run and
     summaries always read "0 run". The string-building `exec` dispatch is
     also replaced with getattr (the Pluggable Selector pattern), which is
     safer and avoids executing constructed source.
     """
     result.testStarted()
     self.setUp()
     try:
         # Pluggable Selector: look the test method up by its name.
         method = getattr(self, self.name)
         method()
     except Exception:
         result.testFailed()
     self.tearDown()
示例#24
0
    def test(self, training_result):
        """Classify every example in the test file with the trained model.

        Returns a dict mapping each output value to a TestResult whose
        tested/correct counters tally the examples labelled with that value.
        """
        tallies = {output: TestResult() for output in OUTPUT_VALUES}

        with open(self.__test_file, 'r') as f:
            for lineno, line in enumerate(f):
                # The first two lines of the file are header, not data.
                if lineno < 2:
                    continue
                input_vector, y_hat = line.rstrip().split(": ")
                x = input_vector.split(" ")

                # Pick the output y maximizing the joint probability.
                maximizer = Maximizer()
                for y in OUTPUT_VALUES:
                    maximizer.update(y, training_result.calculate_joint(x, y))

                tallies[y_hat].tested += 1
                if maximizer.y == y_hat:
                    tallies[y_hat].correct += 1

        return tallies
示例#25
0
    def test(self, betas):
        """Score a logistic-regression model (weights *betas*) on the test file.

        Returns a dict mapping each output value to a TestResult tallying
        how many examples with that label were seen and predicted correctly.
        """
        tallies = {output: TestResult() for output in OUTPUT_VALUES}

        with open(self.__test_file, 'r') as f:
            for lineno, line in enumerate(f):
                # Skip the two header lines.
                if lineno < 2:
                    continue
                input_vector, y = line.rstrip().split(": ")

                # Feature vector with a leading 1 for the intercept term.
                x = [1] + [int(x_i) for x_i in input_vector.split(" ")]

                # Sigmoid of the linear combination, thresholded at 0.5.
                z = sum(betas[j] * x_j for j, x_j in enumerate(x))
                p_y = 1 / (1 + exp(-z))
                y_hat = "1" if p_y > .5 else "0"

                tallies[y].tested += 1
                if y_hat == y:
                    tallies[y].correct += 1

        return tallies
示例#26
0
    def __run_class_fixture(self, fixture_method, function_to_call, stage, callback_on_run_event, callback_on_complete_event, fire_events=True):
        """Run one class-level fixture inside its own TestResult.

        Fires the run/complete events around the call (unless fire_events is
        False); a failing fixture increments self.failure_count, and
        KeyboardInterrupt/SystemExit mark the result interrupted and re-raise.
        """
        self._stage = stage

        result = TestResult(fixture_method)
        try:
            result.start()
            if fire_events:
                self.fire_event(callback_on_run_event, result)
            # Truthy return means the fixture completed without raising.
            if self.__execute_block_recording_exceptions(function_to_call, result, is_class_level=True):
                result.end_in_success()
            else:
                self.failure_count += 1
        except (KeyboardInterrupt, SystemExit):
            result.end_in_interruption(sys.exc_info())
            raise
        finally:
            # The completion event always fires, even on interruption.
            if fire_events:
                self.fire_event(callback_on_complete_event, result)
示例#27
0
    def _generate_and_test_full_results_json(self, passed_tests_list,
                                             failed_tests_list):
        """Build a results map from the given test lists, generate the full
        results JSON, and verify its contents (including the svn revision).

        Idiom fixes: the assigned lambdas (PEP 8 E731) are now local defs,
        and the list-inside-set constructions are set comprehensions.
        """
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        def get_test_set(ts, label):
            # Tests whose names carry the given modifier prefix.
            return {t for t in ts if t.startswith(label)}

        DISABLED_tests = get_test_set(tests_set, 'DISABLED_')
        FLAKY_tests = get_test_set(tests_set, 'FLAKY_')
        MAYBE_tests = get_test_set(tests_set, 'MAYBE_')
        FAILS_tests = get_test_set(tests_set, 'FAILS_')
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests
                                  | MAYBE_tests) - set(failed_tests_list)

        # DISABLED_ tests never count as failures, even if listed as failed.
        failed_tests = set(failed_tests_list) - DISABLED_tests

        test_timings = {}
        test_results_map = {}
        for i, test in enumerate(tests_set):
            test_name = canonical_name(test)
            # The enumeration index doubles as a fake elapsed time.
            test_timings[test_name] = i
            test_results_map[test_name] = [
                TestResult(test,
                           status='FAILURE' if
                           (test in failed_tests) else 'SUCCESS',
                           elapsed_time=test_timings[test_name])
            ]

        # Do not write to an actual file.
        def mock_writer(path, data):
            return True

        generator = JSONResultsGenerator(self.builder_name,
                                         self.build_number,
                                         '',
                                         test_results_map,
                                         svn_revisions=[('blink', '12345')],
                                         file_writer=mock_writer)

        results_json = generator.get_full_results_json()
        self._verify_full_json_results(results_json, tests_set, PASS_tests,
                                       failed_tests, test_timings)
        self.assertEqual(results_json.get('blink_revision'), '12345')
示例#28
0
def execute_test():
    """Hit the async endpoint and, on failure, collect stack-trace file paths.

    Returns None on HTTP 200; otherwise a TestResult carrying the status
    code and the source paths extracted from the JSON stack trace.
    """
    test_url = "http://localhost:8080/async/python"
    headers = {'Accept': 'application/json'}
    test_response = requests.get(test_url, headers=headers)
    if test_response.status_code == 200:
        print("Test succeeded")
        return
    # Bug fix: the original string lacked the `f` prefix, so the literal
    # text "{test_response.status_code}" was printed instead of the value.
    print(f"Test Failed with status: {test_response.status_code}")

    stacktrace = test_response.json()['stackTrace']
    stacktrace_files = []
    print(stacktrace)
    for entry in stacktrace:
        if entry.get('fileName'):
            # Convert dotted class names to path form, e.g. a.b.C -> a/b/C.
            filename_and_path = entry["className"].replace(".", "/")
            stacktrace_files.append(filename_and_path)
            print(filename_and_path)
    return TestResult(test_response.status_code, stacktrace_files)
    def run(self):
        """Run every algorithm against each data point in the test data set.

        Bug fixed: the original deleted the 'name' key from the data points
        inside self.data directly, destructively mutating the stored data
        set (a second run would raise KeyError); each point is now copied
        first, matching the sibling runner that calls .copy().
        """
        data_set_test_results = []

        for original_point in self.data:
            # Copy so self.data keeps its 'name' keys intact.
            data_point = dict(original_point)
            test_name = data_point.pop('name')

            test_results = [algorithm.use(**data_point)
                            for algorithm in self.algorithms]

            test_result = TestResult(test_name, self.algorithms, test_results,
                                     data_point)
            data_set_test_results.append(test_result)

        self.set_results(data_set_test_results)
    def run(self, duration=3, max_capacity=500):
        """Stress-test the algorithms for *duration* seconds.

        Each iteration generates random weights/values/capacity, runs every
        algorithm, checks agreement, and stops early (printing details) on
        the first failure.
        """
        M = self.max_value
        n = self.data_size

        start_time = time.time()
        end_time = start_time + duration
        iterations = 0
        print(f'Running stress test iterations for {duration} seconds ...')

        while True:
            now = time.time()
            iterations += 1

            # Out of time: every completed iteration agreed.
            if now >= end_time:
                print(f'\n------ ALL ITERATIONS PASSED ------')
                print(
                    f'------ ELAPSED TIME - {round(now - start_time, 2)} SECONDS ------'
                )
                print(f'------ ITERATIONS - {iterations} ------')
                break

            # Randomized problem instance for this iteration.
            weights, values, capacity = self.build_data(n, M, max_capacity)
            test_data = {
                'capacity': capacity,
                'weights': weights,
                'values': values
            }
            test_name = f'Iteration_{iterations}'

            # Run every algorithm against the same instance.
            iteration_results = [round(algo.use(**test_data), 2)
                                 for algo in self.algorithms]

            result = TestResult(test_name, self.algorithms, iteration_results, test_data)
            result.check_results(show_print=False)

            if result.failure == True:
                result.print_fail()
                break
示例#31
0
class TestCaseTest(TestCase):
    """Self-tests for the tiny xUnit framework (TestCase/TestResult/TestSuite)."""

    def setUp(self):
        # Fresh collecting parameter for every test method.
        self.result = TestResult()

    def testTemplateMethod(self):
        # Running a test must invoke setUp, the method, then tearDown in order.
        was_run = WasRun("testMethod")
        was_run.run(self.result)
        assert (was_run.log == "setUp testMethod tearDown ")
        print('testTemplateMethod done')

    def testResult(self):
        # A passing test counts as one run with no failures.
        was_run = WasRun("testMethod")
        was_run.run(self.result)
        assert (self.result.summary() == "1 run, 0 failed")
        print('testResult done')

    def testFailedResult(self):
        # A raising test method counts as a failure.
        was_run = WasRun("testBrokenMethod")
        was_run.run(self.result)
        assert (self.result.summary() == "1 run, 1 failed")
        print('testFailedResult done')

    def testFailedResultFormatting(self):
        # Drive TestResult directly to verify its summary formatting.
        self.result.testStarted()
        self.result.testFailed()
        assert (self.result.summary() == "1 run, 1 failed")
        print('testFailedResultFormatting done')

    def testSuite(self):
        # A suite aggregates the results of all of its tests.
        tests = TestSuite()
        for method_name in ("testMethod", "testBrokenMethod"):
            tests.add(WasRun(method_name))
        tests.run(self.result)
        assert (self.result.summary() == "2 run, 1 failed")
        print('testSuite done')
示例#32
0
    def __run_class_teardown_fixtures(self):
        """End the process of running tests.  Run the class's class_teardown methods.

        Each teardown fixture gets its own TestResult so per-fixture outcomes
        are reported through the on-run / on-complete callbacks, just like a
        regular test method.
        """
        self._stage = self.STAGE_CLASS_TEARDOWN

        # Legacy xUnit-style hook runs before the decorator-declared fixtures.
        self.__run_deprecated_fixture_method('classTearDown')

        for fixture_method in self.class_teardown_fixtures:
            result = TestResult(fixture_method)
            try:
                # Notify listeners that this fixture is about to run.
                for callback in self.__on_run_test_method_callbacks:
                    callback(self, fixture_method)

                result.start()

                # A True return means the fixture ran without recording an exception.
                if self.__execute_block_recording_exceptions(fixture_method, result, is_class_level=True):
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                # Mark the result incomplete, still inform listeners, then
                # re-raise so the interruption propagates to the caller.
                result.end_in_incomplete(sys.exc_info())
                for callback in self.__on_complete_test_method_callbacks:
                    callback(self, result)
                raise
            else:
                for callback in self.__on_complete_test_method_callbacks:
                    callback(self, result)
示例#33
0
    def run(self):
        """Delegator method encapsulating the flow for executing a TestCase instance.

        This method tracks its progress in a TestResult with test_method 'run'.
        This TestResult is used as a signal when running in client/server mode:
        when the client is done running a TestCase and its fixtures, it sends
        this TestResult to the server during the EVENT_ON_COMPLETE_TEST_CASE
        phase.

        This could be handled better. See
        https://github.com/Yelp/Testify/issues/121.
        """

        # The TestResult constructor wants an actual method, which it inspects
        # to determine the method name (and class name, so it must be a method
        # and not a function!). self.run is as good a method as any.
        test_case_result = TestResult(self.run)
        test_case_result.start()
        self.fire_event(self.EVENT_ON_RUN_TEST_CASE, test_case_result)

        fixtures = []
        all_class_fixtures = self.class_setup_fixtures + self.class_setup_teardown_fixtures + self.class_teardown_fixtures
        for fixture in sorted(all_class_fixtures,
                              key=make_sortable_fixture_key):
            # We convert all class-level fixtures to
            # class_setup_teardown fixtures a) to handle all
            # class-level fixtures the same and b) to make the
            # behavior more predictable when a TestCase has different
            # fixtures interacting.
            if fixture._fixture_type == 'class_teardown':
                fixture = self.__convert_class_teardown_to_class_setup_teardown(
                    fixture)
            elif fixture._fixture_type == 'class_setup':
                fixture = self.__convert_class_setup_to_class_setup_teardown(
                    fixture)
            fixtures.append(fixture)

        # Entering the class-level context managers runs the setup half of each
        # fixture, then the test methods (the callback), then the teardown half
        # on the way back out.
        self.__enter_class_context_managers(fixtures, self.__run_test_methods)

        test_case_result.end_in_success()
        self.fire_event(self.EVENT_ON_COMPLETE_TEST_CASE, test_case_result)
示例#34
0
    def __run_deprecated_fixture_method(self, fixture_name):
        """This runs an old-style (eg/ 'def setUp') fixture method.

        Class-level legacy fixtures ('classSetUp'/'classTearDown') are wrapped
        in their own TestResult and reported through the run/complete
        callbacks; method-level ones ('setUp'/'tearDown') are simply invoked.
        """
        if hasattr(self, fixture_name):
            deprecated_method = getattr(self, fixture_name)

            # Only class-level fixtures get per-fixture result reporting.
            if fixture_name.startswith('class'):
                result = TestResult(deprecated_method)
                try:
                    for callback in self.__on_run_test_method_callbacks:
                        callback(self, deprecated_method)

                    result.start()
                    # True means the method ran without recording an exception.
                    if self.__execute_block_recording_exceptions(deprecated_method, result, is_class_level=True):
                        result.end_in_success()
                except (KeyboardInterrupt, SystemExit):
                    # Record the interruption, notify listeners, then re-raise.
                    result.end_in_incomplete(sys.exc_info())
                    for callback in self.__on_complete_test_method_callbacks:
                        callback(self, result)
                    raise
                else:
                    for callback in self.__on_complete_test_method_callbacks:
                        callback(self, result)
            else:
                deprecated_method()
示例#35
0
    def __run_test_methods(self):
        """Run this class's setup fixtures / test methods / teardown fixtures.

        These are run in the obvious order - setup and teardown go before and after,
        respectively, every test method.  If there was a failure in the class_setup
        phase, no method-level fixtures or test methods will be run, and we'll eventually
        skip all the way to the class_teardown phase.   If a given test method is marked
        as disabled, neither it nor its fixtures will be run.  If there is an exception
        during the setup phase, the test method will not be run and execution
        will continue with the teardown phase.
        """
        for test_method in self.runnable_test_methods():

            result = TestResult(test_method)

            try:
                self._method_level = True  # Flag that we're currently running method-level stuff (rather than class-level)

                # run "on-run" callbacks. e.g. print out the test method name
                self.fire_event(self.EVENT_ON_RUN_TEST_METHOD, result)

                result.start()

                # A recorded class-level failure/error poisons every test
                # method: mark the result immediately and skip the fixtures.
                if self.__class_level_failure:
                    result.end_in_failure(self.__class_level_failure)
                elif self.__class_level_error:
                    result.end_in_error(self.__class_level_error)
                else:
                    # first, run setup fixtures
                    self._stage = self.STAGE_SETUP

                    def _setup_block():
                        for fixture_method in self.setup_fixtures:
                            fixture_method()

                    self.__execute_block_recording_exceptions(
                        _setup_block, result)

                    def _run_test_block():
                        # then run the test method itself, assuming setup was successful
                        self._stage = self.STAGE_TEST_METHOD
                        if not result.complete:
                            self.__execute_block_recording_exceptions(
                                test_method, result)

                    def _setup_teardown_block():
                        self.__enter_context_managers(
                            self.setup_teardown_fixtures, _run_test_block)

                    # then run any setup_teardown fixtures, assuming setup was successful.
                    if not result.complete:
                        self.__execute_block_recording_exceptions(
                            _setup_teardown_block, result)

                    # finally, run the teardown phase
                    self._stage = self.STAGE_TEARDOWN

                    def _teardown_block():
                        for fixture_method in self.teardown_fixtures:
                            fixture_method()

                    self.__execute_block_recording_exceptions(
                        _teardown_block, result)

                # if nothing's gone wrong, it's not about to start
                if not result.complete:
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise
            finally:
                # Always report completion, even on interruption.
                self.fire_event(self.EVENT_ON_COMPLETE_TEST_METHOD, result)
                self._method_level = False

                # Stop the whole run early once the failure limit is reached.
                if not result.success:
                    self.failure_count += 1
                    if self.failure_limit and self.failure_count >= self.failure_limit:
                        return
示例#36
0
    def __run_class_fixtures(self, stage, fixtures, callback_on_run_event, callback_on_complete_event):
        """Set the current _stage, run a set of fixtures, calling callbacks before and after each.

        Args:
            stage: stage constant to record in self._stage while running.
            fixtures: iterable of class-level fixture methods to execute.
            callback_on_run_event: event fired before each fixture runs.
            callback_on_complete_event: event fired after each fixture (always,
                even on interruption).
        """
        self._stage = stage

        for fixture_method in fixtures:
            result = TestResult(fixture_method)

            try:
                self.fire_event(callback_on_run_event, result)
                result.start()
                # True return means the fixture ran cleanly.
                if self.__execute_block_recording_exceptions(fixture_method, result, is_class_level=True):
                    result.end_in_success()
                else:
                    # The fixture failed; the executor should have recorded
                    # either a class-level failure or a class-level error.
                    if self.__class_level_failure:
                        result.end_in_failure(self.__class_level_failure)
                        ### Bump failure count?
                        ### Something about failure_limit?
                    elif self.__class_level_error:
                        result.end_in_error(self.__class_level_error)
                        ### Bump failure count?
                        ### Something about failure_limit?
                    else:
                        # Defensive: reaching here would mean the bookkeeping
                        # above is inconsistent with the executor's return value.
                        raise Exception("Couldn't find a class-level failure or error even"
                            " though we failed while executing a class-level fixture."
                            " This should not be possible. Aborting.")
            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise
            finally:
                self.fire_event(callback_on_complete_event, result)
示例#37
0
 def setUp(self):
     """Create the shared fixtures used by every test method."""
     self.suite = TestSuite()
     self.test = WasRun("testMethod")
     self.result = TestResult()
示例#38
0
    def __run_test_methods(self, class_fixture_failures):
        """Run this class's setup fixtures / test methods / teardown fixtures.

        These are run in the obvious order - setup and teardown go before and after,
        respectively, every test method.  If there was a failure in the class_setup
        phase, no method-level fixtures or test methods will be run, and we'll eventually
        skip all the way to the class_teardown phase.   If a given test method is marked
        as disabled, neither it nor its fixtures will be run.  If there is an exception
        during the setup phase, the test method will not be run and execution
        will continue with the teardown phase.

        Args:
            class_fixture_failures: list of exc_info tuples recorded while
                running class-level fixtures; non-empty means every test
                method is immediately marked failed.
        """
        for test_method in self.runnable_test_methods():
            result = TestResult(test_method)

            # Sometimes, test cases want to take further action based on
            # results, e.g. further clean-up or reporting if a test method
            # fails. (Yelp's Selenium test cases do this.) If you need to
            # programatically inspect test results, you should use
            # self.results().

            # NOTE: THIS IS INCORRECT -- im_self is shared among all test
            # methods on the TestCase instance. This is preserved for backwards
            # compatibility and should be removed eventually.

            try:
                # run "on-run" callbacks. e.g. print out the test method name
                self.fire_event(self.EVENT_ON_RUN_TEST_METHOD, result)

                result.start()
                self.__all_test_results.append(result)

                # if class setup failed, this test has already failed.
                self._stage = self.STAGE_CLASS_SETUP
                for exc_info in class_fixture_failures:
                    result.end_in_failure(exc_info)

                # Skip the method-level fixtures entirely for poisoned results.
                if result.complete:
                    continue

                # first, run setup fixtures
                self._stage = self.STAGE_SETUP
                with self.__test_fixtures.instance_context() as fixture_failures:
                    # we haven't had any problems in class/instance setup, onward!
                    if not fixture_failures:
                        self._stage = self.STAGE_TEST_METHOD
                        result.record(test_method)
                    self._stage = self.STAGE_TEARDOWN

                # maybe something broke during teardown -- record it
                for exc_info in fixture_failures:
                    result.end_in_failure(exc_info)

                # if nothing's gone wrong, it's not about to start
                if not result.complete:
                    result.end_in_success()

            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise

            finally:
                # Always report completion; stop early at the failure limit.
                self.fire_event(self.EVENT_ON_COMPLETE_TEST_METHOD, result)

                if not result.success:
                    self.failure_count += 1
                    if self.failure_limit and self.failure_count >= self.failure_limit:
                        break
  def testFailedResult(self):
    """One failing test must be summarized as '1 run, 1 failed'."""
    broken = WasRun("testBrokenMethod")
    broken.run(self.result)
    assert(self.result.summary() == "1 run, 1 failed")

  def testFailedResultFormatting(self):
    """Drive TestResult directly: one start plus one failure -> '1 run, 1 failed'.

    Bug fix: the original called the bare name ``result`` (a module-level
    global, if one exists at all) while asserting on ``self.result``; both
    calls must target the fixture's own TestResult.
    """
    self.result.testStarted()
    self.result.testFailed()
    assert("1 run, 1 failed" == self.result.summary() )

  def testSuite(self):
    """Aggregate one passing and one failing test into a single summary."""
    tests = TestSuite()
    for method_name in ("testMethod", "testBrokenMethod"):
      tests.add(WasRun(method_name))

    tests.run(self.result)
    assert(self.result.summary() == "2 run, 1 failed")

suite = TestSuite()

# Register the framework self-tests; they run in registration order.
for test_name in (
    "testTemplateMethod",
    "testResult",
    "testFailedResult",
    "testFailedResultFormatting",
    "testSuite",
):
    suite.add(TestCaseTest(test_name))

result = TestResult()
suite.run(result)
# Bug fix: the original bare `print result.summary()` is a Python 2 print
# statement and a SyntaxError under Python 3; the call form works on both.
print(result.summary())

 def setUp(self):
   """Give each test method its own fresh TestResult fixture."""
   self.result = TestResult()
示例#41
0
    def __run_test_methods(self):
        """Run this class's setup fixtures / test methods / teardown fixtures.

        These are run in the obvious order - setup and teardown go before and after,
        respectively, every test method.  If there was a failure in the class_setup
        phase, no method-level fixtures or test methods will be run, and we'll eventually
        skip all the way to the class_teardown phase.   If a given test method is marked
        as disabled, neither it nor its fixtures will be run.  If there is an exception
        during the setup phase, the test method will not be run and execution
        will continue with the teardown phase.
        """
        for test_method in self.runnable_test_methods():

            result = TestResult(test_method)
            # NOTE(review): im_self is Python 2-only (gone in Python 3, where
            # bound methods expose __self__); this also shares state across
            # all test methods of the instance.
            test_method.im_self.test_result = result

            try:
                # run "on-run" callbacks. eg/ print out the test method name
                for callback in self.__on_run_test_method_callbacks:
                    callback(self, test_method)
                result.start()

                # A class-level failure/error poisons every test method.
                if self.__class_level_failure:
                    result.end_in_failure(self.__class_level_failure)
                elif self.__class_level_error:
                    result.end_in_error(self.__class_level_error)
                else:
                    # first, run setup fixtures
                    self._stage = self.STAGE_SETUP
                    def _setup_block():
                        for fixture_method in self.setup_fixtures:
                            fixture_method()
                        self.__run_deprecated_fixture_method('setUp')
                    self.__execute_block_recording_exceptions(_setup_block, result)

                    # then run the test method itself, assuming setup was successful
                    self._stage = self.STAGE_TEST_METHOD
                    if not result.complete:
                        self.__execute_block_recording_exceptions(test_method, result)

                    # finally, run the teardown phase
                    self._stage = self.STAGE_TEARDOWN
                    def _teardown_block():
                        self.__run_deprecated_fixture_method('tearDown')
                        for fixture_method in self.teardown_fixtures:
                            fixture_method()
                    self.__execute_block_recording_exceptions(_teardown_block, result)

                # if nothing's gone wrong, it's not about to start
                if not result.complete:
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                # Record the interruption, notify listeners, then re-raise.
                result.end_in_incomplete(sys.exc_info())
                for callback in self.__on_complete_test_method_callbacks:
                    callback(self, result)
                raise
            else:
                for callback in self.__on_complete_test_method_callbacks:
                    callback(self, result)
示例#42
0
        suite = TestSuite()
        suite.add(BrokenSetup("testMethod"))
        suite.run(self.result)
        assert ("1 run, 1 failed" == self.result.summary())

    def testSuite(self):
        """Two tests in a suite aggregate to a '2 run, 1 failed' summary."""
        tests = TestSuite()
        for method_name in ("testMethod", "testBrokenMethod"):
            tests.add(WasRun(method_name))
        tests.run(self.result)
        assert (self.result.summary() == "2 run, 1 failed")

    def tearDownIfFailed(self):
        """Run a failing test to exercise tearDown (no assertion here yet)."""
        failing = WasRun("testBrokenMethod")
        failing.run(self.result)


suite = TestSuite()
# Register the framework self-tests; they run in registration order.
for test_name in (
    "testTemplateMethod",
    "testResult",
    "testFailedResultFormatting",
    "testFailedResult",
    "testSuite",
    "setupFailed",
    "testSuiteContainsFailingSetup",
    "tearDownIfFailed",
):
    suite.add(TestCaseTest(test_name))

result = TestResult()
suite.run(result)
print(result.summary())
示例#43
0
    def run(self, test):
        """Run the given test case or test suite.

        Flattens `test` into a per-class dictionary, runs each class's tests
        while teeing output into a per-class report file under ./test-reports,
        then prints a Maven-style summary of successes/failures/errors/skips.

        NOTE(review): this method is Python 2-only as written (dict.iteritems,
        the `file` builtin, a bare `print` statement, and `exception.message`).
        """

        self.runner_start_time = datetime.datetime.now()

        # class name -> list of test declarations belonging to that class
        test_class_dict = {}

        def find_test_methods(test_decl):
            # Recursively flatten nested suites; leaves are individual tests.
            is_iterable = hasattr(test_decl, '__iter__')

            if (is_iterable):
                for tests in test_decl:
                    find_test_methods(tests)
            else:
                cls_nm = type(test_decl).__name__

                if not test_class_dict.get(cls_nm):
                    test_class_dict[cls_nm] = list()

                test_class_dict[cls_nm].append(test_decl)

        # convert the given TestCase/TestSuite into a dictionary of test-classes
        find_test_methods(test)

        all_results = list()
        success_results = list()
        failure_results = list()
        error_results = list()
        skipped_results = list()

        utils.write_separator()
        utils.write_log("INFO", "T E S T S")

        for k, class_tests in test_class_dict.iteritems():
            class_suite = TestSuite(class_tests)
            # Reports live next to the entry-point script, in ./test-reports.
            reports_dir = os.path.join(os.path.dirname(__main__.__file__),
                                       "test-reports")

            if not os.path.exists(reports_dir):
                os.makedirs(reports_dir)

            with file(os.path.join(reports_dir, k + '.txt'), 'wb') as fp:
                # execute all tests in this test class
                class_result = TestResult([sys.stdout, fp], class_tests)
                class_suite(class_result)

                # get the test-results from this class and add them to the summary lists
                all_results.extend(class_result.all_results)
                success_results.extend(class_result.success_results)
                failure_results.extend(class_result.failure_results)
                error_results.extend(class_result.error_results)
                skipped_results.extend(class_result.skipped_results)

        # Overall verdict: any failure or error makes the whole run a FAILURE.
        tests_success = not any(error_results) and not any(failure_results)
        tests_result = "SUCCESS" if tests_success else "FAILURE"
        self.runner_stop_time = datetime.datetime.now()

        # print final summary log after all tests are done running
        print
        utils.write_separator()
        utils.write_log("INFO", "TESTS RUN %(tests_result)s" % locals())
        utils.write_separator()
        utils.write_log("INFO")
        utils.write_log("INFO", "Results:")

        if not tests_success:
            utils.write_log("INFO")

        def print_summary_problems(err_list, kind):
            # Print each problem as ClassName.method:lineno message.
            if (any(err_list)):
                utils.write_log("ERROR", kind + "s: ")

                for r in err_list:
                    test_class, test_method = utils.get_test_names(r.test)
                    err_message = r.errObj[1].message
                    # Skip the runner's own frame to report the test's line.
                    err_frame = r.errObj[2].tb_next
                    err_lineno = err_frame.tb_lineno if err_frame else ""
                    utils.write_log(
                        "ERROR",
                        "  %(test_class)s.%(test_method)s:%(err_lineno)s %(err_message)s"
                        % locals())

        print_summary_problems(failure_results, "Failure")
        print_summary_problems(error_results, "Error")

        num_success = len(success_results)
        num_failures = len(failure_results)
        num_errors = len(error_results)
        num_skips = len(skipped_results)

        utils.write_log("INFO")
        utils.write_log(
            "ERROR",
            "Tests run: %(num_success)s, Failures: %(num_failures)s, Errors: %(num_errors)s, Skipped: %(num_skips)s"
            % locals())
        utils.write_log("INFO")

        total_elapsed = self.runner_stop_time - self.runner_start_time

        utils.write_separator()
        utils.write_log("INFO", "Total time: %(total_elapsed)s s" % locals())
        utils.write_log("INFO", "Finished at: %s" % self.runner_stop_time)
        utils.write_separator()
示例#44
0
    def __run_test_methods(self, class_fixture_failures):
        """Run this class's setup fixtures / test methods / teardown fixtures.

        These are run in the obvious order - setup and teardown go before and after,
        respectively, every test method.  If there was a failure in the class_setup
        phase, no method-level fixtures or test methods will be run, and we'll eventually
        skip all the way to the class_teardown phase.   If a given test method is marked
        as disabled, neither it nor its fixtures will be run.  If there is an exception
        during the setup phase, the test method will not be run and execution
        will continue with the teardown phase.

        Args:
            class_fixture_failures: list of exc_info tuples from class-level
                fixtures; any entry marks every test method as failed.
        """
        for test_method in self.runnable_test_methods():
            result = TestResult(test_method)

            # Sometimes, test cases want to take further action based on
            # results, e.g. further clean-up or reporting if a test method
            # fails. (Yelp's Selenium test cases do this.) If you need to
            # programatically inspect test results, you should use
            # self.results().

            # NOTE: THIS IS INCORRECT -- im_self is shared among all test
            # methods on the TestCase instance. This is preserved for backwards
            # compatibility and should be removed eventually.

            try:
                # run "on-run" callbacks. e.g. print out the test method name
                self.fire_event(self.EVENT_ON_RUN_TEST_METHOD, result)

                result.start()
                self.__all_test_results.append(result)

                # first, run setup fixtures
                self._stage = self.STAGE_SETUP
                with self.__test_fixtures.instance_context() as fixture_failures:
                    # we haven't had any problems in class/instance setup, onward!
                    if not (fixture_failures + class_fixture_failures):
                        self._stage = self.STAGE_TEST_METHOD
                        result.record(test_method)
                    self._stage = self.STAGE_TEARDOWN

                # maybe something broke during teardown -- record it
                for exc_info in fixture_failures + class_fixture_failures:
                    result.end_in_failure(exc_info)

                # if nothing's gone wrong, it's not about to start
                if not result.complete:
                    result.end_in_success()

            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise

            finally:
                # Always report completion; stop early at the failure limit.
                self.fire_event(self.EVENT_ON_COMPLETE_TEST_METHOD, result)

                if not result.success:
                    self.failure_count += 1
                    if self.failure_limit and self.failure_count >= self.failure_limit:
                        break
 def __init__(self, name):
     """Record the test-method name and reset all bookkeeping attributes."""
     self.name = name
     self.test = None
     self.wasRun = None
     self.log = None
     self.result = TestResult()
示例#46
0
    def __run_test_methods(self):
        """Run this class's setup fixtures / test methods / teardown fixtures.

        These are run in the obvious order - setup and teardown go before and after,
        respectively, every test method.  If there was a failure in the class_setup
        phase, no method-level fixtures or test methods will be run, and we'll eventually
        skip all the way to the class_teardown phase.   If a given test method is marked
        as disabled, neither it nor its fixtures will be run.  If there is an exception
        during the setup phase, the test method will not be run and execution
        will continue with the teardown phase.
        """
        for test_method in self.runnable_test_methods():

            result = TestResult(test_method)
            # NOTE(review): im_self is Python 2-only and is shared across all
            # test methods on this instance.
            test_method.im_self.test_result = result

            try:
                self._method_level = True # Flag that we're currently running method-level stuff (rather than class-level)

                # run "on-run" callbacks. eg/ print out the test method name
                for callback in self.__callbacks[self.EVENT_ON_RUN_TEST_METHOD]:
                    callback(result.to_dict())
                result.start()

                # A class-level failure/error poisons every test method.
                if self.__class_level_failure:
                    result.end_in_failure(self.__class_level_failure)
                elif self.__class_level_error:
                    result.end_in_error(self.__class_level_error)
                else:
                    # first, run setup fixtures
                    self._stage = self.STAGE_SETUP
                    def _setup_block():
                        for fixture_method in self.setup_fixtures + [ self.setUp ]:
                            fixture_method()
                    self.__execute_block_recording_exceptions(_setup_block, result)

                    def _run_test_block():
                        # then run the test method itself, assuming setup was successful
                        self._stage = self.STAGE_TEST_METHOD
                        if not result.complete:
                            self.__execute_block_recording_exceptions(test_method, result)

                    def _setup_teardown_block():
                        self.__enter_context_managers(self.setup_teardown_fixtures, _run_test_block)

                    # then run any setup_teardown fixtures, assuming setup was successful.
                    if not result.complete:
                        self.__execute_block_recording_exceptions(_setup_teardown_block, result)

                    # finally, run the teardown phase
                    self._stage = self.STAGE_TEARDOWN
                    def _teardown_block():
                        for fixture_method in [ self.tearDown ] + self.teardown_fixtures:
                            fixture_method()
                    self.__execute_block_recording_exceptions(_teardown_block, result)

                # if nothing's gone wrong, it's not about to start
                if not result.complete:
                    result.end_in_success()
            except (KeyboardInterrupt, SystemExit):
                result.end_in_interruption(sys.exc_info())
                raise
            finally:
                # Always report completion, even on interruption.
                for callback in self.__callbacks[self.EVENT_ON_COMPLETE_TEST_METHOD]:
                    callback(result.to_dict())

                self._method_level = False

                # Stop the whole run early once the failure limit is reached.
                if not result.success:
                    self.failure_count += 1
                    if self.failure_limit and self.failure_count >= self.failure_limit:
                        return
示例#47
0
 def setUp(self):
     self.result = TestResult()