Example #1
 def test_should_use_provided_formatter(self, parse_file):
     """ Scenario: run verbose on systems with color support """
     run(sentinel.filename, sentinel.suite, verbose=True, formatter=sentinel.formatter)
     parse_file.assert_called_once_with(sentinel.filename)
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite, formatter=sentinel.formatter,
         show_all_missing=True)
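Examples #1, #2, #4, #5 and #7 through #11 are method bodies lifted from Morelia's own unit tests. The extra parameters (`parse_file`, `PlainTextFormatter`, `ColorTextFormatter`, `has_color_support`) are mock objects injected by `unittest.mock.patch` decorators that this listing omits, and `sentinel` comes from `unittest.mock`. The sketch below restores that scaffolding around Example #2; the dotted patch targets are assumptions chosen for illustration, not necessarily the exact paths used upstream.

 from unittest import TestCase
 from unittest.mock import patch, sentinel

 from morelia import run


 # The patch targets below are hypothetical; patch wherever run()
 # actually resolves these names in your Morelia version.
 @patch('morelia.has_color_support')
 @patch('morelia.PlainTextFormatter')
 @patch('morelia.parse_file')
 class RunTest(TestCase):

     def test_should_parse_file_and_evaluate_tests(
             self, parse_file, PlainTextFormatter, has_color_support):
         """ Scenario: run and evaluate """
         # Decorators apply bottom-up, so parse_file is the first mock argument.
         has_color_support.return_value = False
         run(sentinel.filename, sentinel.suite)
         parse_file.assert_called_once_with(sentinel.filename)
         parse_file.return_value.evaluate.assert_called_once_with(
             sentinel.suite, show_all_missing=True)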
Example #2
 def test_should_parse_file_and_evaluate_tests(self, parse_file, PlainTextFormatter, has_color_support):
     """ Scenario: run and evaluate """
     has_color_support.return_value = False
     run(sentinel.filename, sentinel.suite)
     parse_file.assert_called_once_with(sentinel.filename)
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite, show_all_missing=True)
Example #3
    def bdd_morelia_write_code(self):
        """ Write BDD feature code """
        try:
            run(self.FEATURE_FILE, self, verbose=True)
        except AssertionError as e:
            steps_code = str(e)

            if steps_code.startswith('Cannot match steps:'):
                steps_lines = '\n'.join(steps_code.split('\n')[1:])

                (scenario_id,
                 bdd_filename) = get_bdd_module_name(self.TESTING_PREFIX,
                                                     self.FEATURE_FILE)
                with open(os.path.join(self.PACKAGE_DIRECTORY, bdd_filename),
                          'w') as f:
                    print('# Feature File: {}'.format(self.FEATURE_FILE),
                          file=f)
                    print(TEST_CASE_FILE_FMT.format_map({
                        'feature_file':
                        self.FEATURE_FILE,
                        'scenario_id':
                        scenario_id,
                        'steps_lines':
                        steps_lines.rstrip(),
                        'testing_prefix':
                        self.TESTING_PREFIX
                    }),
                          file=f)

                # Show the new module's filename.
                print('New BDD Test Case module: "{}"\n'.format(bdd_filename),
                      file=sys.stderr)
            else:
                raise
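Example #3 relies on run() raising an AssertionError whose message begins with 'Cannot match steps:' when steps are missing; everything after that header (the suggested `def step_...` stubs) becomes `steps_lines` and is written into a new module through a `TEST_CASE_FILE_FMT` template and a `get_bdd_module_name` helper defined elsewhere in that project. Neither is shown in the listing; a purely hypothetical template with the shape the `format_map` call implies might look like this (the class and method naming here is an assumption, not the project's actual template):

 TEST_CASE_FILE_FMT = '''\
"""Generated BDD test case for {feature_file}."""

import os
import unittest

from morelia import run


class {scenario_id}(unittest.TestCase):

{steps_lines}

    def {testing_prefix}_feature(self):
        filename = os.path.join(os.path.dirname(__file__), '{feature_file}')
        run(filename, self, verbose=True)
'''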
Example #4
 def test_should_run_verbose_with_plain_text_formatter(self, parse_file, PlainTextFormatter, has_color_support):
     """ Scenario: run verbose on windows """
     has_color_support.return_value = False
     run(sentinel.filename, sentinel.suite, verbose=True)
     parse_file.assert_called_once_with(sentinel.filename)
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite, formatter=PlainTextFormatter.return_value,
         show_all_missing=True)
Example #5
 def test_should_run_verbose_with_color_text_formatter(self, parse_file, ColorTextFormatter, has_color_support):
     """ Scenario: run verbose on systems with color support """
     has_color_support.return_value = True
     run(sentinel.filename, sentinel.suite, verbose=True)
     parse_file.assert_called_once_with(sentinel.filename)
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite, formatter=ColorTextFormatter.return_value,
         show_all_missing=True)
Example #6
 def test_addition(self):
     """Addition feature."""
     run(
         __file__,
         self,
         as_str=__doc__,
         scenario="Subsequent additions",
         verbose=True,
     )
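Example #6 never reads a .feature file at all: `__doc__` supplies the Gherkin text via `as_str`, `__file__` is passed mainly for error reporting, and `scenario="Subsequent additions"` limits the run to scenarios whose names match that pattern. A module arranged for this call might look roughly like the sketch below; the feature wording is an assumption, and the step methods (omitted here) would follow the pattern shown after Example #16.

 """
 Feature: Addition

 Scenario: Simple addition
     Given I have entered "2" into the calculator
     And I have entered "3" into the calculator
     When I press add
     Then the result should be "5" on the screen

 Scenario: Subsequent additions
     Given I have entered "2" into the calculator
     And I have entered "3" into the calculator
     When I press add
     And I have entered "4" into the calculator
     And I press add
     Then the result should be "9" on the screen
 """
 import unittest

 from morelia import run


 class AdditionTest(unittest.TestCase):

     # step_... methods matching the Given/When/Then lines above go here.

     def test_addition(self):
         """Addition feature."""
         run(
             __file__,
             self,
             as_str=__doc__,
             scenario="Subsequent additions",
             verbose=True,
         )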
Example #7
 def test_should_parse_file_and_evaluate_tests(self, parse_file,
                                               PlainTextFormatter,
                                               has_color_support):
     """ Scenario: run and evaluate """
     has_color_support.return_value = False
     run(sentinel.filename, sentinel.suite)
     parse_file.assert_called_once_with(sentinel.filename, scenario='.*')
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite, show_all_missing=True)
Example #8
 def test_should_run_verbose_with_color_text_formatter(
         self, parse_file, ColorTextFormatter, has_color_support):
     """ Scenario: run verbose on systems with color support """
     has_color_support.return_value = True
     run(sentinel.filename, sentinel.suite, verbose=True)
     parse_file.assert_called_once_with(sentinel.filename, scenario='.*')
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite,
         formatter=ColorTextFormatter.return_value,
         show_all_missing=True)
Example #9
 def test_should_run_verbose_with_plain_text_formatter(
         self, parse_file, PlainTextFormatter, has_color_support):
     """ Scenario: run verbose on windows """
     has_color_support.return_value = False
     run(sentinel.filename, sentinel.suite, verbose=True)
     parse_file.assert_called_once_with(sentinel.filename, scenario='.*')
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite,
         formatter=PlainTextFormatter.return_value,
         show_all_missing=True)
Example #10
 def test_should_use_provided_formatter(self, parse_file):
     """ Scenario: run verbose on systems with color support """
     run(sentinel.filename,
         sentinel.suite,
         verbose=True,
         formatter=sentinel.formatter)
     parse_file.assert_called_once_with(sentinel.filename, scenario='.*')
     parse_file.return_value.evaluate.assert_called_once_with(
         sentinel.suite,
         formatter=sentinel.formatter,
         show_all_missing=True)
Example #11
 def test_should_parse_as_string_and_evaluate_tests(self, parse_as_str,
                                                    PlainTextFormatter,
                                                    has_color_support):
     """ Scenario: run and evaluate feature passed as string """
     has_color_support.return_value = False
     run(sentinel.filename, sentinel.suite, as_str=sentinel.some_string)
     parse_as_str.assert_called_once_with(sentinel.filename,
                                          sentinel.some_string,
                                          scenario='.*')
     parse_as_str.return_value.evaluate.assert_called_once_with(
         sentinel.suite, show_all_missing=True)
Example #12
    def test_merging_files(self):
        with patch.object(InputSourceURL, 'read') as read:
            read.return_value = 'pdf content'
            with patch('cloudconvert.base.requests') as requests_base:
                process_result = Mock(status_code=200)
                process_result.json.return_value = {'url': '//localhost'}

                convertion_result = Mock(status_code=200)
                convertion_result.json.return_value = {'step': 'finished', 'ur': '//localhost', 'output': {'ext': 'pdf', 'url': '//localhost'}}

                requests_base.post.side_effect = [process_result, convertion_result]
                requests_base.get.return_value.content = 'abc'

                filename = os.path.abspath(os.path.join(os.path.dirname(__file__), 'merging_files.feature'))
                run(filename, self)
Example #13
 def test_should_report_on_all_failing_scenarios(self):
     self._add_failure_pattern = re.compile(
         r'Scenario: Add two numbers\n\s*Then the result should be "120" on the screen\n\s*.*AssertionError:\s*70 != 120',
         re.DOTALL)
     self._substract_failure_pattern = re.compile(
         r'Scenario: Subtract two numbers\n\s*Then the result should be "80" on the screen\n\s*.*AssertionError:\s*70 != 80',
         re.DOTALL)
     self._multiply_failure_pattern = re.compile(
         r'Scenario: Multiply two numbers\n\s*Then the result should be "12" on the screen\n\s*.*AssertionError:\s*3 != 12',
         re.DOTALL)
     self._division_failure_pattern = re.compile(
         r'Scenario: Divide two numbers\n\s*Then the result should be "4" on the screen\n\s*.*AssertionError:\s*2 != 4',
         re.DOTALL)
     filename = os.path.join(
         pwd, 'features/info_on_all_failing_scenarios.feature')
     run(filename, self)
Example #14
 def test_should_only_run_matching_scenarios(self):
     self._matching_pattern = r"Scenario Matches [12]"
     run(self.filename, self, scenario=self._matching_pattern)
     assert ["first", "fourth"] == self.executed
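Examples #14 and #32 pass a regular expression through `scenario=` and assert that only the matching scenarios ran. That only makes sense next to a feature file whose first and fourth scenarios are the ones matching "Scenario Matches [12]", plus step methods that record what executed in `self.executed`. A plausible reconstruction is sketched below; the feature wording and step names are assumptions.

 # matching.feature (assumed content)
 Feature: Scenario matching

 Scenario: Scenario Matches 1
     When the first step runs
 Scenario: Scenario Matches Nothing
     When the second step runs
 Scenario: Scenario Matches Nothing Either
     When the third step runs
 Scenario: Scenario Matches 2
     When the fourth step runs

 # On the TestCase, a single step method with a capture group can record
 # which scenario actually executed.
 def setUp(self):
     self.executed = []

 def step_the_step_runs(self, which):
     r'the (first|second|third|fourth) step runs'
     self.executed.append(which)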
Example #15
 def test_many_test_methods(self):
     """Check setUp/tearDown when many tests in one TestCase."""
     filename = os.path.join(pwd, 'features/setupteardown.feature')
     run(filename, self)
Example #16
 def test_addition(self):
     """ Addition feature """
     filename = os.path.join(os.path.dirname(__file__), 'calculator.feature')
     run(filename, self, verbose=True)
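Examples #16, #17, #27 and #30 all point run() at a calculator.feature file sitting next to the test module. A self-contained pairing in the style of Morelia's tutorial is sketched below; the exact step wording in the feature file is an assumption, but it shows how steps are matched to `step_...` methods, either by name or by a regex given as the method's docstring.

 # calculator.feature (assumed wording)
 Feature: Addition
 Scenario: Add two numbers
     Given I have powered calculator on
     When I enter "50" into the calculator
     And I enter "70" into the calculator
     And I press add
     Then the result should be "120" on the screen

 # test_calculator.py
 import os
 import unittest

 from morelia import run


 class CalculatorTestCase(unittest.TestCase):

     def step_I_have_powered_calculator_on(self):
         r'I have powered calculator on'
         self.stack = []

     def step_I_enter_a_number_into_the_calculator(self, number):
         r'I enter "([^"]+)" into the calculator'
         self.stack.append(int(number))

     def step_I_press_add(self):
         self.result = sum(self.stack)

     def step_the_result_should_be_on_the_screen(self, number):
         r'the result should be "([^"]+)" on the screen'
         self.assertEqual(int(number), self.result)

     def test_addition(self):
         """ Addition feature """
         filename = os.path.join(os.path.dirname(__file__), 'calculator.feature')
         run(filename, self, verbose=True)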
Example #17
 def test_addition(self):
     ''' Addition feature '''
     filename = os.path.join(os.path.dirname(__file__),
                             'calculator.feature')
     run(filename, self, verbose=True, show_all_missing=True)
Example #18
 def test_addition(self):
     run('example.feature', self, verbose=True)
Example #19
 def test_addition(self):
     """Addition feature."""
     run(__file__, self, as_str=__doc__, scenario='Subsequent additions', verbose=True)
Example #20
 def test_setup_teardown(self):
     """Check for multiple setUp/tearDown calls."""
     filename = os.path.join(pwd, 'features/setupteardown.feature')
     run(filename, self)
Example #21
 def test_basic_arithmetic(self):
     """Run tests for the requirements in calculator.feature """
     run('arithmetic/features/arithmetic.feature', self, verbose=True)
Example #22
 def test_docstrings(self):
     filename = os.path.join(pwd, 'features/docstrings.feature')
     run(filename, self)
Example #23
 def test_labels(self):
     filename = os.path.join(pwd, 'features/labels.feature')
     run(filename, self)
Example #24
 def test_comments(self):
     filename = os.path.join(pwd, 'features/comments.feature')
     run(filename, self)
Example #25
 def test_docstrings(self):
     filename = os.path.join(pwd, "features/docstrings.feature")
     run(filename, self)
Example #26
    def test_fail_informatively_on_bad_scenario_regex(self):
        self._matching_pattern = "\\"

        with self.assertRaises(InvalidScenarioMatchingPattern):
            run(self.filename, self, scenario=self._matching_pattern)
Example #27
 def test_addition(self):
     ''' Addition feature '''
     filename = os.path.join(os.path.dirname(__file__), 'calculator.feature')
     run(filename, self, verbose=True, show_all_missing=True)
Example #28
 def test_fail_informatively_on_bad_scenario_regex_deprecated(self):
     with self.assertRaises(InvalidScenarioMatchingPattern):
         run(self.filename, self, scenario="\\")
Example #29
 def test_comments(self):
     filename = features_dir / "comments.feature"
     run(filename, self)
Example #30
 def test_addition(self):
     """Addition feature."""
     filename = os.path.join(os.path.dirname(__file__),
                             'calculator.feature')
     run(filename, self, verbose=True)
Example #31
 def test_add_task(self):
     filename = os.path.join(os.path.dirname(__file__),
                             '../../docs/features/add_task.feature')
     run(filename, self, verbose=True)
Example #32
 def test_should_only_run_matching_scenarios_deprecated(self):
     matching_pattern = r"Scenario Matches [12]"
     run(self.filename, self, scenario=matching_pattern)
     assert ["first", "fourth"] == self.executed