Example #1
0
    def worker(self, proc_number):
        """Worker loop: pull job indices off the shared queue and run each job.

        Runs until the index queue is exhausted.  Each job's formatter output
        is captured in an in-memory buffer and a per-job summary dict is
        pushed onto ``self.resultsqueue`` for the parent to aggregate.

        :param proc_number: zero-based index of this worker process.
        """
        while True:
            try:
                joblist_index = self.joblist_index_queue.get_nowait()
            except Exception:
                # Queue is empty (or otherwise unusable): no more work.
                break
            current_job = self.joblist[joblist_index]
            writebuf = io.StringIO()
            self.setfeature(current_job)
            # Redirect all formatter output into the in-memory buffer so it
            # can be shipped back through the results queue.
            self.config.outputs = []
            self.config.outputs.append(StreamOpener(stream=writebuf))

            stream_openers = self.config.outputs

            self.formatters = make_formatters(self.config, stream_openers)

            for formatter in self.formatters:
                formatter.uri(current_job.filename)

            start_time = time.strftime("%Y-%m-%d %H:%M:%S")
            current_job.run(self)
            end_time = time.strftime("%Y-%m-%d %H:%M:%S")

            sys.stderr.write(current_job.status[0]+"\n")

            if current_job.type == 'feature':
                for reporter in self.config.reporters:
                    reporter.feature(current_job)

            job_report_text = self.generatereport(
                proc_number, current_job,
                start_time, end_time, writebuf)

            if job_report_text:
                results = {
                    'steps_passed': 0,
                    'steps_failed': 0,
                    'steps_skipped': 0,
                    'steps_undefined': 0,
                    'steps_untested': 0,
                    'jobtype': current_job.type,
                    'reportinginfo': job_report_text,
                    'status': current_job.status,
                }
                if current_job.type != 'feature':
                    results['uniquekey'] = current_job.filename + current_job.feature.name
                else:
                    results['scenarios_passed'] = 0
                    results['scenarios_failed'] = 0
                    results['scenarios_skipped'] = 0
                    self.countscenariostatus(current_job, results)
                self.countstepstatus(current_job, results)
                # Default to False so a config without a 'junit' attribute
                # cannot raise AttributeError here.
                if current_job.type != 'feature' and getattr(self.config, 'junit', False):
                    results['junit_report'] = self.generate_junit_report(current_job, writebuf)
                self.resultsqueue.put(results)
Example #2
0
    def run(context, **kwargs):
        """Execute the feature attached to *context* with the Allure formatter.

        Extra behave command-line options may be supplied via
        ``kwargs['args']``.  Allure results go to a fresh temporary
        directory and are parsed into ``context.allure_report``.
        """
        base_args = '-v -f allure_behave.formatter:AllureFormatter -f pretty'
        extra_options = kwargs.get('args', '')
        config = Configuration(
            command_args='{0} {1}'.format(extra_options, base_args))

        report_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
        opener = StreamOpener(filename=report_dir)

        runner = ModelRunner(config, [context.feature_definition])
        runner.formatters = make_formatters(config, [opener])
        runner.run()

        context.allure_report = AllureReport(report_dir)
Example #3
0
def run_behave_with_allure(context, **kwargs):
    """Run the feature(s) on *context* under the Allure formatter.

    Extra behave options come from ``kwargs['args']``; the parsed Allure
    report is stored on ``context.allure_report``.  The test-plan
    environment variable is always cleared afterwards.
    """
    with test_context():
        extra_options = kwargs.get('args', '')
        command = '{0} {1}'.format(
            extra_options, '-f allure_behave.formatter:AllureFormatter')
        config = Configuration(command_args=command)
        report_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
        opener = StreamOpener(filename=report_dir)
        runner = ModelRunner(config, context.feature_definition)
        runner.formatters = make_formatters(config, [opener])
        runner.hooks = getattr(context, 'globals', dict())
        runner.run()
        context.allure_report = AllureReport(report_dir)

    os.environ.pop("ALLURE_TESTPLAN_PATH", None)
    def run(context, **kwargs):
        """Run *context*'s feature with Allure and store the parsed report.

        ``kwargs['args']`` may carry additional behave command-line options.
        """
        formatter_opt = '-f allure_behave.formatter:AllureFormatter'
        command = '{0} {1}'.format(kwargs.get('args', ''), formatter_opt)
        config = Configuration(command_args=command)

        report_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
        opener = StreamOpener(filename=report_dir)

        runner = ModelRunner(config, [context.feature_definition])
        runner.formatters = make_formatters(config, [opener])
        # Open the fixture context up front; paired with the explicit
        # listener finalization below.
        runner.formatters[0].listener.fixture_context.enter()
        runner.hooks = getattr(context, 'globals', dict())
        runner.run()

        # Explicitly finalize the listener so pending results are flushed
        # before the report directory is read.
        runner.formatters[0].listener.__del__()
        context.allure_report = AllureReport(report_dir)
Example #5
0
    def run_with_paths(self):
        """Parse every selected feature file, then run the model.

        Returns whatever :meth:`run_model` returns (the overall result).
        """
        self.context = Context(self)
        self.load_hooks()
        self.load_step_definitions()

        # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
        # self.setup_capture()
        # self.run_hook('before_all', self.context)

        # -- STEP: Parse all feature files (by using their file location).
        selected = []
        for location in self.feature_locations():
            if not self.config.exclude(location):
                selected.append(location)
        self.features.extend(
            parse_features(selected, language=self.config.lang))

        # -- STEP: Run all features.
        self.formatters = make_formatters(self.config, self.config.outputs)
        return self.run_model()
Example #6
0
    def run_with_paths(self):
        """Collect, parse and execute all selected feature files."""
        self.context = Context(self)
        self.load_hooks()
        self.load_step_definitions()

        # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
        # self.setup_capture()
        # self.run_hook("before_all", self.context)

        # -- STEP: Parse all feature files (by using their file location).
        selected = [location
                    for location in self.feature_locations()
                    if not self.config.exclude(location)]
        parsed = parse_features(selected, language=self.config.lang)
        self.features.extend(parsed)

        # -- STEP: Run all features.
        self.formatters = make_formatters(self.config, self.config.outputs)
        return self.run_model()
Example #7
0
 def _formatters(self, file_object, config):  # pylint: disable=no-self-use
     """Build formatters for *config* streaming to *file_object*.

     Every formatter's URI is set to '<string>' since the input is not a
     real file.
     """
     opener = StreamOpener(stream=file_object)
     built = make_formatters(config, [opener])
     for formatter in built:
         formatter.uri('<string>')
     return built
Example #8
0
    def run_with_paths(self):
        """Top-level run: parse feature files, then execute them.

        Delegates to :meth:`run_multiproc` when a worker count is
        configured.  Returns True when anything failed — a failing
        feature, a user abort, or newly discovered undefined steps.
        """
        context = self.context = Context(self)
        self.load_hooks()
        self.load_step_definitions()
        assert not self.aborted
        stream_openers = self.config.outputs
        failed_count = 0

        # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
        self.setup_capture()
        self.run_hook('before_all', context)

        # -- STEP: Parse all feature files (by using their file location).
        feature_locations = [location
                             for location in self.feature_locations()
                             if not self.config.exclude(location)]
        features = parse_features(feature_locations, language=self.config.lang)
        self.features.extend(features)

        # -- STEP: Multi-processing!
        if getattr(self.config, 'proc_count'):
            return self.run_multiproc()

        # -- STEP: Run all features.
        self.formatters = make_formatters(self.config, stream_openers)
        undefined_steps_initial_size = len(self.undefined_steps)
        keep_running = True
        for feature in features:
            if keep_running:
                try:
                    self.feature = feature
                    for formatter in self.formatters:
                        formatter.uri(feature.filename)

                    if feature.run(self):
                        failed_count += 1
                        if self.config.stop or self.aborted:
                            # -- FAIL-EARLY: After first failure.
                            keep_running = False
                except KeyboardInterrupt:
                    self.aborted = True
                    failed_count += 1
                    keep_running = False

            # -- ALWAYS: Report run/not-run feature to reporters.
            # REQUIRED-FOR: Summary to keep track of untested features.
            for reporter in self.config.reporters:
                reporter.feature(feature)

        # -- AFTER-ALL:
        if self.aborted:
            print("\nABORTED: By user.")
        for formatter in self.formatters:
            formatter.close()
        self.run_hook('after_all', context)
        for reporter in self.config.reporters:
            reporter.end()

        new_undefined_steps = (len(self.undefined_steps)
                               > undefined_steps_initial_size)
        return (failed_count > 0) or self.aborted or new_undefined_steps
def get_formatter(config, stream_openers):
    """Deprecated shim: forwards to the formatter registry.

    Kept for backwards compatibility; new code should call
    ``make_formatters()`` directly.
    """
    warnings.warn("Use make_formatters() instead", DeprecationWarning,
                  stacklevel=2)
    return _registry.make_formatters(config, stream_openers)
Example #10
0
    def worker(self, proc_number):
        """Worker loop: consume job indices from the shared queue and run jobs.

        Output for each job is captured in an in-memory buffer; a per-job
        summary dict is pushed onto ``self.resultsqueue`` for the parent
        process to aggregate.

        :param proc_number: zero-based index of this worker process.
        """
        self.context.procnum = proc_number
        while True:
            try:
                joblist_index = self.joblist_index_queue.get_nowait()
            except Exception:
                # Queue exhausted (or unusable): this worker is done.
                # NOTE: 'except Exception, e' is Python-2-only syntax and the
                # binding was unused; this form works on Python 2 and 3.
                break
            current_job = self.joblist[joblist_index]
            writebuf = StringIO.StringIO()
            self.setfeature(current_job)
            # Capture all formatter output in the buffer so it can be sent
            # back through the results queue.
            self.config.outputs = []
            self.config.outputs.append(StreamOpener(stream=writebuf))

            stream_openers = self.config.outputs

            self.formatters = make_formatters(self.config, stream_openers)

            for formatter in self.formatters:
                formatter.uri(current_job.filename)

            if self.step_registry is None:
                self.step_registry = the_step_registry
            start_time = time.strftime("%Y-%m-%d %H:%M:%S")
            current_job.run(self)
            end_time = time.strftime("%Y-%m-%d %H:%M:%S")

            sys.stderr.write("%s: %s \n" %
                             (current_job.filename, current_job.status.name))

            if current_job.type == 'feature':
                for reporter in self.config.reporters:
                    reporter.feature(current_job)

            job_report_text = self.generatereport(proc_number, current_job,
                                                  start_time, end_time,
                                                  writebuf)

            if job_report_text:
                results = {
                    'steps_passed': 0,
                    'steps_failed': 0,
                    'steps_skipped': 0,
                    'steps_undefined': 0,
                    'steps_untested': 0,
                    'jobtype': current_job.type,
                    'reportinginfo': job_report_text,
                    'status': str(current_job.status.name),
                }
                if current_job.type != 'feature':
                    results['uniquekey'] = (current_job.filename
                                            + current_job.feature.name)
                else:
                    results['scenarios_passed'] = 0
                    results['scenarios_failed'] = 0
                    results['scenarios_skipped'] = 0
                    results['scenarios_untested'] = 0
                    self.countscenariostatus(current_job, results)
                self.countstepstatus(current_job, results)
                # Default to False so a config without a 'junit' attribute
                # cannot raise AttributeError here.
                if (current_job.type != 'feature'
                        and getattr(self.config, 'junit', False)):
                    results['junit_report'] = self.generate_junit_report(
                        current_job, writebuf)
                self.resultsqueue.put(results)
Example #11
0
 def _formatter(self, file_object, config):  # pylint: disable=no-self-use
     """Return the first formatter for *config*, streaming to *file_object*."""
     opener = StreamOpener(stream=file_object)
     formatter = make_formatters(config, [opener])[0]
     formatter.uri("<string>")
     return formatter
Example #12
0
 def _formatters(self, file_object, config):  # pylint: disable=no-self-use
     """Build all formatters for *config* writing to *file_object*."""
     opener = StreamOpener(stream=file_object)
     result = make_formatters(config, [opener])
     for fmt in result:
         fmt.uri('<string>')
     return result
Example #13
0
    def run_with_paths(self):
        """Parallel run: fan jobs out to worker processes and gather results.

        Spawns ``config.proc_count`` worker processes, feeds them the job
        queue, consumes their results while they run, then drains any
        stragglers and emits the final per-feature output and reports.

        :return: the accumulated failure flag (``self.results_fail``).
        """
        feature_locations = [
            filename for filename in self.feature_locations()
            if not self.config.exclude(filename)
        ]
        self.load_hooks(
        )  # hooks themselves not used, but 'environment.py' loaded
        # step definitions are needed here for formatters only
        self.load_step_definitions()
        features = parse_features(feature_locations, language=self.config.lang)
        self.features.extend(features)
        feature_count, scenario_count = self.scan_features()
        njobs = len(self.jobs_map)
        proc_count = int(self.config.proc_count)
        print(("INFO: {0} scenario(s) and {1} feature(s) queued for"
               " consideration by {2} workers. Some may be skipped if the"
               " -t option was given...".format(scenario_count, feature_count,
                                                proc_count)))

        procs = []
        # Workers must not write to the real outputs/reporters; stash them
        # and restore once the workers have been started.
        old_outs = self.config.outputs
        self.config.outputs = []
        old_reporters = self.config.reporters
        self.config.reporters = []

        for i in range(proc_count):
            client = MultiProcClientRunner(self, i)
            proc = multiprocessing.Process(target=client.run)
            procs.append(proc)
            proc.start()

        print(("INFO: started {0} workers for {1} jobs.".format(
            proc_count, njobs)))

        self.config.reporters = old_reporters
        self.formatters = make_formatters(self.config, old_outs)
        self.config.outputs = old_outs

        while not self.jobsq.empty():
            # 1: consume while tests are running
            self.consume_results()
            # Generator (not list) avoids materializing on every poll.
            if not any(p.is_alive() for p in procs):
                break

        if any(p.is_alive() for p in procs):
            self.jobsq.join()  # wait for all jobs to be processed
            print("INFO: all jobs have been processed")

            while self.consume_results(timeout=0.1):
                # 2: remaining results
                pass

            # then, wait for all workers to exit (plain loop, not a
            # side-effect list comprehension):
            for proc in procs:
                proc.join()

        print("INFO: all sub-processes have returned")

        while self.consume_results(timeout=0.1):
            # 3: just in case some arrive late in the pipe
            pass

        for feature in self.features:
            # make sure all features (including ones that have not returned)
            # are printed
            self._output_feature(feature)

        for formatter in self.formatters:
            formatter.close()
        for reporter in self.config.reporters:
            reporter.end()

        return self.results_fail
Example #14
0
def get_formatter(config, stream_openers):
    """Deprecated alias kept for backwards compatibility.

    Warns the caller, then delegates to the registry's
    ``make_formatters()``.
    """
    warnings.warn("Use make_formatters() instead",
                  DeprecationWarning,
                  stacklevel=2)
    return _registry.make_formatters(config, stream_openers)
Example #15
0
 def _formatter(self, file_object, config):  # pylint: disable=no-self-use
     """Create formatters for *config* on *file_object*; return the first."""
     first = make_formatters(config, [StreamOpener(stream=file_object)])[0]
     first.uri("<string>")
     return first
Example #16
0
 def _formatters(self, file_object, config):
     """Build formatters for *config* streaming to *file_object*.

     Each formatter's URI is set to '<string>' since the input does not
     come from a real file.
     """
     # Renamed 'file' -> 'file_object': avoids shadowing the (Python 2)
     # builtin and matches the sibling _formatters() helpers.
     stream_opener = StreamOpener(stream=file_object)
     formatters = make_formatters(config, [stream_opener])
     for formatter in formatters:
         formatter.uri('<string>')
     return formatters