def run(biobox_type, image, task, stop=True):
    """
    Runs the behave cucumber features for the given biobox and task given by
    the passed arguments. Creates a directory in the current working directory
    where the verification files are created. Returns a dictionary of the behave
    output.
    """
    from behave.__main__ import main as behave_main
    _, tmp_file = tempfile.mkstemp()

    cmd = "{file} --define IMAGE={image} --define TASK={task} --define TMPDIR={tmp_dir} --outfile {tmp_file} --format json.pretty --no-summary"
    if stop:
        cmd += " --stop"
    args = {
        'file': feature_file(biobox_type),
        'tmp_dir': tmp_feature_dir(),
        'image': image,
        'tmp_file': tmp_file,
        'task': task
    }

    behave_main(cmd.format(**args))

    with open(tmp_file, 'r') as f:
        return json.loads(f.read())
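A minimal usage sketch for run() above; the biobox type, image tag, and task name are placeholders, and feature_file / tmp_feature_dir are assumed to be helpers defined in the same module:

# Hypothetical invocation: run the features for an assembler biobox image and
# report each feature's status from the parsed behave JSON output.
results = run("short_read_assembler", "example/assembler:latest", "default", stop=False)
for feature in results:
    print(feature["name"], feature["status"])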
Example #2
def delegate_test(scenario):
    argstable = [
        'behave/features/django_admin/',
        '-n{}'.format(scenario.replace('Scenario: ', '').replace('\r', '')),
        '-f allure_behave.formatter:AllureFormatter',
        '-o%allure_result_folder%'
    ]
    behave_main(argstable)
    return scenario
Example #3
    def handle(self, *args, **options):
        # Configure django environment
        django_test_runner = DiscoverRunner()
        django_test_runner.setup_test_environment()
        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        behave_main(args=sys.argv[2:])

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()
def run_tests(args):
    if not get_features(args):
        print("No features could be found to check.")
        return False
    behave_args = [get_resource_path("features")]
    if args["advanced_arguments"]:
        behave_args.extend(args["advanced_arguments"].split())
    elif not args["console"]:
        behave_args.extend(["--format", "json.pretty", "--outfile", "report/report.json"])
    behave_main(behave_args)
    print("# All tests are finished.")
    return True
Example #6
    def run(self, **kwargs):
        """Run the BehaveFramework feature files

        Here are the steps:
           * create the output directories if required,
           * run behave features with parameters
           * get the results in output.json,

        Args:
            kwargs: Arbitrary keyword arguments.

        Returns:
            EX_OK if all suites ran well.
            EX_RUN_ERROR otherwise.
        """
        try:
            suites = kwargs["suites"]
        except KeyError:
            self.__logger.exception("Mandatory args were not passed")
            return self.EX_RUN_ERROR
        if not os.path.exists(self.res_dir):
            try:
                os.makedirs(self.res_dir)
            except Exception:  # pylint: disable=broad-except
                self.__logger.exception("Cannot create %s", self.res_dir)
                return self.EX_RUN_ERROR
        config = [
            '--junit', f'--junit-directory={self.res_dir}', '--format=json',
            f'--outfile={self.json_file}'
        ]
        html_file = os.path.join(self.res_dir, 'output.html')
        config += [
            '--format=behave_html_formatter:HTMLFormatter',
            f'--outfile={html_file}'
        ]
        if kwargs.get("tags", False):
            config += ['--tags=' + ','.join(kwargs.get("tags", []))]
        if kwargs.get("console", False):
            config += ['--format=pretty', '--outfile=-']
        for feature in suites:
            config.append(feature)
        self.start_time = time.time()
        behave_main(config)
        self.stop_time = time.time()

        try:
            self.parse_results()
            self.__logger.info("Results were successfully parsed")
        except Exception:  # pylint: disable=broad-except
            self.__logger.exception("Cannot parse results")
            return self.EX_RUN_ERROR
        return self.EX_OK
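A hedged usage sketch for the run() method above; the constructor arguments and suite path are assumptions, not part of the original snippet:

# Hypothetical call: run one feature directory with a tag filter plus console
# output, then compare the return value with the class constants used above.
framework = BehaveFramework(case_name="behave_suite", project_name="example")  # constructor signature assumed
status = framework.run(suites=["features/smoke"], tags=["smoke"], console=True)
if status == framework.EX_OK:
    print("behave suites passed; parsed results are in", framework.json_file)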
Example #7
def run_tests(args):
    if not get_features(args):
        print('No requirements could be found to check.')
        return False
    behave_args = [get_resource_path('features')]
    if args.advanced_arguments:
        behave_args.extend(args.advanced_arguments.split())
    else:
        behave_args.extend(
            ['--junit', '--junit-directory', args.junit_directory])
    behave_main(behave_args)
    print('# All tests are finished.')
    return True
Example #8
def run_tests(args):
    if not get_features(args):
        print('No features could be found to check.')
        return False
    behave_args = [get_resource_path('features')]
    if args['advanced_arguments']:
        behave_args.extend(args['advanced_arguments'].split())
    elif not args['console']:
        behave_args.extend(
            ['--format', 'json.pretty', '--outfile', 'report/report.json'])
    behave_main(behave_args)
    print('# All tests are finished.')
    return True
Example #9
File: run.py Project: sabau/Python
def check_queue(*args):
    SC = SauceClient(SC_USERNAME, SC_TOKEN)
    activity = SC.account.get_activity()

    while activity['totals']['queued'] > 0:
        slept = randint(10, 150) / 1000
        print('Queue already occupied: {} will be waiting for {} s'.format(args[0], slept))
        sleep(slept)
        activity = SC.account.get_activity()
    print('behave {}'.format(args))

    # DON'T launch a thread here, or we will lose track of it, and when the main
    # process quits, the children die too (see threads.join at the bottom of the file)
    behave_main(*args)
Example #10
    def test_feature(self, args):
        tmpdir = tempfile.mkdtemp()
        features_path = os.path.join(tmpdir, "features")
        steps_path = os.path.join(features_path, "steps")
        report_json = os.path.join(tmpdir, "report.json")
        shutil.copytree(os.path.join(self.base_path, "features"),
                        features_path)
        shutil.copy(args["feature"], features_path)
        if args["steps"]:
            if os.path.isfile(args["steps"]):
                shutil.copy(args["steps"], steps_path)
            elif os.path.isdir(args["steps"]):
                copy_tree(args["steps"], steps_path)
        behave_main(self.get_behave_args(args, features_path, report_json))
        return report_json
Example #11
    def handle(self, *args, **options):
        behave_args = self.get_behave_args()

        # Configure django environment
        if options['dry_run'] or options['use_existing_database']:
            django_test_runner = ExistingDatabaseTestRunner()
        else:
            django_test_runner = BehaviorDrivenTestRunner()

        django_test_runner.setup_test_environment()

        if options['keepdb']:
            django_test_runner.keepdb = True

        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        monkey_patch_behave(django_test_runner)
        exit_status = behave_main(args=behave_args)

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()

        if exit_status != 0:
            sys.exit(exit_status)
Example #13
def run_behave(testname):
    """Run the Behave machinery, read overall test result, capture log file."""
    test_specification = "{dir}/{test}{suffix}".format(dir=TEST_DIR,
                                                       test=testname,
                                                       suffix=TEST_SUFFIX)
    result = behave_main([test_specification])
    print("Test result: {result}:".format(result=result))
Example #14
    def test_features(self):
        """test features will run the feature files tests.

test features uses the --dir option to the test command to know what
directory the feature files will be found in. The directory must exist,
and contain the subdirectory features/live_read.
"""
        
        from behave.__main__ import main as behave_main

        sdir = self.ovalues.sdir
        assert os.path.isdir(sdir), \
            "Directory does not exist: " + sdir
        sdir_live_read = os.path.join(sdir, 'features')
        assert os.path.isdir(sdir_live_read), \
            "Directory should contain features: " + sdir
        os.chdir(sdir)

        largs = BEHAVE_ARGS.split()
        # we exclude the tests tagged @wip Work in progress
        largs.extend(['--tags', '~wip', 'features'])
        iretval = behave_main(largs)

        assert iretval == 0, "ERROR: Behave tests failed"
        self.poutput("INFO: features passed in " + sdir)
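For illustration only, assuming BEHAVE_ARGS holds a few base options (its real value is defined elsewhere in the module), the argument list assembled above would look like this:

# Hypothetical BEHAVE_ARGS value and the resulting behave argument list.
BEHAVE_ARGS = "--no-capture --format progress"
largs = BEHAVE_ARGS.split()
largs.extend(['--tags', '~wip', 'features'])
# largs == ['--no-capture', '--format', 'progress', '--tags', '~wip', 'features']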
Example #15
def delegate_test(self, browser: str, scenario: str):
    def replace_char(string: str) -> str:
        for chars in REPLACE_CHARS:
            string = string.replace(chars, "")
        return string

    args_list = [
        "features/",
        "--no-skipped",
        "--format=allure_behave.formatter:AllureFormatter",
        f"--outfile={browser}_results/",
        "--logging-filter=-root",
        "--name",
        replace_char(scenario),
    ]

    # set env var that decides in which browser the test should be executed
    env_vars = {"BROWSER": browser, "ALLURE_INDENT_OUTPUT": "2"}
    with set_env(env_vars):
        temp_redirect = io.StringIO()
        with redirect_stdout(temp_redirect):
            exit_code = behave_main(args_list)

    behave_result = temp_redirect.getvalue()
    logger.info(behave_result)
    if exit_code == 1:
        self.update_state(state=states.FAILURE, meta=behave_result)
    sys.exit(exit_code)
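set_env above is not a standard-library helper; a minimal sketch of such a context manager, assuming it should restore the previous environment on exit, could look like this:

import os
from contextlib import contextmanager

@contextmanager
def set_env(env_vars):
    # Temporarily apply the given environment variables, then restore the
    # original values (removing keys that did not exist before).
    old_values = {key: os.environ.get(key) for key in env_vars}
    os.environ.update(env_vars)
    try:
        yield
    finally:
        for key, old in old_values.items():
            if old is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = old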
Example #16
    def handle(self, *args, **options):

        # Check the flags
        if options['use_existing_database'] and options['simple']:
            self.stderr.write(self.style.WARNING(
                '--simple flag has no effect'
                ' together with --use-existing-database'
            ))

        # Configure django environment
        if options['dry_run'] or options['use_existing_database']:
            django_test_runner = ExistingDatabaseTestRunner()
        elif options['simple']:
            django_test_runner = SimpleTestRunner()
        else:
            django_test_runner = BehaviorDrivenTestRunner()

        django_test_runner.setup_test_environment()

        if options['keepdb']:
            django_test_runner.keepdb = True

        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        monkey_patch_behave(django_test_runner)
        behave_args = self.get_behave_args()
        exit_status = behave_main(args=behave_args)

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()

        if exit_status != 0:
            sys.exit(exit_status)
Example #17
def main() -> None:
    """Run main entry-point for s2i based integration tests."""
    args = ["--show-timings"]
    if _GENERATE_REPORT:
        args.extend(["-f", "html", "-o", "behave-report.html"])

    if _TAGS:
        args.extend(["--tags", _TAGS])

    # Pass any additional arguments to behave.
    args.extend(sys.argv[1:])

    _print_info()
    print("Tests are executed using", args, file=sys.stderr)

    try:
        exit_code = behave_main(args)
    except OSError:
        # Fall back to a non-zero code so sys.exit(exit_code) below is defined.
        exit_code = 1

    if _GENERATE_REPORT and _MAIL_REPORT:
        send_email()

    if _ARTIFACTS_DIRECTORY is not None:
        shutil.copy(_BEHAVE_HTML_REPORT, _ARTIFACTS_DIRECTORY)

    sys.exit(exit_code)
def lambda_runner(event, context):
    suffix = datetime.now().strftime(DATETIME_FORMAT)
    results_location = f'/tmp/result_{suffix}'
    run_args = get_run_args(event, results_location)
    print(f'Running with args: {run_args}')
    # behave -t @smoke -t ~@login -k -f allure_behave.formatter:AllureFormatter -o output --no-capture

    try:
        return_code = behave_main(run_args)
        test_result = False if return_code == 1 else True

    except Exception as e:
        print(e)
        test_result = False

    response = {'test_result': test_result}

    s3 = boto3.resource('s3')

    for file in os.listdir(results_location):
        if file.endswith('.json'):
            s3.Bucket(REPORTS_BUCKET).upload_file(f'{results_location}/{file}', f'tmp_reports/{file}')

    call(f'rm -rf {results_location}', shell=True)

    return {
        'statusCode': 200,
        'body': json.dumps(response)
    }
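get_run_args is not shown in this snippet; a hedged sketch of how it might map the commented CLI line onto a behave argument list (the event shape and defaults are assumptions) could be:

# Hypothetical helper: build behave arguments roughly equivalent to
# "behave -t @smoke -t ~@login -k -f allure_behave.formatter:AllureFormatter -o <dir> --no-capture".
def get_run_args(event, results_location):
    tags = event.get('tags', ['@smoke'])  # assumed event payload
    args = []
    for tag in tags:
        args.extend(['-t', tag])
    args.extend([
        '-k',  # --no-skipped
        '-f', 'allure_behave.formatter:AllureFormatter',
        '-o', results_location,
        '--no-capture',
    ])
    return args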
Example #20
    def test_tck(self):
        env = Env()
        cmd = ["./features/", '--tags=-skip']
        if not env.verbose:
            cmd.append('--format=progress')
        res = 'pass' if behave_main(cmd) == 0 else 'fail'
        env.assertEquals(res, 'pass')
Example #21
    def handle(self, *args, **options):

        # Check the flags
        if options['use_existing_database'] and options['simple']:
            self.stderr.write(
                self.style.WARNING('--simple flag has no effect'
                                   ' together with --use-existing-database'))

        # Configure django environment
        if options['dry_run'] or options['use_existing_database']:
            django_test_runner = ExistingDatabaseTestRunner()
        elif options['simple']:
            django_test_runner = SimpleTestRunner()
        else:
            django_test_runner = BehaviorDrivenTestRunner()

        django_test_runner.setup_test_environment()

        if options['keepdb']:
            django_test_runner.keepdb = True

        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        monkey_patch_behave(django_test_runner)
        behave_args = self.get_behave_args()
        exit_status = behave_main(args=behave_args)

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()

        if exit_status != 0:
            sys.exit(exit_status)
Example #22
    def run(self, *args, **kwargs):
        frontend = os.environ.get("APP", app.config.get("APP", None))
        if not frontend:
            print("You need to configure the APP to be used!")
            exit(1)

        exit(behave_main(sys.argv[2:] + ['--no-capture',
             "beavy_apps/{}/tests/features".format(frontend)]))
Example #23
def test_tck():
    env = Env()
    cmd = ["./features/", '--tags=-skip']
    #  cmd = ["./features/", '--tags=-crash'] # Run all tests except crashing tests
    if not env.verbose:
        cmd.append('--format=progress')
    res = behave_main(cmd)
    res = 'pass' if res == 0 else 'fail'
    env.assertEquals(res, 'pass')
Example #24
    def run(self, *args, **kwargs):
        frontend = os.environ.get("APP", app.config.get("APP", None))
        if not frontend:
            print("You need to configure the APP to be used!")
            exit(1)

        exit(
            behave_main(sys.argv[2:] + [
                '--no-capture', "beavy_apps/{}/tests/features".format(frontend)
            ]))
Example #25
def test_tck():
    env = Env(decodeResponses=True)
    cmd = ["./features/", '--tags=-crash', '--tags=-skip']
    #  cmd = ["./features/", '--tags=-crash'] # Run all tests except crashing tests
    if not env.verbose:
        cmd.append('--format=progress')
    if env.getEnvKwargs()['debugger']:
        cmd.append('--tags=-leak')
    res = behave_main(cmd)
    res = 'pass' if res == 0 else 'fail'
    env.assertEquals(res, 'pass')
Example #26
def run(argv):
    opts   = util.parse_docopt(__doc__, argv, False)
    biobox = opts['<biobox_type>']
    image  = opts['<image>']
    task   = opts['--task']

    from behave.__main__ import main as behave_main
    _, tmp_file = tempfile.mkstemp()
    cmd = "{file} --define IMAGE={image} --define TASK={task} --define TMPDIR={tmp_dir} --outfile {tmp_file} --no-summary --stop"
    args = {'file':     verification_file(biobox),
            'tmp_dir':  tmp_feature_dir(),
            'image':    image,
            'tmp_file': tmp_file,
            'task':     task}

    behave_main(cmd.format(**args))

    with open(tmp_file, 'r') as f:
        output = f.read()

    if "Assertion Failed" in output:
        util.err_exit('failed_verification', {'image': image, 'biobox': biobox.replace('_', ' ')})
def run(project):
    print('Starting django testserver')
    django_process = subprocess.Popen(
        ['python',
         '{0}/manage.py'.format(project),
         'testserver',
         '--noinput'],
        stdout=open('/dev/null', 'w'),
        stderr=open('/dev/null', 'w'))
    time.sleep(2)

    print('Starting Xvfb')
    xvfb_process = subprocess.Popen(
        ['Xvfb', ':99', '-ac', '-screen', '0', '1024x768x8'])
    os.environ["DISPLAY"] = ":99"

    try:
        behave_main()  # This calls sys.exit() under all circumstances
    finally:
        print('Stopping django testserver')
        django_process.terminate()
        print('Stopping Xvfb')
        xvfb_process.terminate()
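The comment above says behave_main() calls sys.exit(); in recent behave releases main() instead returns the exit code. A small sketch that handles either behaviour, so cleanup like the finally block above still gets a usable status:

from behave.__main__ import main as behave_main

def run_behave_capturing_exit(argv=None):
    # Return the behave exit status whether main() returns it or raises SystemExit.
    try:
        return behave_main(argv)
    except SystemExit as exc:
        return exc.code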
def run(biobox_type, image, task, stop = True):
    """
    Runs the behave cucumber features for the given biobox and task given by
    the passed arguments. Creates a directory in the current working directory
    where the verification files are created. Returns a dictionary of the behave
    output.
    """
    from behave.__main__ import main as behave_main
    _, tmp_file = tempfile.mkstemp()

    cmd = "{file} --define IMAGE={image} --define TASK={task} --define TMPDIR={tmp_dir} --outfile {tmp_file} --format json.pretty --no-summary"
    if stop:
        cmd += " --stop"
    args = {'file':     feature_file(biobox_type),
            'tmp_dir':  tmp_feature_dir(),
            'image':    image,
            'tmp_file': tmp_file,
            'task':     task}

    behave_main(cmd.format(**args))

    with open(tmp_file, 'r') as f:
        return json.loads(f.read())
Example #29
    def handle(self, *args, **options):
        django_test_runner = CloneExistingDatabaseTestRunner()
        django_test_runner.setup_test_environment()
        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        monkey_patch_behave(django_test_runner)
        behave_args = self.get_behave_args()
        exit_status = behave_main(args=behave_args)

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()

        if exit_status != 0:
            sys.exit(exit_status)
Example #30
    def run(self, image, run_tags, test_names):
        """Run test suite"""
        test_path = os.path.join(self.target, 'test')
        logger.debug("Running behave in '%s'." % test_path)
        args = [test_path,
                '--junit',
                '--junit-directory', 'results',
                '--no-skipped',
                '-t', '~ignore',
                '-D', 'IMAGE=%s' % image]

        if test_names:
            for name in test_names:
                args.append('--name')
                args.append("%s" % name)
        else:
            for tag in run_tags:
                if ':' in tag:
                    test_tag = tag.split(':')[0]

                args.append('-t')
                if '/' in tag:
                    args.append("@%s,@%s" % (test_tag.split('/')[0], test_tag))
                else:
                    args.append(tag)

            # Check if we're running runtests on CI or locally
            # If we run tests locally - skip all features that
            # are marked with the @ci annotation
            if getpass.getuser() != "jenkins":
                args.append("-t")
                args.append("~ci ")

        try:
            from behave.__main__ import main as behave_main

            with Chdir(os.path.join(self.target, 'test')):
                if behave_main(args) != 0:
                    raise CekitError("Test execution failed, please consult output above")
        except CekitError:
            raise
        except:
            raise CekitError("An error occurred while executing tests")
Example #31
    def handle(self, *args, **options):

        # Check the flags
        if options['use_existing_database'] and options['simple']:
            self.stderr.write(self.style.WARNING(
                '--simple flag has no effect'
                ' together with --use-existing-database'
            ))

        # Configure django environment
        passthru_args = ('failfast',
                         'interactive',
                         'keepdb',
                         'reverse')
        runner_args = {k: v for
                       k, v in
                       options.items() if k in passthru_args and v is not None}

        if options['dry_run'] or options['use_existing_database']:
            django_test_runner = ExistingDatabaseTestRunner(**runner_args)
        elif options['simple']:
            django_test_runner = SimpleTestRunner(**runner_args)
        else:
            django_test_runner = BehaviorDrivenTestRunner(**runner_args)

        django_test_runner.setup_test_environment()

        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        monkey_patch_behave(django_test_runner)
        behave_args = self.get_behave_args()
        exit_status = behave_main(args=behave_args)

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()

        if exit_status != 0:
            sys.exit(exit_status)
Example #32
    def handle(self, *args, **options):

        # Check the flags
        if options['use_existing_database'] and options['simple']:
            self.stderr.write(
                self.style.WARNING('--simple flag has no effect'
                                   ' together with --use-existing-database'))

        # Configure django environment
        passthru_args = ('failfast', 'interactive', 'keepdb', 'reverse')
        runner_args = {
            k: v
            for k, v in options.items() if k in passthru_args and v is not None
        }

        if options['dry_run'] or options['use_existing_database']:
            django_test_runner = ExistingDatabaseTestRunner(**runner_args)
        elif options['simple']:
            django_test_runner = SimpleTestRunner(**runner_args)
        else:
            django_test_runner = BehaviorDrivenTestRunner(**runner_args)

        django_test_runner.setup_test_environment()

        old_config = django_test_runner.setup_databases()

        # Run Behave tests
        monkey_patch_behave(django_test_runner)
        behave_args = self.get_behave_args()
        exit_status = behave_main(args=behave_args)

        # Teardown django environment
        django_test_runner.teardown_databases(old_config)
        django_test_runner.teardown_test_environment()

        if exit_status != 0:
            sys.exit(exit_status)
Example #33
def iMain():
    
    if sys.argv[-1] == 'publish':
        os.system(sys.executable +" setup.py sdist --formats=gztar upload --show-response")
        os.system(sys.executable +" setup.py bdist_wheel upload --show-response")
        print "You probably want to also tag the version now:"
        print "  git tag -a %s -m 'version %s'" % (version, version, )
        print "  git push --tags"
        return 0
    
    if sys.argv[-1] == 'test':
        try:
            modules = map(__import__, dParams['tests_require'])
        except ImportError as e:
            err_msg = e.message.replace("No module named ", "")
            msg = "%s is not installed. Install your test requirments." % err_msg
            raise ImportError(msg)
        sStartDir = os.path.dirname(os.path.realpath(__file__))          
        os.chdir(os.path.join(sStartDir, 'tests'))
        
        largs = BEHAVE_ARGS.split()
        largs.extend(['features'])
        from behave.__main__ import main as behave_main
        iRetval = behave_main(largs)
        return iRetval
    
    if '--help' in sys.argv:
        print """
Extra commands:

  setup.py publish    will sdist upload the distribution to pypi
  setup.py test       will run a very basic test of the software
"""
        # drop through
        
    setup(**dParams)
    return 0
Example #34
def run_behave(testname):
    """Run the Behave machinery, read overall test result, capture log file."""
    test_specification = "{dir}/{test}{suffix}".format(dir=TEST_DIR,
                                                       test=testname,
                                                       suffix=TEST_SUFFIX)
    logfile = mktemp()
    print("Logfile: {logfile}".format(logfile=logfile))

    output_specification = "--outfile={logfile}".format(logfile=logfile)
    result = behave_main([test_specification, output_specification])
    print("Test result: {result}:".format(result=result))

    try:
        log = None

        with open(logfile, "r") as fin:
            log = fin.read()

        remove(logfile)
        return result, log

    except Exception as e:
        print("Exception occured: ", e)
        return 2, None
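A short usage sketch of run_behave() above; the test name is a placeholder and TEST_DIR / TEST_SUFFIX are assumed to be configured elsewhere in the module:

# Hypothetical call: run one named test, then show its overall result and log.
result, log = run_behave("smoke")
print("overall behave exit code:", result)
if log is not None:
    print(log)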
Example #35
    subtests_to_execute = "all"

    if len(sys.argv) > 1:
        if sys.argv[1] in ["integration", "conformance"]:
            tests_to_execute = sys.argv[1]
            if len(sys.argv) == 3:
                subtests_to_execute = sys.argv[2]

    if tests_to_execute in ["conformance", "all"]:
        print
        print "##############################"
        print "# Conformance tests"
        print "##############################"
        print

        res_behave = behave_main("tests/apps/gaming_challenges/controllers/api")

    if tests_to_execute in ["integration", "all"]:
        print
        print "##############################"
        print "# Unit/Integration tests"
        print "##############################"
        print

        test_loader = unittest.TestLoader()
        all_tests = []

        ## Models
        if subtests_to_execute in ["model", "all"]:
            # Users manager tests
            all_tests.append(test_loader.loadTestsFromTestCase(
Example #36
__author__ = 'rsantamaria'

from behave.__main__ import main as behave_main


behave_main(['/Users/rsantamaria/PycharmProjects/Behave/', '-t ~@skip', '-k'])
Example #37
import sys
sys.path.append('\\synthetics\\modules\\behave-1.2.5')
#sys.path.append('\\synthetics\\modules\\behave')
sys.path.append('\\synthetics\\modules\\parse-1.8.4')
sys.path.append('\\synthetics\\modules\\selenium-3.13.0')
sys.path.append('\\synthetics\\modules\\parse_type-0.4.2')
sys.path.append('\\synthetics\\modules\\traceback2-1.4.0')
sys.path.append('\\synthetics\\modules\\linecache2-1.0.0')
sys.path.append('\\synthetics\\modules\\nose-1.3.7')
sys.path.append('\\synthetics\\modules\\six-1.11.0')
sys.path.append('\\synthetics\\modules\\enum-0.4.6')
sys.path.append('\\synthetics\\modules\\influxdb-5.1.0')

sys.path.append('\\synthetics\\modules\\requests-2.19.1')
sys.path.append('\\synthetics\\modules\\urllib3-1.23')
sys.path.append('\\synthetics\\modules\\chardet-3.0.4')
sys.path.append('\\synthetics\\modules\\certifi-2018.4.16')
sys.path.append('\\synthetics\\modules\\idna-2.7')
sys.path.append('\\synthetics\\modules\\pytz-2013.7')
sys.path.append('\\synthetics\\modules\\py-dateutil-2.2')

print sys.path

from behave.__main__ import main as behave_main

behave_main(["\\synthetics\\features", "-n NEAT"])
#behave_main(["\\synthetics\\features"])
#behave_main(["-n NEAT"])
Example #38
from sys import exit
from behave.__main__ import main as behave_main

if __name__ == "__main__":
    exit(behave_main())
Example #39
# Run python behave from python instead of command line
from behave.__main__ import main as behave_main
behave_main("path/to/tutorial")
Example #40
from behave.__main__ import main as behave_main
#behave_main('--name "Check reset password functionality"')
behave_main()
Example #41
"""Programmatic entrypoint to running behave from the command line"""
import os
import sys

from behave.__main__ import main as behave_main

if __name__ == "__main__":
    os.chdir(os.path.dirname(__file__))
    exit_code = behave_main(
        "--tags=tests-replicated "
        "-D environment='https://replicated-test.n-s.internal/' ")
    sys.exit(exit_code)
Example #42
from behave.__main__ import main as behave_main
import os
os.chdir("D:\\ProgApps\\Py2710\\Behave\\chap1")
behave_main("D:\\ProgApps\\Py2710\\Behave\\chap1")

Example #43
def behave_main0():
    from behave.__main__ import main as behave_main
    monkeypatch_behave()
    setup_behave()
    return behave_main()
Example #44
from behave.__main__ import main as behave_main
import logger

# https://acme-test.uipath.com/account/login
# [email protected]
# 123456
if __name__ == '__main__':
    # Config logger
    logger.init_logging()
    # Run example feature
    # behave_main("features/Example.feature")
    # behave_main("features/Outline.feature -f json.pretty -o test.json --no-summary")

    # Run outline feature {-n "<scenario_name>"}
    # behave_main("features/Outline.feature -f json.pretty -o test.json --no-summary")

    # Run multiple features
    # behave_main("features/Outline.feature features/Example.feature -f json.pretty -o test.json --no-summary")

    # Run selenium features
    behave_main("features/Selenium.feature -f json.pretty -o test.json --no-summary")

    # Run step data scenario -- multiple scenarios run {-n "<scenario_name>" -n "<scenario_name>"}
    # behave_main("features/Outline.feature -n Example1 -n Example2 -f json.pretty -o test.json --no-summary")

    # Run feature by tags
    # behave_main("features/Outline.feature --tags=@tag -f json.pretty -o test.json --no-summary")
Example #46
def behave_main0():
    # from behave.configuration import Configuration
    from behave.__main__ import main as behave_main
    setup_behave()
    return behave_main()