def main(): # print 'sys.argv[1]'+sys.argv[1] #print len(sys.argv) parameters = None if len(sys.argv) > 1: arguments = sys.argv[1] parameters = sys.argv else: arguments = None #read_config_file('atf.properties') #run_tests_advanced(tests=arguments) #python core/ATFcore.py temp_location results_dir PYTEST_PARAM browser test1 test2 test3... print 'parameters[0]:'+parameters[0] temp_test_location = parameters[1] print 'parameters[1]:'+parameters[1] results_dir = parameters[2] print 'parameters[2]:'+parameters[2] print 'parameters[3]:'+parameters[3] #print 'parameters[4]:'+parameters[4] #print 'parameters[5]:'+parameters[5] pytest.main(' --html='+results_dir+'/report.html --resultlog='+results_dir+'/results.log --junitxml '+results_dir+'/results.xml -v -x '+temp_test_location) # Usage if pytest: """
def test_worker(home, test, results_queue):
    """Run one test file under pytest in a worker process (Python 2).

    Writes the pytest result log to a per-test file under TMP_RESULTS_PATH,
    silences the worker's stdout into a per-PID junk file while pytest runs,
    and finally pushes the log name onto ``results_queue`` so the parent
    process can collect the results.
    """
    pid = str(os.getpid())
    # "path/to/foo.py" -> "foo_<pid>": unique per test file AND per worker,
    # so concurrent workers never write to the same log.
    log_name = test.split('/')[-1].split('.')[0] + "_" + pid
    results_log = os.path.join(home, TMP_RESULTS_PATH, "results_{0}.txt".format(log_name))
    try:
        # Since all test workers output to stdout at the same time, send that
        # output to a temporary junk file
        old_stdout = sys.stdout
        sys.stdout = open(os.path.join(home, TMP_PATH, "tmp_{0}.txt".format(pid)), 'w')
        pytest.main("--result-log={0} {1}".format(results_log, os.path.join(home, test)))
        sys.stdout.close()
        sys.stdout = old_stdout
    # Clean up incase control-c is pressed
    except KeyboardInterrupt:
        if not sys.stdout.closed:
            sys.stdout.close()
        sys.stdout = old_stdout
        raise
    # Clean up if there is an IOError from the output file
    except IOError:
        if not sys.stdout.closed:
            sys.stdout.close()
        sys.stdout = old_stdout
        print "There's been an IOError"
        raise
    # Only reached on success: tell the parent which log file to read.
    results_queue.put(log_name)
def test_pytest():
    """Run pytest on this file's directory (Python 2).

    Flags: -x stop at first failure, -v verbose, -s no output capture.
    Prints a message instead of failing when pytest is not installed.
    """
    try:
        import pytest
        import os
        pytest.main('-xvs %s' % os.path.dirname(__file__))
    except ImportError:
        print 'error importing pytest'
def run_tests():
    r"""Run the symbolset tests under pytest.

    Need to comment out the following line in
    C:\VirtualEnvs\mappyfile\Lib\site-packages\pep8.py

        #stdin_get_value = sys.stdin.read

    Or get AttributeError: '_ReplInput' object has no attribute 'read'
    """
    # Raw docstring above: the Windows path contains backslash sequences
    # (\V, \L, \m) that are invalid string escapes in a normal literal.
    pytest.main(["tests/test_symbolset.py"])
def test(arguments=''):
    """Run ODL tests given by arguments.

    Parameters
    ----------
    arguments : str
        Extra command-line options forwarded to pytest, e.g. ``'-k foo'``.
    """
    import pytest
    import shlex

    this_dir = os.path.dirname(__file__)
    odl_root = os.path.abspath(os.path.join(this_dir, os.pardir, os.pardir))
    # Pass an argument list: handing pytest.main() a single space-joined
    # string is deprecated and was removed in pytest 4.0. shlex.split keeps
    # quoted user arguments intact.
    base_args = ['-x',
                 '{odl_root}/odl'.format(odl_root=odl_root),
                 '{odl_root}/test'.format(odl_root=odl_root)]
    pytest.main(base_args + shlex.split(arguments))
def run_tests(self):
    """Put the local ``lib`` dir on sys.path, then run pytest with the
    configured arguments."""
    # Deferred import: pytest is only importable once the eggs are loaded.
    import pytest

    banner = "Running: pytest %s" % self.test_args
    print(banner)
    sys.path.insert(0, "lib")
    pytest.main(self.test_args)
def main(): logging.basicConfig(level=logging.DEBUG) # cleanup any existing data netnode.Netnode(TEST_NAMESPACE).kill() pytest.main(['--capture=sys', os.path.dirname(__file__)])
def test(no_optional_skip=False):
    """Run this package's test suite.

    When ``no_optional_skip`` is true, add ``--no-optional-skip`` so tests
    normally skipped for missing optional dependencies are run anyway.
    """
    from pytest import main

    package_root = os.path.abspath(os.path.dirname(__file__))
    extra = ['--no-optional-skip'] if no_optional_skip else []
    main(args=[package_root] + extra)
def run(self):
    """Run the test suite and exit with pytest's status code.

    When ``self.noweb`` is set, deselect tests marked ``web``.
    """
    import pytest

    pytest_args = ['-m', 'not web'] if self.noweb else []
    sys.exit(pytest.main(pytest_args))
def run(**kwargs):
    """Dispatch test groups either to pytest or to the proboscis system-test
    runner.

    Keyword args read: ``config_name``, ``run_groups``, ``groups`` (legacy
    spelling, merged into run_groups), ``explain``.
    """
    config_name = kwargs.get('config_name', None)
    groups = kwargs.get('run_groups', [])
    old_groups = kwargs.get('groups', None)
    explain = kwargs.get('explain', None)
    groups_to_run = []
    groups.extend(old_groups or [])

    # Collect from pytest only once!
    # Side effect: collection populates fuel_tests.tests.conftest.test_names,
    # imported immediately below.
    pytest.main(['--collect-only', 'fuel_tests', ])
    from fuel_tests.tests.conftest import test_names

    for g in set(groups):
        # Group known to pytest: run it there and exit with pytest's status.
        # NOTE(review): this exits on the FIRST pytest-known group, skipping
        # any remaining groups — confirm that is intended.
        if g in test_names:
            sys.exit(pytest.main('-m {}'.format(g)))
        if config_name:
            register_system_test_cases(
                groups=[g],
                configs=[config_name])
            groups_to_run.append("{0}({1})".format(g, config_name))
        else:
            register_system_test_cases(groups=[g])
            groups_to_run.append(g)

    # Every requested group (config suffix stripped) must map to a known
    # proboscis group, otherwise bail out with an explanatory message.
    if not set([split_group_config(i)[0] if split_group_config(i) else i
                for i in groups_to_run]) < set(get_groups()):
        sys.exit('There are no cases mapped to current group, '
                 'please be sure that you put right test group name.')
    if explain:
        print_explain(groups)
    else:
        register(groups=["run_system_test"], depends_on_groups=groups_to_run)
        TestProgram(groups=['run_system_test'],
                    argv=clean_argv_proboscis()).run_and_exit()
def run(self, module=None):
    """Run the tests of one installed app, or of all of them.

    Creates all DB tables before the run and always drops them afterwards.

    :param module: optional dotted name of a single app to test; when None,
        every app in ``INSTALLED_APPS`` that ships a ``tests`` module is run.
    """
    if module is not None:
        modules = [module]
    else:
        modules = current_app.config['INSTALLED_APPS']

    test_modules = []
    current_app.db.create_all()
    try:
        for module in modules:
            try:
                # Keep only apps that actually provide a `tests` module.
                get_module('%s.tests' % module)
                test_modules.append('%s/tests' % get_module_path(module))
            except ImportError:
                pass
        # Pass the paths as a list: handing pytest.main() a space-joined
        # string is deprecated and was removed in pytest 4.0.
        pytest.main(test_modules)
    finally:
        # Always drop the tables, even if pytest raises.
        current_app.db.drop_all()
def main():
    """Pool every student's test file into one module per student directory
    and run each pooled module under pytest (Python 2).

    Usage: script [working_dir]; defaults to the current directory.
    """
    if len(sys.argv) > 1:
        wd = sys.argv[1]
    else:
        wd = "."
    # NOTE(review): os.listdir returns bare names, so os.path.isdir here is
    # evaluated relative to the CWD, not `wd` — confirm wd is always ".".
    student_dirs = filter(os.path.isdir, os.listdir(wd))
    students = []
    for sdir in student_dirs:
        os.path.join(wd, sdir)
        # Ad-hoc record type: one class object per student, used as a
        # namespace for name/dirname/code.
        class Student:
            name = sdir
            dirname = os.path.join(wd, sdir)
        students.append(Student)
    for s in students:
        for fname in os.listdir(s.dirname):
            if fname.startswith("test_"):
                s.code = open(os.path.join(s.dirname, fname)).read()
                # Prefix each test with the student's name so pooled tests
                # don't collide.
                s.code = s.code.replace("def test_", "def test_" + s.name + "_")
    # Concatenate everyone's (renamed) tests into one source blob.
    code = "\n".join(s.code for s in students)
    pooled_test_paths = []
    for s in students:
        path = os.path.join(s.dirname, "tests_pooled.py")
        fw = open(path, "w")
        fw.write(code)
        fw.close()
        print "Written", path
        pooled_test_paths.append(path)
        # Ensure the directory is an importable package for pytest.
        init_path = os.path.join(s.dirname, "__init__.py")
        if not os.path.isfile(init_path):
            open(init_path, "w").close()
    for path in pooled_test_paths:
        pytest.main(["--color=no", path])
def main(args):
    """Run the test suite; with no args (or just -v) also run coverage and
    validate the demo script for every environment.

    Returns the combined non-zero status if anything failed.
    """
    os.environ['MULTICONF_WARN_JSON_NESTING'] = 'true'
    print("Running tests", args)
    # Any explicit args other than plain -v: just forward them to pytest.
    if args and args != ['-v']:
        return pytest.main(['--capture=sys'] + args)

    # Render a per-major-version coverage rc from the tenjin template.
    engine = tenjin.Engine()
    major_version = sys.version_info[0]
    cov_rc_file_name = jp(here, '.coverage_rc_' + str(major_version))
    with open(cov_rc_file_name, 'w') as cov_rc_file:
        cov_rc_file.write(engine.render(jp(here, "coverage_rc.tenjin"),
                                        dict(major_version=major_version)))
    rc = pytest.main(['--capture=sys', '--cov=' + here + '/..',
                      '--cov-report=term-missing',
                      '--cov-config=' + cov_rc_file_name]
                     + (args if args == ['-v'] else []))

    print("Validating demo for all envs")
    # Replace any inherited PYTHONPATH with the current sys.path below.
    try:
        del os.environ['PYTHONPATH']
    except KeyError:
        pass
    for env_name in 'prod', 'preprod', 'devlocal', 'devs', 'devi':
        print()
        osenv = {'PYTHONPATH': ':'.join(sys.path)}
        # Accumulate failures: rc stays non-zero if any env's demo fails.
        rc |= subprocess.call((sys.executable, here + '/../demo/demo.py',
                               '--env', env_name), env=osenv)
    return rc
def run_tests(args):
    """Run the test suite on this file's directory.

    :param args: accepted for interface compatibility; unused.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    # Pass the path explicitly instead of mutating sys.argv: the original
    # `sys.argv[1] = path` raises IndexError whenever the process was started
    # without any command-line argument.
    pytest.main([path])
def run(path, session, timeout=0):
    """Run Python test at ``path`` in pytest.

    The provided ``session`` is exposed as a fixture available in the
    scope of the test functions.

    :param path: Path to the test file.
    :param session: WebDriver session to expose.
    :param timeout: Duration before interrupting potentially hanging
        tests.  If 0, there is no timeout.

    :returns: List of subtest results, which are tuples of
        (test id, status, message, stacktrace).
    """
    # pytest is imported lazily at module level; trigger that import now
    # if it has not happened yet.
    if pytest is None:
        do_delayed_imports()

    # The recorder plugin collects per-subtest outcomes; the fixtures
    # plugin makes `session` available to the tests as a fixture.
    recorder = SubtestResultRecorder()
    plugins = [recorder,
               fixtures,
               fixtures.Session(session)]

    # TODO(ato): Deal with timeouts

    with TemporaryDirectory() as cache:
        pytest.main(["--strict",  # turn warnings into errors
                     "--verbose",  # show each individual subtest
                     "--capture", "no",  # enable stdout/stderr from tests
                     "--basetemp", cache,  # temporary directory
                     path],
                    plugins=plugins)

    return recorder.results
def main():
    """Debug-attach entry point: wait for a Visual Studio debugger to attach,
    then run the requested test framework.

    argv layout: [1] working dir, [2] attach secret, [3] attach port,
    [4] framework name ('pytest' or nose), [5:] framework arguments.
    """
    import os
    import sys
    from ptvsd.visualstudio_py_debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code
    from ptvsd.attach_server import DEFAULT_PORT, enable_attach, wait_for_attach

    sys.path[0] = os.getcwd()
    os.chdir(sys.argv[1])

    secret = sys.argv[2]
    port = int(sys.argv[3])
    testFx = sys.argv[4]
    args = sys.argv[5:]

    # Keep the debugger from stepping into this launcher itself.
    DONT_DEBUG.append(os.path.normcase(__file__))
    DEBUG_ENTRYPOINTS.add(get_code(main))

    enable_attach(secret, ('127.0.0.1', port), redirect_output = False)
    sys.stdout.flush()
    # Handshake line the IDE watches for before attaching.
    print('READY')
    sys.stdout.flush()
    wait_for_attach()

    try:
        if testFx == 'pytest':
            import pytest
            pytest.main(args)
        else:
            import nose
            nose.run(argv=args)
        sys.exit()
    finally:
        pass
def run():
    """CLI entry point (Python 2): parse case path / priority / report flags
    and hand a composed command-line string to pytest."""
    # Help strings are UTF-8 bytes; .decode("utf8") turns them into unicode
    # for argparse (Python 2 only).
    parser = argparse.ArgumentParser(description="运行UI自动化测试用例".decode("utf8"))
    parser.add_argument("--casepath", default=DEFAULT_CASE_PATH,
                        help="测试用例的执行目录,不设置时默认执行case目录下全部用例".decode("utf8"))
    parser.add_argument("-P", "--priority", type=int, default=3,
                        help="运行测试用例的优先级(1,2,3),不设置默认执行全部优先级的用例".decode("utf8"))
    parser.add_argument("--html-result", action="store", dest="html_result",
                        type=bool, metavar="NAME", default=False,
                        help="输出html报表".decode("utf8"))
    parser.add_argument("--txt-result", action="store", dest="txt_result",
                        type=bool, metavar="NAME", default=False,
                        help="输出txt报表".decode("utf8"))
    args = parser.parse_args()

    # Double the backslashes so Windows paths survive pytest's string parsing.
    case_path_str = args.casepath.replace("\\", "\\\\")
    priority_str = " -P {0}".format(args.priority)
    html_report_str = ""
    txt_report_str = ""
    if args.html_result:
        html_report_str = " --html-result True"
    if args.txt_result:
        txt_report_str = " --txt-result True"
    # Compose one pytest command-line string (legacy string form of
    # pytest.main; removed in pytest >= 4 — works with the pinned version).
    pytest_str = r" {case_path}{priority}{html_report}{txt_report} -s --clearcache".format(
        case_path=case_path_str,
        priority=priority_str,
        html_report=html_report_str,
        txt_report=txt_report_str
    )
    print("py.test {0}".format(pytest_str))
    pytest.main(pytest_str)
def handle(self, *a, **b):
    """Django management-command body: build a pytest option list from the
    command flags, optionally wrap the run in coverage measurement.

    Recognized flags in ``b``: capture, keyword, paste, functional, coverage.
    Positional args ``a`` are forwarded to pytest untouched.
    """
    ## http://pytest.org/latest/usage.html
    ## --tb=native is also usable
    coverage = False
    opts = ["--tb=short"]
    keyword_specified = False
    # Point the ORM at the test database for this run.
    settings.DATABASES = dict(default=settings.TEST_DB)
    if not b.get('capture'):
        opts.append("-s")
    if b.get('keyword') and b['keyword']:
        opts.append("-k")
        opts.append(b['keyword'])
        keyword_specified = True
    # NOTE(review): pastebin upload happens when --paste is NOT given —
    # looks inverted; confirm the intended polarity of the flag.
    if not b.get('paste'):
        opts.append("--pastebin=all")
    # Default run excludes functional tests unless -k already narrowed scope.
    if not b.get('functional') and not keyword_specified:
        opts.append("-k")
        opts.append("not functional_")
    if b.get('coverage'):
        coverage = True
        coverage_modules = b.get('coverage', '').split(",")
    if coverage:
        # Local import: the coverage package is optional at runtime.
        import coverage
        conf = getattr(settings, 'COVERAGE_CONF', '')
        cov = coverage.coverage(source=coverage_modules, config_file=conf)
        cov.start()
    pytest.main(opts + list(a))
    if coverage:
        cov.stop()
        cov.report()
def main(argv=None):
    """Get arguments and run tests.

    With ``-l/--local``, only the four local test modules run; otherwise the
    whole suite runs. Afterwards the local-queue smoke test is executed in a
    subprocess either way.
    """
    if not argv:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('-l', '--local', action="store_true",
                        help="Skip remote tests")

    args = parser.parse_args(argv)

    # Move us up one if we are in the tests directory
    if os.path.basename(os.path.abspath('.')) == 'tests':
        os.chdir('..')

    # Run the tests
    print('Running py.test tests')
    if args.local:
        print('Skipping remote queue tests')
        pytest.main(['tests/test_options.py', 'tests/test_queue.py',
                     'tests/test_local.py', 'tests/test_config.py'])
    else:
        pytest.main()

    print('py.test tests complete, running local queue test.')
    # check_call raises CalledProcessError if the smoke test fails.
    check_call([sys.executable, 'tests/local_queue.py'])
def run_module(args):
    """Dispatch the module-test subcommand.

    ``--list`` prints the available module tests; ``--all`` runs every test
    except the core ones; otherwise the single file in ``args.file`` runs.
    """
    if args.list:
        list_module_test()
        return
    if args.all:
        pytest_args = ['-s', '-v', '-k', 'not test_core']
    else:
        pytest_args = ['-s', '-v', args.file]
    pytest.main(pytest_args)
def run_tests():
    """Run this file's unit tests under pytest, producing JUnit XML so a
    grader can extract the pass/fail statistics."""
    # pytest treats a command string like a Python string in code, applying
    # any \ sequences to it; just changing \\ to / makes it unhappy (one test
    # fails, Macs get confused). So pass only the file name, not the path.
    self_name = os.path.basename(__file__)
    # Get the .py name of this file; it can be run from a .pyc, which
    # boogers pytest.
    self_name = os.path.splitext(self_name)[0] + ".py"
    # Pass an argument LIST: the space-joined-string form of pytest.main()
    # is deprecated (removed in pytest 4) and is what caused the backslash
    # mangling described above in the first place.
    pytest.main([self_name, "--junitxml=unit_test.xml"])
def RunInSingleDir(module_name_seq, xmlout, dirname):
    """ Run pytest TestProgram w/ given argv

    Translates each dotted module name ('mod.Class.test') into a pytest
    node id ('dirname/mod.py::Class::test'), then runs pytest with the Wing
    XML-result plugin so results stream to ``xmlout``.
    """
    module_fullpath_list = []
    for module_name in module_name_seq:
        parts = module_name.split('.')
        modname = parts[0]
        # Everything after the module is the test spec (class/function path).
        test_spec = '.'.join(parts[1:])
        module_fullpath = os.path.join(dirname, modname)
        if os.path.isdir(module_fullpath):
            # Package directory: use as-is.
            pass
        elif not module_fullpath.endswith('.py'):
            module_fullpath += '.py'
        if test_spec:
            module_fullpath += '::' + test_spec.replace('.', '::')
        module_fullpath_list.append(module_fullpath)

    result = wingtest_common.XmlTestResult(xmlout)
    runner = wingtest_common.XmlTestRunner(result)
    plugin = CPytestPlugin(dirname, result, runner)
    try:
        import pytest
        # -s turns off stdout/err capturing
        # -p no:terminal turns off printing test result status to stdout
        # --tb=native gets parseable tracebacks on exceptions
        pytest.main(args=['-s', '-p', 'no:terminal', '--tb=native'] + module_fullpath_list,
                    plugins=[plugin])
    except SystemExit:
        raise
    except Exception:
        # Note that import error from test files end up here, so this is
        # not just for runner exceptions
        xmlout._write_exc_info(sys.exc_info())
def run_tests(self):
    """Run the settings tests first, then the full suite, exiting with the
    first non-zero pytest status."""
    # Deferred import: pytest comes from the test requirements, which are
    # only installed by the time this command runs.
    import pytest

    # Bug fix: the original called sys.exit(errcode) with `errcode` still
    # undefined (the result was stored in `error_code`), raising NameError
    # whenever the first pytest run failed.
    error_code = pytest.main(['-k', 'parse/test/test_settings.py'])
    if error_code:
        sys.exit(error_code)
    error_code = pytest.main(self.test_args)
    sys.exit(error_code)
def test():
    r"""Run all the doctests available."""
    import pytest

    # Directory containing this module; pytest collects doctests from every
    # module under it (-r s also reports reasons for skipped tests).
    package_dir = os.path.split(__file__)[0]
    pytest.main(args=[package_dir, '--doctest-modules', '-r s'])
def main(args):
    """Run the test suite; with no args (or just -v) also run coverage and
    validate the demo script for every environment.

    Returns the accumulated status code (0 on full success).
    """
    print("Running tests, args:", args)
    # Any explicit args other than plain -v: just forward them to pytest.
    if args and args != ['-v']:
        return pytest.main(['--capture=sys'] + args)

    # Render a per-interpreter-version coverage rc from the tenjin template.
    engine = tenjin.Engine(cache=False)
    major_version = sys.version_info[0]
    minor_version = sys.version_info[1]
    # Note: This naming is duplicated in .travis.yml
    cov_rc_file_name = jp(_here, '.coverage_rc_' + str(os.environ.get(
        'TRAVIS_PYTHON_VERSION', str(major_version) + '.' + str(minor_version))))
    with open(cov_rc_file_name, 'w') as cov_rc_file:
        cov_rc_file.write(engine.render(jp(_here, "coverage_rc.tenjin"), dict(
            major_version=major_version, minor_version=minor_version,
            type_check_supported=type_check.vcheck())))
    rc = pytest.main(['--capture=sys', '--cov=' + _here + '/..',
                      '--cov-report=term-missing',
                      '--cov-config=' + cov_rc_file_name]
                     + (args if args == ['-v'] else []))

    print()
    # Replace any inherited PYTHONPATH with the current sys.path below.
    try:
        del os.environ['PYTHONPATH']
    except KeyError:
        pass
    for env_name in 'prod', 'preprod', 'devlocal', 'devs', 'devi':
        demo_out = jp(_here, env_name + '.demo_out')
        print("Validating demo for env {env} - output in {out}".format(env=env_name, out=demo_out))
        osenv = {'PYTHONPATH': ':'.join(sys.path)}
        with open(demo_out, 'w') as outf:
            # NOTE(review): check_call raises on failure (it never returns
            # non-zero), so `rc |=` only ever ORs in 0 — confirm whether
            # subprocess.call was intended here.
            rc |= subprocess.check_call((sys.executable, _here + '/../demo/demo.py',
                                         '--env', env_name), env=osenv, stdout=outf)
    print()
    return rc
def merge_config(args):
    """Merge [pytest-watch] options from the project's ini file into the
    parsed CLI ``args`` dict, with CLI values taking precedence.

    Locates the ini file by running a silenced pytest collection with a
    plugin that records the config path.
    """
    collect_config = CollectConfig()
    with silence():
        pytest.main(['--collect-only'], plugins=[collect_config])
    if not collect_config.path:
        return

    config = ConfigParser()
    config.read(collect_config.path)
    if not config.has_section('pytest-watch'):
        return

    for cli_name in args:
        if not cli_name.startswith(CLI_OPTION_PREFIX):
            continue
        config_name = cli_name[len(CLI_OPTION_PREFIX):]

        # Let CLI options take precedence
        if args[cli_name]:
            continue

        # Find config option
        if not config.has_option('pytest-watch', config_name):
            continue

        # Merge config option using the expected type
        if isinstance(args[cli_name], bool):
            args[cli_name] = config.getboolean('pytest-watch', config_name)
        else:
            args[cli_name] = config.get('pytest-watch', config_name)
def main(argv):
    """Test-runner entry point.

    Test selection, in priority order: TEST_NAMES env var (dotted unittest
    names), TEST_FILES env var (file names), positional args (file names),
    or — when none are given — the whole directory.
    Returns pytest's exit status.
    """
    if '--help' in argv:
        print("Usage: ./runtests.py <testfiles>")
        return

    mydir = os.path.dirname(os.path.abspath(__file__))

    verbosity_args = []
    if 'PYGI_TEST_VERBOSE' in os.environ:
        verbosity_args += ['--capture=no']

    if 'TEST_NAMES' in os.environ:
        names = os.environ['TEST_NAMES'].split()
    elif 'TEST_FILES' in os.environ:
        names = []
        for filename in os.environ['TEST_FILES'].split():
            # strip the trailing ".py"
            names.append(filename[:-3])
    elif len(argv) > 1:
        names = []
        for filename in argv[1:]:
            names.append(filename.replace('.py', ''))
    else:
        # No selection given: run everything in this directory.
        return pytest.main([mydir] + verbosity_args)

    def unittest_to_pytest_name(name):
        # "mod.Class.test" -> "<mydir>/mod.py::Class::test"
        parts = name.split(".")
        parts[0] = os.path.join(mydir, parts[0] + ".py")
        return "::".join(parts)

    return pytest.main([unittest_to_pytest_name(n) for n in names] + verbosity_args)
def test(coverage=False):
    """Run the test suite; when ``coverage`` is true, also measure coverage
    of the current directory."""
    pytest_args = ["--cov=."] if coverage else []
    pytest.main(pytest_args)
def do_list(args, unknown_args): """Print a lists of tests than what pytest offers.""" # Run test collection and get the tree out. old_output = sys.stdout try: sys.stdout = output = StringIO() pytest.main(['--collect-only']) finally: sys.stdout = old_output # put the output in a more readable tree format. lines = output.getvalue().split('\n') output_lines = [] for line in lines: match = re.match(r"(\s*)<([^ ]*) '([^']*)'", line) if not match: continue indent, nodetype, name = match.groups() # only print top-level for short list if args.list: if not indent: output_lines.append( os.path.basename(name).replace('.py', '')) else: print(indent + name) if args.list: colify(output_lines)
def run(self, params, args):
    """Run the system report tests under pytest.

    Params: ``exitonfail`` stops at the first failing test; ``pretty``
    suppresses tracebacks. Raises CommandError if any test fails.
    """
    (exitonfail, pretty) = self.fillParams([
        ('exitonfail', False),
        ('pretty', True)
    ])

    exitonfail = self.str2bool(exitonfail)
    pretty = self.str2bool(pretty)

    # Run from the directory that contains the test files, restoring the
    # caller's cwd afterwards.
    current_dir = os.getcwd()
    os.chdir('/opt/stack/lib/python3.7/site-packages/stack/commands/report/system')
    tests = glob('tests/*')

    # NOTE(review): the four comments below describe the branch AFTER them
    # in the original layout — kept verbatim; verify each flag combination.
    # make it real ugly.
    if exitonfail and not pretty:
        _return_code = main(['--verbose', '--exitfirst', *args, *tests])
    # exit with first failure
    elif exitonfail:
        _return_code = main(['--verbose', '--capture=no', '--exitfirst', *args, *tests])
    # show tracebacks of failures but don't fail.
    elif not pretty:
        _return_code = main(['--verbose', '--capture=no', *args, *tests])
    # pretty and no tracebacks
    else:
        _return_code = main(['--verbose', '--capture=no', '--tb=no', *args, *tests])

    os.chdir(current_dir)

    # If any of the tests failed, throw an error
    if _return_code > 0:
        raise CommandError(self, "One or more tests failed")
3. Check error log does not have "_entryrdn_insert_key" errors :expectedresults: 1. Success 2. Success 3. Success """ inst = topo.ms['supplier1'] suffix = Domain(inst, "ou=people," + DEFAULT_SUFFIX) backends = Backends(inst) backend = backends.get(DEFAULT_BENAME) # Reindex nsuniqueid backend.reindex(attrs=['nsuniqueid'], wait=True) # Do some updates for idx in range(0, 5): suffix.replace('description', str(idx)) # Check error log for RUV entryrdn errors. Stopping instance forces RUV # to be written and quickly exposes the error inst.stop() assert not inst.searchErrorsLog("entryrdn_insert_key") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main('-s {}'.format(CURRENT_FILE))
# Hash testing counter = Counter() counter[pgf_1] += 1 counter[pgf_2] += 1 counter[pgf_3] += 1 self.assertEqual(counter[pgf_1], 3) self.assertEqual(counter[pgf_2], 3) self.assertEqual(counter[pgf_3], 3) class LocalModeExecutorTest(RayTrialExecutorTest): def setUp(self): ray.init(local_mode=True) self.trial_executor = RayTrialExecutor() def tearDown(self): ray.shutdown() _register_all() # re-register the evicted objects def testTrialCleanup(self): self.skipTest( "Skipping as trial cleanup is not applicable for local mode.") if __name__ == "__main__": import sys sys.exit(pytest.main(["-v", __file__]))
def run_tests(self):
    """Run pytest with the configured arguments and exit with its status."""
    # Deferred import: pytest lives in the test eggs, which are only on
    # sys.path by the time this command executes.
    import pytest
    sys.exit(pytest.main(self.pytest_args))
@pytest.mark.skipif(os.environ.get('GUROBI_PATH') is None or not SnoptSolver().available(),
                    reason='This test relies on Gurobi and SNOPT.')
def test_parsing():
    """End-to-end parse test: sample a scene tree from the grammar, recover
    it with MIP inference, refine with NLP, and check the refined tree scores
    finitely."""
    # Try to parse an example of this grammar.
    grammar = SpatialSceneGrammar(
        root_node_type=Desk,
        root_node_tf=torch.eye(4)
    )
    # Fixed seed keeps the sampled scene (and hence the test) deterministic.
    torch.random.manual_seed(42)
    observed_tree = grammar.sample_tree(detach=True)
    observed_nodes = observed_tree.get_observed_nodes()

    # MIP stage: recover the maximum-likelihood tree from the observations.
    inference_results = infer_mle_tree_with_mip(
        grammar, observed_nodes, verbose=True,
        max_scene_extent_in_any_dir=10.)
    assert inference_results.optim_result.is_success()
    mip_optimized_tree = get_optimized_tree_from_mip_results(inference_results)

    # NLP stage: continuous refinement of the MIP solution.
    refinement_results = optimize_scene_tree_with_nlp(
        grammar, mip_optimized_tree, verbose=True,
        max_scene_extent_in_any_dir=10.)
    assert refinement_results.optim_result.is_success()
    refined_tree = refinement_results.refined_tree
    score = refined_tree.score(verbose=True)
    assert torch.isfinite(score), "Refined tree was infeasible."

if __name__ == "__main__":
    pytest.main()
cl_arrays = [ cl_array.arange(queue, 0, 3, dtype=np.float32) for i in range(1, 10) ] idx = cl_array.arange(queue, 0, 6, dtype=np.int32) out_arrays = [ cl_array.zeros(queue, (10,), np.float32) for i in range(9) ] out_compare = [np.zeros((10,), np.float32) for i in range(9)] for i, ary in enumerate(out_compare): ary[idx.get()] = np.arange(0, 6, dtype=np.float32) cl_array.multi_put(cl_arrays, idx, out=out_arrays) assert np.all(np.all(out_compare[i] == out_arrays[i].get()) for i in range(9)) if __name__ == "__main__": # make sure that import failures get reported, instead of skipping the # tests. if len(sys.argv) > 1: exec(sys.argv[1]) else: from pytest import main main([__file__]) # vim: filetype=pyopencl:fdm=marker
option = webdriver.ChromeOptions() option.add_experimental_option('w3c', False) self.driver = webdriver.Chrome(options=option) self.driver.implicitly_wait(5) def teardown(self): self.driver.quit() def test_touchaction_scrollbottom(self): """ 打开Chrome 打卡URL: http://www.baidu.com 向搜索框中输入‘selenium测试’ 通过TouchAction点击搜索框 滑动到底部,点击下一页 关闭Chrome """ self.driver.get("http://www.baidu.com") ele = self.driver.find_element_by_css_selector('[id="kw"]') ele.send_keys("selenium测试") ele_search = self.driver.find_element_by_css_selector('[id="su"]') action = TouchActions(self.driver) action.tap(ele_search) action.perform() action.scroll_from_element(ele, 0, 10000).perform() sleep(3) if __name__ == '__main__': pytest.main(['-s', '-v','test_TouchAction.py'])
def run_tests(self):
    """Run pytest on the configured test arguments and exit with its status."""
    import pytest
    status = pytest.main(self.test_args)
    sys.exit(status)
return 0 cur_val = sum_val + cur.val cur_cnt = int(cur_val == sum) return cur_cnt + getPathSum(cur.left, cur_val) + getPathSum( cur.right, cur_val) if not root: return 0 return getPathSum(root, 0) + self.pathSum( root.left, sum) + self.pathSum(root.right, sum) @pytest.mark.parametrize("kw,expected", [ [ dict(root=TreeNode( 10, left=TreeNode(5, left=TreeNode(3, TreeNode(3), TreeNode(-2)), right=TreeNode(2, right=TreeNode(1))), right=TreeNode(-3, right=TreeNode(11)), ), sum=8), 3 ], ]) def test_solutions(kw, expected): assert Solution().pathSum(**kw) == expected if __name__ == '__main__': pytest.main(["-q", "--color=yes", "--capture=no", __file__])
"""Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can have targattrfilters and search permissions and that ldapsearch works as expected. :id: e25d116e-7aa9-11e8-81d8-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = ' \ '"add=title:(title=arch*)")(version 3.0; acl "{}"; allow ' \ '(write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = Anonymous(topo.standalone).bind() user = UserAccount(conn, USER_DELADD) #targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected assert user.get_attr_vals('secretary') assert user.get_attr_vals('mail') assert user.get_attr_vals('objectclass') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE)
code = buff.getvalue() m = _size(code) if m: size = int(m.group(1)) else: raise Exception('Internal error: PPM header not found') return code[m.end():], size def ppm_bw_as_matrix(buff, border): """\ Returns the QR code as list of [0, 1] lists. :param io.BytesIO buff: Buffer to read the matrix from. """ res = [] data, size = _image_data(buff) rgb_data = [unpack('>3B', data[i:i + 3]) for i in range(0, len(data), 3)] for i, offset in enumerate(range(0, len(rgb_data), size)): if i < border: continue if i >= size - border: break row_data = rgb_data[offset + border:offset + size - border] res.append([(0x0, 0x1)[rgb == (0, 0, 0)] for rgb in row_data]) return res if __name__ == '__main__': pytest.main([__file__])
assert pyenv_ver_to_travis_ver("3.4.6") == "3.4" assert pyenv_ver_to_travis_ver("3.5.3") == "3.5" assert pyenv_ver_to_travis_ver("3.6.2") == "3.6" assert pyenv_ver_to_travis_ver("3.6-dev") == "3.6-dev" assert pyenv_ver_to_travis_ver("3.7-dev") == "3.7-dev" assert pyenv_ver_to_travis_ver("anaconda-4.0.0") == "2.7" assert pyenv_ver_to_travis_ver("anaconda2-4.4.0") == "2.7" assert pyenv_ver_to_travis_ver("anaconda3-4.4.0") == "3.4" vers = [ "ironpython-dev", "ironpython-2.7.7", "jython-dev", "jython-2.7.0", "jython-2.7.1b3", "pyston-0.6.1", "stackless-dev", "stackless-3.4.2", ] for v in vers: with raises(Exception): pyenv_ver_to_travis_ver(v) if __name__ == "__main__": import os basename = os.path.basename(__file__) pytest.main([basename, "-s", "--tb=native"])
'b': 1 }, 'c': 0 } def test_pprint(capsys): test_config = Config(config_name) test_config.config = {'x': 1, 'y': {'a': 2}} test_config.pprint() cap_out = capsys.readouterr()[0] assert cap_out == """{'x': 1, 'y': {'a': 2}}\n""" def test_to_dict(): test_config = Config(config_name) test_config.config = {'x': 1, 'y': {'a': 2}} d = test_config.to_dict() assert d == test_config.config # make sure we copied d['z'] = 3 d['y']['b'] = 4 assert d != test_config.config assert d['y'] != test_config.config['y'] if __name__ == '__main__': import sys import pytest sys.exit(pytest.main(sys.argv))
def test():  # pragma: no cover
    """Run the full testing suite of phy."""
    from pytest import main as run_pytest
    run_pytest()
self.adminLoginPage.goto_admin_login_page() # 测试管理员登录验证码错误 # @pytest.mark.skip() @pytest.mark.dependency(name='admin_login') @pytest.mark.parametrize('username,pwd,captcha,expected', admin_login_data) def test_admin_login(self, username, pwd, captcha, expected): self.adminLoginPage.input_username(username) self.adminLoginPage.input_pwd(pwd) if captcha != '666': captcha = util.get_code(self.driver, 'captchaImg') self.adminLoginPage.input_captcha(captcha) self.adminLoginPage.click_admin_login_btn() if captcha != '666': WebDriverWait(self.driver, 5).until(EC.title_is(expected)) assert expected == self.driver.title else: WebDriverWait(self.driver, 5).until(EC.alert_is_present()) alert = self.driver.switch_to.alert assert alert.text == expected alert.accept() sleep(5) if __name__ == '__main__': pytest.main(['testAdminLogin.py'])
logger.info("=" * 40) # # ################################## # Test cases start here. # ################################## # # def test_ospf_dual_stack(request): """OSPF test dual stack.""" tc_name = request.node.name write_test_header(tc_name) # Don't run this test if we have any failure. tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) global topo step("Bring up the base configuration as per the JSON topology") reset_config_on_routers(tgen) write_test_footer(tc_name) if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args))
browser, adapter = prepare_mock_browser() mock_get(adapter, url=url, reply=str(initial_page)) browser.open(url) browser.select_form() mock_get(adapter, url=url, reply=str(reload_page), additional_matcher=lambda r: 'Referer' not in r.headers) browser.refresh() assert browser.get_url() == url assert browser.get_current_page() == reload_page assert browser.get_current_form() is None def test_refresh_error(): browser = mechanicalsoup.StatefulBrowser() # Test no page with pytest.raises(ValueError): browser.refresh() # Test fake page with pytest.raises(ValueError): browser.open_fake_page('<p>Fake empty page</p>', url='http://fake.com') browser.refresh() if __name__ == '__main__': pytest.main(sys.argv)
@Contact : [email protected] @MTime : 2019/11/13 18:07 @Author: zhangyun @Version: 1.0 @Description: 首页登录测试用例 """ import sys from os.path import dirname, abspath from Page.operate_login_page import OperateLoginPage import pytest from time import sleep # sys.path.insert(0, dirname(dirname(abspath(__file__)))) class TestLogin: """首页登录""" def test_operate_login_case(self, browser, base_url): """账户登录成功""" page = OperateLoginPage(browser) page.get(base_url) page.login_phone = '18989846103' page.login_password = '******' page.login_button.click() assert browser.title == 'Civa运营管理平台' if __name__ == '__main__': pytest.main(["-v", "-s", "test_login.py"])
def run_tests(self):
    """Run the configured tests via pytest and propagate its exit status."""
    import pytest
    sys.exit(pytest.main(self.test_args))
1.32980760e00, ]), ), (0.01, 3, np.array([0.0, 13.29807601, 13.29807601])), ], ) def test_against_scipy_density(bw, n, expected_result): """ Test against the following function call in SciPy: data = np.array([0, 0.1, 1]) x = np.linspace(-1, 1, {n}) bw = {bw}/np.asarray(data).std(ddof=1) density_estimate = gaussian_kde(dataset = data, bw_method = bw) y = density_estimate.evaluate(x) # Note that scipy weights its bandwidth by the covariance of the # input data. To make the results comparable to the other methods, # we divide the bandwidth by the sample standard deviation here. """ data = np.array([0, 0.1, 1]) x = np.linspace(-1, 1, num=n) y = TreeKDE(kernel="gaussian", bw=bw).fit(data).evaluate(x) error = np.mean((y - expected_result)**2) assert error < 1e-10 if __name__ == "__main__": # --durations=10 <- May be used to show potentially slow tests pytest.main(args=[".", "--doctest-modules", "-v"])
loop_cnt += 1 return (i, k, loop_cnt) """)) out = self.interp("func()") assert out == (0, 4, 5) def test_nested_break(self): self.interp(textwrap.dedent(""" def func_w(): for k in range(5): if k == 4: break k = 100 return k """)) assert 4 == self.interp("func_w()") class TestCase2(unittest.TestCase): def test_stringio(self): """ test using stringio for output/errors """ out = StringIO() err = StringIO() intrep = Interpreter(writer=out, err_writer=err) intrep("print('out')") self.assertEqual(out.getvalue(), 'out\n') if __name__ == '__main__': pytest.main(['-v', '-x', '-s'])
import pytest

# Make the project root importable when this file is run directly.
__import__("sys").path[0:0] = "."

from src.context import Context
from src.get_editor_command import *


def test_when_favorite_editor_is_set():
    """A configured editor_command in the workspace config wins."""
    config_path = Path("test") / "workspace" / "config.json"
    config_path.write_text('{"editor_command": "mock_favorite_command"}')
    context = Context("mockOS")
    assert get_editor_command(context, Path("foobar")) == "mock_favorite_command foobar"


def test_with_unsupported_platform():
    """An unknown OS name raises UnsupportedOSError."""
    with pytest.raises(UnsupportedOSError):
        get_editor_command(Context("MS-DOS"), Path("foobar"))


def test_with_mock_os():
    """Without a config file, the platform's default command is used."""
    config_path = Path("test") / "workspace" / "config.json"
    # Remove any config left over from the previous test.
    if config_path.exists():
        config_path.unlink()  # Python 3.8 and newer: use `missing_ok`` parameter.
    context = Context("mockOS")
    assert get_editor_command(context, Path("foobar")) == "mock_default_command foobar"


if __name__ == "__main__":  # pragma: no cover
    pytest.main(["-qq", __import__("sys").argv[0]])
def test(*args):
    """Run the pygridtools test suite, forwarding any extra pytest options,
    and return pytest's exit status."""
    package_dir = resource_filename('pygridtools', '')
    return pytest.main([package_dir] + list(args))
@ray.remote(resources={"node3": 1}, num_cpus=0) class Borrower: def get_objects(self, refs): for ref in refs: ray.get(ref) return True owner = Owner.remote() creator = Creator.remote() borrower = Borrower.remote() # Make sure the owner actor is alive. ray.get(owner.warmup.remote()) ray.get(creator.gen_object_refs.remote(owner)) ray.kill(creator) assert ray.get(owner.remote_get_object_refs.remote(borrower), timeout=60) if __name__ == "__main__": import pytest import os import sys if os.environ.get("PARALLEL_CI"): sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__])) else: sys.exit(pytest.main(["-sv", __file__]))
def test_automatic_completions_hide_complete(lsp_codeeditor, qtbot):
    """Test on-the-fly completion closing when already complete.

    Regression test for issue #11600 and pull request #11824.
    """
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    # Snippets off so typed text is inserted literally.
    code_editor.toggle_code_snippets(False)

    code_editor.set_text('some = 0\nsomething = 1\n')
    cursor = code_editor.textCursor()
    code_editor.moveCursor(cursor.End)

    # Complete some -> [some, something]
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'some')
    assert "some" in [x['label'] for x in sig.args[0]]
    assert "something" in [x['label'] for x in sig.args[0]]

    # No completion for 'something' as already complete
    qtbot.keyClicks(code_editor, 'thing')
    qtbot.wait(500)
    assert completion.isHidden()

    # Restore the default snippet setting for subsequent tests.
    code_editor.toggle_code_snippets(True)

if __name__ == '__main__':
    pytest.main(['test_completion.py', '--run-slow'])
def run_tests(self):
    """Invoke pytest with the arguments configured on this command."""
    # Imported lazily so this module can be loaded without pytest installed.
    import pytest

    pytest.main(self.test_args)
# NOTE(review): `qutest` is created earlier in this file (outside this chunk).
# This sequence drives the state machine with C_SIG events and checks the
# expected QS trace lines for each transition.
qutest.expect("%timestamp BSP_DISPLAY s11-ENTRY;")
qutest.expect("%timestamp Trg-Done QS_RX_EVENT")

# First C_SIG: transition from s11 (inside s1) over to s2/s21/s211.
qutest.dispatch("C_SIG")
qutest.expect("%timestamp BSP_DISPLAY s1-C;")
qutest.expect("%timestamp BSP_DISPLAY s11-EXIT;")
qutest.expect("%timestamp BSP_DISPLAY s1-EXIT;")
qutest.expect("%timestamp BSP_DISPLAY s2-ENTRY;")
qutest.expect("%timestamp BSP_DISPLAY s2-INIT;")
qutest.expect("%timestamp BSP_DISPLAY s21-ENTRY;")
qutest.expect("%timestamp BSP_DISPLAY s211-ENTRY;")
qutest.expect("%timestamp Trg-Done QS_RX_EVENT")

# Second C_SIG: transition back from s211 to s1/s11.
qutest.dispatch("C_SIG")
qutest.expect("%timestamp BSP_DISPLAY s2-C;")
qutest.expect("%timestamp BSP_DISPLAY s211-EXIT;")
qutest.expect("%timestamp BSP_DISPLAY s21-EXIT;")
qutest.expect("%timestamp BSP_DISPLAY s2-EXIT;")
qutest.expect("%timestamp BSP_DISPLAY s1-ENTRY;")
qutest.expect("%timestamp BSP_DISPLAY s1-INIT;")
qutest.expect("%timestamp BSP_DISPLAY s11-ENTRY;")
qutest.expect("%timestamp Trg-Done QS_RX_EVENT")

# the end

if __name__ == "__main__":
    options = ['-x', '-v', '--tb=short']
    # NOTE(review): this appends all of sys.argv, including argv[0] (this
    # script's own path), to the pytest options — apparently deliberate so the
    # script collects itself; confirm before changing.
    options.extend(sys.argv)
    pytest.main(options)
try:
    # On device 81, add two identical static routes: the first disabled,
    # the second enabled — the second add should be rejected as a duplicate.
    login_web(browser, url=dev1)
    add_static_route_single_wxw(browser, ip='192.168.11.0', mask='24',
                                out_device=interface_name_2,
                                gateway='12.1.1.2', enable='no')
    add_static_route_single_wxw(browser, ip='192.168.11.0', mask='24',
                                out_device=interface_name_2,
                                gateway='12.1.1.2', enable='yes')
    # Read the warning text shown by the page.
    info1 = browser.find_element_by_xpath('//*[@id="box"]/div[3]/ul/li[2]').text
    # Delete the route to clean up.
    del_ipv4_static_route_bydestination(browser,
                                        destination='192.168.11.0/255.255.255.0')
    try:
        # Expect the "static route already exists" message.
        assert "静态路由已存在" in info1
        rail_pass(test_run_id, test_id)
    except:
        # Report the failure to TestRail, then re-raise via the same assert
        # so pytest still records it.
        rail_fail(test_run_id, test_id)
        assert "静态路由已存在" in info1
except Exception as err:
    # If any step above raised, report failure and reboot the device to
    # restore its configuration, then fail the test explicitly.
    print(err)
    rail_fail(test_run_id, test_id)
    reload(hostip=dev1)
    assert False

if __name__ == '__main__':
    pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"])
# NOTE(review): this assert is the tail of a test function whose definition
# lies before this chunk; it compares simulated output against the expected
# signal and saves a diagnostic plot.
assert allclose(t, y.flatten(), yhat.flatten(),
                plotter=Plotter(Simulator),
                filename='test_synapse.test_general.pdf')


def test_synapseparam():
    """SynapseParam must be a Synapse, and converts numbers to LowPass."""
    class Test(object):
        sp = SynapseParam(default=nengo.Lowpass(0.1))

    inst = Test()
    # The default is passed through unchanged.
    assert isinstance(inst.sp, nengo.Lowpass)
    assert inst.sp.tau == 0.1
    # Numbers are converted to Lowpass (the number becomes tau).
    inst.sp = 0.05
    assert isinstance(inst.sp, nengo.Lowpass)
    assert inst.sp.tau == 0.05
    # None has meaning (no synapse) and is preserved.
    inst.sp = None
    assert inst.sp is None
    # Non-synapse values are rejected.
    with pytest.raises(ValueError):
        inst.sp = 'a'


if __name__ == "__main__":
    nengo.log(debug=True)
    pytest.main([__file__, '-v'])
def test_ars(self): check_support( "ARS", { "num_workers": 1, "noise_size": 1500000, "num_rollouts": 1, "rollouts_used": 1 }) def test_es(self): check_support( "ES", { "num_workers": 1, "noise_size": 1500000, "episodes_per_batch": 1, "train_batch_size": 1 }) if __name__ == "__main__": import pytest import sys # One can specify the specific TestCase class to run. # None for all unittest.TestCase classes in this file. class_ = sys.argv[1] if len(sys.argv) > 0 else None sys.exit( pytest.main( ["-v", __file__ + ("" if class_ is None else "::" + class_)]))
self.driver.implicitly_wait(6) def teardown(self): self.driver.quit() def test_daka(self): # self.driver.find_element(MobileBy.XPATH, "//*[@text=‘工作台’]").click() self.driver.find_element_by_android_uiautomator( 'new UiSelector().text("工作台")').click() # 滚动查找元素 self.driver.find_element( MobileBy.ANDROID_UIAUTOMATOR, 'new UiScrollable(new UiSelector()\ .scrollable(true).instance(0))\ .scrollIntoView(new UiSelector()\ .text("打卡").instance(0));').click() self.driver.update_settings({"waitForIdleTimeout": 0}) # self.driver.find_element(MobileBy.XPATH,"//*[@text='外出打卡']").click() self.driver.find_element_by_android_uiautomator( 'new UiSelector().text("外出打卡")').click() # self.driver.find_element(MobileBy.XPATH,"//*[contains(@text,'次外出')]").click() self.driver.find_element_by_android_uiautomator( 'new UiSelector().textContains("次外出")').click() # 检查点 # assert "外出打卡成功" in self.driver.page_source WebDriverWait(self.driver, 10).until(lambda x: "外出打卡成功" in x.page_source) if __name__ == '__main__': pytest.main('test_weixin.py')