def run_tests(self, tests_mapping):
    """ run testcase/testsuite data """
    # parse tests
    parsed_tests_mapping = parse.parse_tests(tests_mapping)

    # add tests to test suite
    test_suite = self._add_tests(parsed_tests_mapping)

    # run test suite
    cur_path = os.path.dirname(__file__)
    conf_path = os.path.join(cur_path, 'conf/conf.ini')
    conf = Conf(conf_path)
    try:
        REPORT_DIR = conf.get_value('data_addr', 'report_addr')
    except (configparser.NoSectionError, configparser.NoOptionError):
        REPORT_DIR = os.path.join(cur_path, 'report/')
    if not os.path.exists(REPORT_DIR):
        os.makedirs(REPORT_DIR)
    filename = os.path.join(
        REPORT_DIR,
        time.strftime('%Y%m%d%H%M%S', time.localtime()) + '_api_report.html')
    # Report output path; relative paths are supported
    f = open(filename, 'wb')
    # Write the results to an HTML file
    runner = HTMLTestRunner.HTMLTestRunner(
        stream=f,
        title='api_report',
        description='Report_description',
        verbosity=2)  # HTMLTestRunner options: output stream, report title, description
    runner.run(test_suite)
def exec_file(file, answer):
    """ Execute a file and compute the similarity between its output and the ground truth
    Args:
        * file      File to execute
        * answer    Ground truth file to compare output to
    Returns:
        The similarity percentage as a string
    """
    # Run the given file
    runner.run(file)

    # Get file name without extension to find
    # output file and to compare to answer
    # ex: test.java --> find test.out.txt
    name = os.path.splitext(file.split(sep)[-1])[0]
    out = name + ".out.txt"

    # Compare differences
    diff = get_diff(out, answer)

    return str(diff) + "%\n"
def main():
    # Add seed
    random_seed = 42
    torch.manual_seed(random_seed)

    args = parser.get()

    X_train = load('./datas/X_train.npy')
    y_train = load('./datas/y_train.npy')
    X_test = load('./datas/X_test.npy')

    train_dataset = data.DatasetXy(X_train, y_train)
    test_dataset = data.DatasetX(X_test)

    data_class = data.Dataloader(args, train_dataset, test_dataset)
    train, test = data_class.train(), data_class.test()

    model = models.get(args)
    optimizer = optimizers.get(args, model.parameters())
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(args.epochs):
        train_metrics = runner.run(
            model, criterion, optimizer, train, True,
            {"loss": metrics.loss, "accuracy": metrics.accuracy},
        )
        metrics.print_metrics(train_metrics)

    y_test_pred = runner.run(
        model, criterion, optimizer, test, False,
        {"loss": metrics.loss, "accuracy": metrics.accuracy},
    )
    print(y_test_pred)

    # Flatten the per-batch predictions into a single list
    y_test_pred = [item for sublist in y_test_pred for item in sublist]

    y_test = np.asarray(y_test_pred)
    pd.DataFrame({
        "Id": np.arange(len(y_test)),
        "Category": y_test
    }).astype(int).to_csv("solution.csv", index=False)
def test_access_environ():
    mock_logger = Mock()
    command = 'cat /proc/1/environ'
    env = {}

    run(mock_logger, command, env=env)

    mock_logger.info.assert_any_call('cat: /proc/1/environ: Permission denied')
def test_app_help(self):
    (noArg, result) = run()
    (arg, result) = run(['--help', 'ignored', 'arguments'])
    usageText = f'Usage: {app_name()} [options] file [options] [time1 time2 ...]\nOptions:\n --version Print program version and exit.\n --help Print usage information and exit.\n -v Verbose; print the current total in a human friendly way.\n It\'s only active when reading.\nExample:\n {app_name()} log.txt 1:27 -11:01 +2:15:23 127\n'
    self.assertEqual(noArg, usageText)
    self.assertEqual(arg, usageText)
def test_register_list_unregister_device():
    ret = run("registry --register --events pollution --device")
    uuid = ret.split()[1]
    ret = run("registry --list --device")
    assert uuid in ret
    run("registry --unregister --uuid " + uuid)
def main(
    debug: bool,
    cpu_time: int,
    real_time: int,
    memory: int,
    stack: int,
    output_size: int,
    process: int,
    input: str,
    output: str,
    error: str,
    log: str,
    uid: int,
    gid: int,
    use_path_env: bool,
    no_memory_limit: bool,
    console_log: bool,
    command: list,
):
    """Run your commands in the sandbox."""
    logger = logging.getLogger(logger_name)
    formatter = logging.Formatter(
        '%(asctime)s [%(levelname)s] %(filename)s:%(lineno)d [%(process)d:%(funcName)s] %(message)s'
    )
    if log == '' or console_log:
        log_handler = logging.StreamHandler(sys.stderr)
        log_handler.setFormatter(formatter)
        logger.addHandler(log_handler)
    if log != '':
        file_handler = logging.FileHandler(log)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    if debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    logger.info('UniJudge Sandbox Runner')
    logger.info('Version 0.0.1')
    logger.debug('Command: [%s]' % ', '.join(command))
    if len(command) == 0:
        logger.fatal('Cannot find command')
        exit(1)

    limit = ResourceLimit(cpu_time=cpu_time,
                          real_time=real_time,
                          memory=memory,
                          stack=stack,
                          output_size=output_size,
                          process=process,
                          no_memory_limit=no_memory_limit)
    redirect = Redirect(
        stdin=input,
        stdout=output,
        stderr=error,
    )
    logger.debug(asdict(limit).__str__())
    logger.debug(asdict(redirect).__str__())

    run(command, limit, redirect, use_path_env)
def test_subscriber_exit_after():
    def subscriber():
        run("subscriber --traffic --exit-after 2")

    def injector():
        run("injectors --run --event traffic --duration 0.02")

    run("registry --init")
    parallel_run([subscriber, injector], lambda func, thread: func())
def test_citypulse_pollution_download():
    run("datasets --download --dataset citypulse --event pollution")
    import embers.datasets.citypulse.pollution as pollution
    t = pollution.Pollution()
    s = t.get_source(0)
    d = s.next()
    assert "carbon_monoxide" in d
    assert "nitrogen_dioxide" in d
def playbook(name, config, extra_vars=None, retries=0, delay=0, delay_factor=1):
    """Playbook must be located in ansible_data/playbooks"""
    playbook = _get_playbook_args(name, config, extra_vars)
    cwd = os.getcwd()
    # Need this to use ansible.cfg
    os.chdir(ANSIBLE_PATH)
    print("Running {}".format(playbook))
    runner.run(playbook, retries=retries, delay=delay, delay_factor=delay_factor)
    os.chdir(cwd)
def destroy(network_config):
    env_vars = _create_env_vars(network_config)
    state_flag = _create_state_flag(network_config)
    cwd = os.getcwd()
    os.chdir(TF_PATH)
    # Destroy
    runner.run(['terraform', 'destroy'] + env_vars + state_flag + ['--force'])
    os.chdir(cwd)
def do(args):
    # ip = ansible.get_node_ip(args.node, args.id)
    config = network.config.load(args.id)
    ip = tf.get_node_ip(args.node, config)
    cwd = os.getcwd()
    # Need this to use ansible.cfg
    os.chdir(BIN_PATH)
    runner.run(
        [os.path.join(BIN_PATH, "init_bond"), config['sawtooth_path'], ip])
    os.chdir(cwd)
def download_hugo():
    logger = get_logger('download-hugo')

    HUGO_VERSION_PATH = CLONE_DIR_PATH / HUGO_VERSION
    if HUGO_VERSION_PATH.is_file():
        logger.info('.hugo-version found')
        hugo_version = ''
        with HUGO_VERSION_PATH.open() as hugo_vers_file:
            try:
                hugo_version = hugo_vers_file.readline().strip()
                hugo_version = shlex.quote(hugo_version)
                regex = r'^(extended_)?[\d]+(\.[\d]+)*$'
                hugo_version = re.search(regex, hugo_version).group(0)
            except Exception:
                raise RuntimeError('Invalid .hugo-version')

        if hugo_version:
            logger.info(f'Using hugo version in .hugo-version: {hugo_version}')
    else:
        raise RuntimeError(".hugo-version not found")

    '''
    Downloads the specified version of Hugo
    '''
    logger.info(f'Downloading hugo version {hugo_version}')
    failed_attempts = 0
    while (failed_attempts < 5):
        try:
            dl_url = ('https://github.com/gohugoio/hugo/releases/download/v'
                      + hugo_version.split('_')[-1]
                      + f'/hugo_{hugo_version}_Linux-64bit.tar.gz')
            response = requests.get(dl_url, verify=CERTS_PATH)

            hugo_tar_path = WORKING_DIR_PATH / 'hugo.tar.gz'
            with hugo_tar_path.open('wb') as hugo_tar:
                for chunk in response.iter_content(chunk_size=128):
                    hugo_tar.write(chunk)

            HUGO_BIN_PATH = WORKING_DIR_PATH / HUGO_BIN
            run(logger, f'tar -xzf {hugo_tar_path} -C {WORKING_DIR_PATH}', env={}, check=True)
            run(logger, f'chmod +x {HUGO_BIN_PATH}', env={}, check=True)
            return 0
        except Exception:
            failed_attempts += 1
            logger.info(
                f'Failed attempt #{failed_attempts} to download hugo version: {hugo_version}'
            )
            if failed_attempts == 5:
                raise RuntimeError(
                    f'Unable to download hugo version: {hugo_version}')
            time.sleep(2)  # try again in 2 seconds
def check_and_run(rss_url, jenkins_id_file, do_force=False, do_loop=False):
    if (not os.path.exists(jenkins_id_file)) or do_force:
        with open(jenkins_id_file, 'w') as file:
            file.write('0')

    while True:
        # read jenkins id
        try:
            with open(jenkins_id_file) as file:
                old_id = int(file.read())
        except (TypeError, ValueError, FileNotFoundError):
            old_id = -1

        # check jenkins rss
        try:
            jenkins_rss = JenkinsRss(rss_url, old_id)
        except Exception:
            print('rss connect failed')
            if do_loop:
                print('trying again.')
                time.sleep(60)
                continue
            else:
                break

        if jenkins_rss.is_stable and jenkins_rss.is_new:
            # download apk
            apk_url = jenkins_rss.get_apk_link()
            res = requests.get(apk_url)
            with open(apk_file, 'wb') as file:
                file.write(res.content)

            # do testing
            try:
                runner.run(project_name=jenkins_rss.project_name,
                           build_id=jenkins_rss.build_id)
            except Exception as err:
                print('error occurred when running tests:')
                print(err)
                if do_loop:
                    time.sleep(60)
                    continue
                else:
                    break

            # update jenkins id
            with open(jenkins_id_file, 'w') as file:
                file.write(str(jenkins_rss.build_id))

        if do_loop:
            time.sleep(60)
        else:
            break
def test_citypulse_traffic_download():
    run("datasets --download --dataset citypulse --event traffic")
    import os
    assert os.path.isdir("embers.datasets.citypulse")
    import embers.datasets.citypulse.traffic as traffic
    t = traffic.Traffic()
    s = t.get_source(0)
    d = s.next()
    assert "vehicleCount" in d
    assert "avgSpeed" in d
def test_prelude(self) -> None:
    env = bytecode.EvalEnv()
    runner.add_intrinsics(env)
    runner.add_builtins(env)
    runner.add_prelude(env)
    self.assertEqual(
        run(env, "(= (cons 42 (cons 13 (cons 'a []))) '(42 13 a))"),
        SBool(True))
    self.assertEqual(
        run(env, "(= [1 2 [3 4 5] 6 [[7]]] [1 2 [3 4 5] 6 [[7]]])"),
        SBool(True))
    self.assertEqual(
        run(env, "(= [1 2 [3 4 5] 6 [[7]]] [1 2 [3 4 5] 6 [[7]]])"),
        SBool(True))
def build_jekyll(branch, owner, repository, site_prefix,
                 base_url='', config='', user_env_vars=[]):
    '''
    Builds the cloned site with Jekyll
    '''
    logger = get_logger('build-jekyll')

    JEKYLL_CONF_YML_PATH = CLONE_DIR_PATH / JEKYLL_CONFIG_YML

    # Add baseurl, branch, and the custom config to _config.yml.
    # Use the 'a' option to create or append to an existing config file.
    with JEKYLL_CONF_YML_PATH.open('a') as jekyll_conf_file:
        jekyll_conf_file.writelines([
            '\n'
            f'baseurl: {base_url}\n',
            f'branch: {branch}\n',
            config,
            '\n',
        ])

    jekyll_cmd = 'jekyll'

    GEMFILE_PATH = CLONE_DIR_PATH / GEMFILE
    if GEMFILE_PATH.is_file():
        jekyll_cmd = f'bundle exec {jekyll_cmd}'

    run(logger,
        f'echo Building using Jekyll version: $({jekyll_cmd} -v)',
        cwd=CLONE_DIR_PATH,
        env={},
        check=True,
        ruby=True)

    env = build_env(branch, owner, repository, site_prefix, base_url, user_env_vars)
    env['JEKYLL_ENV'] = 'production'

    return run(logger,
               f'{jekyll_cmd} build --destination {SITE_BUILD_DIR_PATH}',
               cwd=CLONE_DIR_PATH,
               env=env,
               node=True,
               ruby=True)
def test_cs_add_two_numbers_code():
    out = run(cs_source_code_add_two_numbers, "cs",
              ["mcs"], ["mono", "Source.exe"], [tc1])
    print(out)
    assert 1 == len(out)
    assert '57' == out[0].stdout
    assert Status.OK == out[0].status
def is_supported_ruby_version(version):
    '''
    Checks if the version defined in .ruby-version is supported
    '''
    is_supported = 0

    if version:
        logger = get_logger('setup-ruby')
        RUBY_VERSION_MIN = os.getenv('RUBY_VERSION_MIN')

        is_supported = run(
            logger,
            f'ruby -e "exit Gem::Version.new(\'{shlex.split(version)[0]}\') >= Gem::Version.new(\'{RUBY_VERSION_MIN}\') ? 1 : 0"',  # noqa: E501
            cwd=CLONE_DIR_PATH,
            env={},
            ruby=True
        )

        upgrade_msg = 'Please upgrade to an actively supported version, see https://www.ruby-lang.org/en/downloads/branches/ for details.'  # noqa: E501

        if not is_supported:
            logger.error(
                'ERROR: Unsupported ruby version specified in .ruby-version.')
            logger.error(upgrade_msg)

        if version == RUBY_VERSION_MIN:
            logger.warning(
                f'WARNING: Ruby {RUBY_VERSION_MIN} will soon reach end-of-life, at which point Federalist will no longer support it.')  # noqa: E501
            logger.warning(upgrade_msg)

    return is_supported
def scrape(arr):
    dispFilter = get_dispensary_filter(arr)
    leafbuyer_scraper = LeafbuyerDispensaryScraper(
        HttpClient(), LeafbuyerDispInfoExtractor())
    result = run(dispFilter.get_state_names(),
                 leafbuyer_scraper.produce,
                 leafbuyer_scraper.consume)
    return json.dumps(result)
def insert(self):
    for i in range(0, 3):
        try:
            print "[ +D+ ] - Attempting to trace %s" % self.s_name
            logfile = runner.run(self.d_path, self.t_path, self.t_args,
                                 self.s_path, self.w_time, self.m_time)
            if logfile is not None:
                trace_data = {
                    'seed_name': self.s_name,
                    'data': logfile
                }
                trace = packer.pack(trace_data)
                # Set long TTR as trace processing may take a while
                self.bs.use(self.trace_results)
                self.bs.put(trace, ttr=600)
                self.job.delete()
                break
            else:
                print "[ +E+ ] - Error retrieving log file. Restarting."
        except:
            print "[ +D+ ] - Something went wrong. Restarting."
    else:
        print "[ +E+ ] - Reached max tries. Burying."
        self.job.bury()
def insert(self):
    for i in range(0, 3):
        try:
            print "[ +D+ ] - Attempting to trace %s" % self.s_name
            logfile = runner.run(self.d_path, self.t_path, self.t_args,
                                 self.s_path, self.w_time, self.m_time)
            if logfile is not None:
                trace_data = {'seed_name': self.s_name, 'data': logfile}
                trace = packer.pack(trace_data)
                # Set long TTR as trace processing may take a while
                self.bs.use(self.trace_results)
                self.bs.put(trace, ttr=600)
                self.job.delete()
                break
            else:
                print "[ +E+ ] - Error retrieving log file. Restarting."
        except:
            print "[ +D+ ] - Something went wrong. Restarting."
    else:
        print "[ +E+ ] - Reached max tries. Burying."
        self.job.bury()
def test_signal(self):
    if sys.platform == 'win32':
        py.test.skip("no death by signal on windows")
    res = runner.run(
        [sys.executable, "-c", "import os; os.kill(os.getpid(), 9)"],
        '.', py.path.local(self.fn))
    assert res == -9
def _get_output(network_config):
    state_flag = _create_state_flag(network_config)
    # Get ips
    return yaml.load(
        runner.run(['terraform', 'output', '-json'] + state_flag,
                   capture_stdout=True))
def Test_Main(Debug, CurrentTestSet, CurrentTSTest, CurrentRun):
    try:
        # clear output window
        TDOutput.Clear()

        # TODO: put your code here
        import sys, os
        WORKSPACE = r'C:\Comcast_QA'
        sys.path.insert(0, os.path.join(WORKSPACE, 'spares-test'))
        ALM_OBJECTS = {
            "Debug": Debug,
            "CurrentTestSet": CurrentTestSet,
            "CurrentTSTest": CurrentTSTest,
            "CurrentRun": CurrentRun,
            "TDHelper": TDHelper,
            "TDInput": TDInput,
            "TDOutput": TDOutput,
            "XTools": XTools
        }
        import runner
        result = runner.run(r'tests\api\advance_search\network_location.txt', **ALM_OBJECTS)

        if not Debug:
            TDOutput.Print("Put your code here")

    # handle errors
    except pythoncom.com_error, (hr, desc, exc, arg):
        TDOutput.Print("Run-time error %d: %s" % (hr, desc))

        # update execution status in "Test" mode
        if not Debug:
            CurrentRun.Status = "Failed"
            CurrentTSTest.Status = "Failed"
def main() -> None:
    args = parse_args()
    if args.filename == '-':
        prog_text = sys.stdin.read()
    else:
        with open(args.filename) as f:
            prog_text = f.read()

    env = bytecode.EvalEnv(
        optimize_tail_calls=args.tail_calls,
        jit=args.jit,
        bytecode_jit=args.bytecode_jit,
        print_specializations=args.print_specializations,
        print_optimizations=args.print_optimizations,
        inline_threshold=args.inline_count,
        specialization_threshold=args.specialize_count,
    )

    start = time.perf_counter()
    runner.add_intrinsics(env)
    runner.add_builtins(env)
    runner.add_prelude(env)
    startup = time.perf_counter()
    print(runner.run(env, prog_text))
    end = time.perf_counter()

    env.stats.startup_time = startup - start
    env.stats.program_time = end - startup

    with Output(args) as out_f:
        args.out_file = out_f.file
        if args.machine_readable:
            report_stats_json(args, env)
        else:
            report_stats(args, env)
def test_c_add_two_numbers_code_compile_error():
    out = run(c_source_code_add_two_numbers_compile_error, "c",
              ["gcc", "-o", "program"], ["./program"], [tc1])
    assert 1 == len(out)
    assert ": error: expected" in out[0].stderr
    assert "sum = a b;" in out[0].stderr
    assert Status.COMPILE_ERROR == out[0].status
def test_run_os_failure(mock_popen):
    mock_logger = Mock()
    command = 'foobar'
    mock_popen.side_effect = OSError('ugh')

    result = run(mock_logger, command)

    mock_popen.assert_called_once_with(shlex.split(command),
                                       cwd=None,
                                       env=None,
                                       shell=False,
                                       executable=None,
                                       stderr=subprocess.STDOUT,
                                       stdout=subprocess.PIPE,
                                       bufsize=1,
                                       encoding='utf-8',
                                       text=True,
                                       preexec_fn=setuser)
    mock_logger.error.assert_any_call('Encountered a problem executing `' +
                                      ' '.join(shlex.split(command)) + '`.')
    mock_logger.error.assert_any_call('ugh')
    assert result == 1
def main():
    # notify('Test', body='Is this thing on?', icon='dialog-question', urgency='LOW')
    with open(SPOOL_FILE, 'r+') as spool_file:
        try:
            fcntl.flock(spool_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except BlockingIOError as e:
            print('Sched is already running – exiting…')
            return 2

        spool = yaml.safe_load(spool_file)
        run_result = run(spool)
        print(run_result)

        if not run_result['did_something']:
            # no need to write that file every minute
            return 0
        else:
            spool_file.seek(0)
            yaml.dump({'jobs': run_result['jobs']}, spool_file,
                      default_flow_style=False)
            spool_file.truncate()

        if run_result['has_errors']:
            return 1
def test_run(mock_popen):
    mock_logger = Mock()
    command = 'foobar'
    mock_popen.return_value = Mock(
        returncode=0,
        stdout=Mock(readline=Mock(return_value='foobar')))

    result = run(mock_logger, command)

    mock_popen.assert_called_once_with(
        shlex.split(command),
        cwd=None,
        env=None,
        shell=False,
        executable=None,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        bufsize=1,
        encoding='utf-8',
        text=True,
        preexec_fn=setuser,
    )
    mock_logger.info.assert_called_once_with('foobar')
    assert result == 0
def get_reco():
    data = [
        concentration, year_entry, sem_entry,
        course1, int(difficulty1), int(teaching1), int(material1),
        course2, int(difficulty2), int(teaching2), int(material2),
        course3, int(difficulty3), int(teaching3), int(material3),
        course4, int(difficulty4), int(teaching4), int(material4),
        int(nextdifficulty), int(nextteaching), int(nextmaterial),
    ]
    courses = runner.run(data)
    fifth_window(courses)
def _run_task(self, pattern, task, host_list=None, conditional=False):
    ''' '''

    if host_list is None:
        host_list = self.host_list

    instructions = task['do']
    (comment, module_details) = instructions
    (module_name, module_args) = module_details

    namestr = "%s/%s" % (pattern, comment)
    if conditional:
        namestr = "subset/%s" % namestr
    print "TASK [%s]" % namestr

    runner = self._get_task_runner(
        pattern=pattern,
        host_list=host_list,
        module_name=module_name,
        module_args=module_args,
    )
    results = runner.run()
def runp(cmd):
    return run(logger, cmd, cwd=CLONE_DIR_PATH, env={}, check=True, node=True)
def testRunner(self):
    co = functionTouchFile.__code__
    ret = runner.run(co)
    try:
        with open('test.test', 'r'):
            pass
    except IOError:
        self.assertTrue(False, 'Code object file not made.')
def _run_lola(self, lola_file_name, formula):
    """ Run LoLA for a certain file and formula """
    LOG.info("Running LoLA on temporary file for formula:")
    LOG.info("'{0}'".format(formula))
    command = ["lola", lola_file_name, "--formula={0}".format(formula)]
    (ret, _, stderr) = run(command)
    return check_result(stderr)
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    # Initialize colors
    init()

    # Highlight subprocess output
    if args and args[0] != '--':
        run(args, postprocessor=highlight_traceback_lines)
        return

    # Highlight from args or stdin
    if args:
        s = ' '.join(args[1:])
    else:
        s = sys.stdin.read()
    print('\n'.join(highlight_tracebacks(s)))
def main():
    hardware = Hardware()
    state = State()
    runner.run(hardware, state)
sock.sendall(identity)
sock.sendall("request.")
config_file = sock.recv(4)

# No files remaining, time to finish
if config_file == "wait":
    print "All remaining config files are in progress. Sleeping."
    sleep(timeout)
    print "Waking up."
elif config_file == "done":
    print "All config files complete. Shutting down."
    done = True
# A new file has been sent. Copy it locally, then run it.
else:
    config_file = sock.recv(int(config_file))  # Get the name of the file
    print "Running config file: " + config_file
    f = open(config_file, "w")
    data = sock.recv(8)  # Get the data
    data = sock.recv(int(data))
    f.write(data)
    f.close()
    sock.close()
    runner.run(config_file, HOST, True, identity)  # Run it
    os.remove(config_file)  # Clean up
    sock = socket.socket()
    sock.connect((HOST, PORT))
    sock.sendall(identity)
    sock.send("complete")  # We finished the file, tell the server
def test_timeout_syscall(self):
    res = runner.run(
        [sys.executable, "-c",
         "import socket; s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); s.bind(('', 0)); s.recv(1000)"],
        '.', py.path.local(self.fn), timeout=3)
    assert res == -999
def test_timeout_success(self):
    res = runner.run([sys.executable, "-c", "print 42"], '.',
                     py.path.local(self.fn), timeout=2)
    assert res == 0
    out = py.path.local(self.fn).read('r')
    assert out == "42\n"
def test_timeout(self):
    res = runner.run([sys.executable, "-c", "while True: pass"], '.',
                     py.path.local(self.fn), timeout=3)
    assert res == -999
def test_timeout_lock(self):
    res = runner.run(
        [sys.executable, "-c",
         "import threading; l=threading.Lock(); l.acquire(); l.acquire()"],
        '.', py.path.local(self.fn), timeout=3)
    assert res == -999
def test_error(self):
    res = runner.run([sys.executable, "-c", "import sys; sys.exit(3)"], '.',
                     py.path.local(self.fn))
    assert res == 3
def test_timeout_success(self, out):
    res = runner.run([sys.executable, "-c", "print 42"], '.',
                     out, timeout=2)
    assert res == 0
    out = out.read()
    assert out == "42\n"
for (benchmark, input_size) in benchmarks.items():
    print >> sys.stderr, 'Benchmark:', benchmark
    runner.build(benchmark, 'gcc')
    output_file = OUTPUT_DIR + '/' + benchmark + '.log'

    # Run once to ensure inputs are unpacked
    # Not required, now that everything is ready
    #runner.run(benchmark=benchmark, config='gcc', coz=False, runs=1, size=input_size)

    print >> sys.stderr, 'Starting clean runs'
    times = runner.run(benchmark=benchmark, config='gcc', size=input_size,
                       runs=RUNS, coz=False, keep_inputs=True)
    for t in times:
        print >> results, ','.join([benchmark, 'clean', str(t)])
    results.flush()

    # Remove the output file
    if os.path.isfile(output_file):
        os.remove(output_file)

    print >> sys.stderr, 'Starting zero delay runs'
    times = runner.run(benchmark=benchmark, config='gcc', runs=RUNS, size=input_size,
from sleep import sleep
import runner


def task(name):
    print(name, 1)
    yield sleep(1)
    print(name, 2)
    yield sleep(2)
    print(name, 3)


if __name__ == '__main__':
    runner.run((task('hsfzxjy'), task('Jack')))
table = {}


def fib(n):
    global table
    if n in [1, 2]:
        return 1
    if n in table:
        return table[n]
    value = fib(n-2) + fib(n-1)
    table[n] = value
    return value


def fib_fast(n):
    if n in [1, 2]:
        return 1
    a, b = 1, 1
    while (b < n):
        a, b = b, a+b
    return b


N = 35
r.run(fib_fast, N)
r.run(fib, N)
r.run(fib_slow, N)
def test_run(self, out):
    res = runner.run([sys.executable, "-c", "print 42"], '.', out)
    assert res == 0
    assert out.read() == "42\n"
def test_error(self, out):
    res = runner.run([sys.executable, "-c", "import sys; sys.exit(3)"],
                     '.', out)
    assert res == 3
#!/usr/bin/env python
import os
import sys

# Force our parent directory (the `pyrun` package) into python path:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from runner import run

# --------------------------------------------------
if __name__ == "__main__":
    print("########################")
    print("Running a script...\n")
    run(package='scripts/hello', function='run', path_extras=["scripts"], verbose=True)

    print("\n########################")
    print("Calling a model method...\n")
    run(package='models/hello', model='Hello', method='hi', verbose=True)

    print("\n########################")
    print("Done.")
def test_signal(self):
    if sys.platform == 'win32':
        py.test.skip("no death by signal on windows")
    res = runner.run([sys.executable, "-c", "import os; os.kill(os.getpid(), 9)"],
                     '.', py.path.local(self.fn))
    assert res == -9
from runner import run

s = '''
a
a1
a_done.signal()
b_done.wait()
a2
b
b1
b_done.signal()
a_done.wait()
b2

a_done = Semaphore(0)
b_done = Semaphore(0)
'''

r = run(s)  # .show(pause=True)
# print len(r)

assert all(r('a1 < b2'))
assert all(r('b1 < a2'))
assert any(r('a1 < b1'))
assert any(r('a1 > b1'))
assert any(r('a2 < b2'))
assert any(r('a2 > b2'))
def test_timeout(self, out):
    res = runner.run([sys.executable, "-c", "while True: pass"], '.',
                     out, timeout=3)
    assert res == -999
def test_knn_state(self):
    train = pure_multivariate_signal(40, 3)
    test = pure_multivariate_signal(20, 3)

    clf = kNN(k=10)
    clf.train(train)

    clf.ca.enable(['estimates', 'predictions', 'distances'])
    p = clf.predict(test.samples)

    self.assertTrue(p == clf.ca.predictions)
    self.assertTrue(len(clf.ca.estimates) == 80)
    self.assertTrue(set(clf.ca.estimates[0].keys()) == set(test.targets))
    self.assertTrue(clf.ca.distances.shape == (80, 160))

    self.assertTrue(not clf.ca.distances.fa is train.sa)
    # Those are deep-copied now by default so they should not be the same
    self.assertTrue(not (clf.ca.distances.fa['chunks'] is train.sa['chunks']))
    self.assertTrue(not (clf.ca.distances.fa.chunks is train.sa.chunks))


def suite():  # pragma: no cover
    return unittest.makeSuite(KNNTests)


if __name__ == '__main__':  # pragma: no cover
    import runner
    runner.run()
from runner import run

if __name__ == '__main__':
    run()