def main(): ''' main testing function... ''' # print len(sys.argv) if not (len(sys.argv) >= 2 and len(sys.argv) <= 4): print "Invalid Number of Arguments: Please provide path to output directories [input directory path optional], i.e." print "python demo.py path_to_odir path_to_idir[optional] number_of_experiments[optional]" return else: odir = sys.argv[1] print 'Output Directory = ', odir idir = '' nexp = 3 if len(sys.argv) == 3: idir = sys.argv[2] print 'Input Directory = ', idir if len(sys.argv) == 4: nexp = int(sys.argv[3]) print 'Number of Experiments = ', nexp if idir != '' and not os.path.exists(idir): raise ValueError("Input Image directory %s does not exist" % idir) # print "Idir= %s, Odir=%s" % (idir, odir) # cfnames = genConfigFiles(idir, odir) cfnames = parseConfigFile(idir, odir) for i in range(0, min(nexp, len(cfnames))): print "Running Test case Number %d" % (i + 1) sys.argv = ['', '--configfile=' + os.path.join(os.path.pardir, cfnames[i])]; print sys.argv # now call the running routine... r.main()
def test_main_exit(args, rcfile, expected, setup):
    """run.main must terminate via SystemExit carrying the expected code."""
    with pytest.raises(SystemExit) as excinfo:
        run.main(args, rcfile)
    assert excinfo.value.code == expected
def SvcDoRun(self): import servicemanager ### # Write a 'started' event to the event log... ### win32evtlogutil.ReportEvent(self._svc_name_, ### servicemanager.PYS_SERVICE_STARTED, ### 0, # category ### servicemanager.EVENTLOG_INFORMATION_TYPE, ### (self._svc_name_, '')) baseDir = self.getBaseDir() # below log event under 'Python Service'. How to change the source to MindRetrieve? servicemanager.LogInfoMsg('Starting MindRetrieve at [%s]' % baseDir) os.chdir(baseDir) run.main(['', '--start']) # wait for beeing stopped... win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE) # and write a 'stopped' event to the event log. win32evtlogutil.ReportEvent( self._svc_name_, servicemanager.PYS_SERVICE_STOPPED, 0, # category servicemanager.EVENTLOG_INFORMATION_TYPE, (self._svc_name_, ''))
def main(argv):
    """Sweep the regularization parameter for Helheim inversions to
    generate the data for an L-curve plot.

    argv: [log10(r_min), log10(r_max), number_of_values]
    """
    rmin, rmax = float(argv[0]), float(argv[1])
    nr = int(argv[2])
    for r in np.logspace(rmin, rmax, nr):
        log_file_name = "helheim_lambda-" + str(r) + ".txt"
        # One inversion per regularization value...
        run.main(["-g", "helheim", "-r", str(r), "-p", "4",
                  "-i", "30", "-o", log_file_name])
        # ...then archive its outputs together with the log.
        archive.main(["-g", "helheim",
                      "-o", "helheim_lambda-" + str(r) + ".tar.gz",
                      "-x", log_file_name])
def main(argv):
    """Run Helheim inversions at successively finer mesh resolutions and
    collect each run's outputs under <glacier>_grid_dependence."""
    glacier = "helheim"
    dxs = [300, 150, 75]  # mesh edge lengths, coarse to fine
    for dx in dxs:
        # Regenerate the gmsh and Elmer meshes at this resolution.
        os.chdir("meshes")
        make_meshes.main(["-l", str(dx)])
        os.chdir("../")
        os.chdir("elmer")
        make_elmer_meshes.main(argv)
        os.chdir("../")
        directory = glacier + "_grid_dependence"
        # NOTE(review): os.mkdir raises OSError on the second iteration
        # because the directory already exists -- confirm intended.
        os.mkdir(directory)
        log_file_name = directory + "/log_dx-" + str(dx) + ".txt"
        run.main(["-g", glacier, "-i", "20", "-o", log_file_name])
        # Move this resolution's mesh/solver outputs aside so the next
        # iteration starts clean.
        for filename in glob.glob("meshes/" + glacier + "/" + glacier + ".[1-2]*"):
            shutil.move(filename, directory)
        shutil.move("elmer/" + glacier + "3d", directory)
        shutil.rmtree("elmer/" + glacier)
def test_setup_noargs(mock_open):
    """With no CLI arguments, run.main writes the default 'HF' entry."""
    sys.argv[1:] = []
    written = mock.MagicMock()
    mock_open().__enter__.return_value = written
    run.main()
    written.write.assert_has_calls([mock.call('HF')])
def test_setup_two_args(mock_open):
    """Two positional functionals are appended after the default 'HF'."""
    sys.argv[1:] = ['LDA', 'B3LYP']
    written = mock.MagicMock()
    mock_open().__enter__.return_value = written
    run.main()
    written.write.assert_has_calls([mock.call('HF\nLDA\nB3LYP')])
def test_setup_file_args(mock_open):
    """-f <file> appends the file's contents after the default 'HF'."""
    sys.argv[1:] = ['-f', 'somefile']
    infile = mock.MagicMock()
    infile.read.return_value = "F1\nF2"
    outfile = mock.MagicMock()
    # First open() is the input file, second is the output file.
    mock_open().__enter__.side_effect = [infile, outfile]
    run.main()
    infile.read.assert_called_once_with()
    outfile.write.assert_called_once_with("HF\nF1\nF2")
def main(days, years):
    """For each (day, year) pair, invert for basal shear stress and archive
    the results tagged with the nearest available observation date."""
    for day, year in zip(days, years):
        # Decimal-year lookup of the closest dated observation on disk.
        d = date.closest_date(year + day/365.25, "data/processed/")
        # Run the inversion for basal shear stress
        # NOTE(review): regularization, frac, partitions and output are
        # free variables taken from enclosing/module scope -- confirm they
        # are defined before main() is called.
        run.main(day, year, regularization, frac, partitions, output)
        # Put all the results into a .tar archive
        archive.main(day, year, "jakobshavn-" + d + ".tar.gz", ["output.txt"])
    return
def test_runs_setup_functions(
        self, run_scheduler_mock, app_mock, ensure_indices_mock):
    """main() must wire the created app into the scheduler and the index
    setup, then run it on its configured host/port."""
    fake_app = mock.MagicMock()
    host = mock.sentinel.host
    port = mock.sentinel.port
    fake_app.config = {'HOST': host, 'PORT': port}
    app_mock.return_value = fake_app

    main()

    run_scheduler_mock.assert_called_with(fake_app)
    ensure_indices_mock.assert_called_with(fake_app)
    fake_app.run.assert_called_with(host=host, port=port)
def test_main(iswrite, capsys, setup):
    """run.main must finish with an empty stderr; with --output it must
    additionally write onetep.out."""
    rcfile = os.path.join(fixtures_dir, 'teptoolsrc')
    args = ['-v', '2', os.path.join(fixtures_dir, 'onetep.dat')]
    if iswrite:
        args += ['--output']
    run.main(args, rcfile)
    out, err = capsys.readouterr()
    if iswrite:
        assert os.path.isfile('onetep.out')
    assert not err
def setUp(self):
    """Fetch the fixture list via run.main(['-t']) and parse
    config.properties into self.config.

    Fixes over the original: `property` no longer shadows the builtin,
    values containing '=' are kept intact (partition on the first '='
    only), and the trailing newline that open()'s line iteration leaves on
    every value is stripped.
    """
    self.list = run.main(['-t'])
    self.config = {}
    with open("config.properties", "r") as f:
        for line in f:
            # Split on the first '=' only so values may contain '='.
            key, _, value = line.partition('=')
            # BUGFIX: the original stored property[1] verbatim, so every
            # config value carried its trailing '\n'.
            self.config[key.strip()] = value.strip()
def run_test(skip_run):
    """Run the regression test on the 'official' test data and check results.

    Installs the reference test data and inputs (downloading/unpacking if
    necessary), optionally executes the CCC code, then compares the results
    against the reference set. Returns 1 on the first failing step.
    """
    # Reference data and inputs must both be in place before anything runs.
    if ref_test_data.install_and_check_test_files() != 0:
        return 1
    if install_inputs() != 0:
        return 1

    if not skip_run:
        sys.stdout.write("Executing CCC code...\n")
        if run.main() != 0:
            return 1

    test_result_dir = os.path.join(ref_test_data.test_data_dir, "result")
    sys.stdout.write("Comparing CCC results with 'official' results...\n")
    if compare_results.main([sys.argv[0], "result", test_result_dir]) != 0:
        return 1
def make(self, args):
    """Launch the emulator and CPU via run.main(args) and cache the
    character/pad handles for this RL agent and its opponent."""
    # Should only be called once
    self.cpu, self.dolphin = run.main(args)
    print("Running cpu.")
    self.cpu.run(dolphin_process=self.dolphin)
    self._character = self.cpu.characters[_RL_AGENT_INDEX]
    # Huh. cpu.py puts our pad at index 0 which != _RL_AGENT_INDEX.
    self._pad = self.cpu.pads[0]
    # NOTE(review): opponent character is taken from index 0, the same
    # index as our pad -- confirm the intended character/pad pairing.
    self._opponent_character = self.cpu.characters[0]
    self._opponent_pad = self.cpu.pads[1]
def main(*args, **kwargs):
    """Run the merge pipeline over the bibcodes listed in
    tests/merge_test_cases.txt against a scratch database and dump the
    merged records to tests/tests_output.txt."""
    cases_path = os.path.join(PROJECT_HOME, 'tests', 'merge_test_cases.txt')
    with open(cases_path, 'r') as fp:
        lines = fp.readlines()
    # Point at a scratch database so tests never touch the real one.
    MONGO['DATABASE'] = 'tests_%s' % MONGO['DATABASE']
    cli_args = ['--bibcodes']
    # First whitespace-separated token of each non-comment line is a bibcode.
    cli_args.extend([L.strip().split()[0]
                     for L in lines if L and not L.startswith('#')])
    #cli_args.extend(['--classic-databases','PHY','AST'])
    records = run.main(LOGGER, MONGO, cli_args)
    out_path = os.path.join(PROJECT_HOME, 'tests', 'tests_output.txt')
    with open(out_path, 'w') as fp:
        fp.write(json.dumps(records))
def main():
    """Run the salt test-suite via run.main, teeing stdout/stderr into a
    timestamped per-minion log directory, then echo the captured stderr."""
    insert_test_jenkins_path()
    import run
    # if virtualbox test results directory exists, use it instead of /root/salt
    virtualbox_dir = '/media/sf_test_results'
    salt_config = yaml.load(open('/etc/salt/minion'))
    # Logs are grouped per minion id, then per build timestamp.
    minion_dir = os.path.join(virtualbox_dir, salt_config['id'])
    if not os.path.isdir(minion_dir):
        os.mkdir(minion_dir)
    log_dir = os.path.join(
        minion_dir,
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    )
    # Timestamped directory should be unique per build.
    if os.path.isdir(log_dir):
        raise ValueError("How %s can exist now?" % log_dir)
    os.mkdir(log_dir)
    print 'Logs of this build will be in: %s' % log_dir
    # Presumably run.main appends this shell redirection to the command it
    # builds -- TODO confirm against run.py.
    run.main(suffix='> {0}/stdout.log 2> {0}/stderr.log'.format(log_dir))
    # write to stderr the results
    with open('{0}/stderr.log'.format(log_dir)) as stderr:
        for line in stderr:
            sys.stderr.write(line)
def testGR(self):
    """Restart game server GR60011 against the test redis instance: kill
    any running server process, rebuild sys.argv, run the suite, then
    start the server via run.main()."""
    serverId = "GR60011"
    process = processtool.getOneProcessByKeyword("pypy run.py %s" % serverId)
    print "process of %s : %s" % (serverId, process)
    if process:
        # Graceful shutdown (SIGINT) of the old server instance.
        os.kill(process.pid, signal.SIGINT)
    config_redis = ("172.16.8.111", 6379, 0)  # (host, port, db)
    # config_redis=("192.168.10.73", 6379, 0)
    # run.main() reads its configuration from sys.argv; rebuild it as
    # [script, serverId, host, port, db].
    sys.argv = sys.argv[0:1]
    sys.argv.append(serverId)
    sys.argv.append(config_redis[0])
    # NOTE(review): port and db are appended as ints, not strings --
    # confirm run.main tolerates non-str sys.argv entries.
    sys.argv.append(config_redis[1])
    sys.argv.append(config_redis[2])
    print sys.path
    print sys.argv
    self.testSuit()
    import run
    run.main()
def test_main(monkeypatch, capsys, posted_results):
    """End-to-end test of main(): with process_package faked out, two
    packages are processed by one worker, the expected progress report is
    printed, and both results are posted."""
    monkeypatch.setattr("run.process_package", fake_process_package)
    monkeypatch.setattr("sys.argv", ["run.py", "--limit=2", "--workers=1"])
    # colorama.init is stubbed so no terminal escapes pollute the output.
    monkeypatch.setattr("colorama.init", lambda autoreset, strip: None)
    monkeypatch.setenv("POST_KEY", "my cat's breath smells like cat food")
    main()
    out, err = capsys.readouterr()
    assert err == ""
    # NOTE(review): the source formatting was mangled; the original block
    # likely column-aligns the PASSED / timing / percentage fields --
    # confirm the exact spacing against a real run before relying on it.
    assert out == dedent(
        """\
        Processing 2 packages with 1 workers
        pytest-plugin-a-0.1.1 PASSED 0.0s [% 50]
        pytest-plugin-b-0.2.2 PASSED 0.0s [%100]
        Posted 2 new results
        All done, congratulations :)
        """
    )
    assert posted_results == [
        {
            ("pytest-plugin-a", "0.1.1"): (0, "whatever", "the description 1"),
            ("pytest-plugin-b", "0.2.2"): (0, "whatever", "the description 2"),
        }
    ]
def main(*args, **kwargs):
    """Merge the bibcodes listed in tests/merge_test_cases.txt against a
    scratch database and push the resulting records to solr, falling back
    to the canned export_pp.json records when the merge returns nothing."""
    #args = ['--bibcodes']
    #with open(os.path.join(PROJECT_HOME,'tests','merge_test_cases.txt'),'r') as fp:
    #    lines = fp.readlines()
    # Use a scratch database so tests never touch the real one.
    MONGO['DATABASE'] = 'tests_%s' % MONGO['DATABASE']
    args = ['--bibcode-files']
    args.extend([os.path.join(PROJECT_HOME, 'tests', 'merge_test_cases.txt')])
    #args.extend([L.strip().split()[0] for L in lines if L and not L.startswith('#')])
    #args.extend(['--classic-databases','PHY','AST'])
    # Pass through any extra CLI arguments given to this script.
    if sys.argv and len(sys.argv) > 1:
        args.extend(sys.argv[1:])
    records = run.main(LOGGER, MONGO, args)
    if not records:
        # Fallback fixture so solrUpdate still has something to push.
        with open(os.path.join(PROJECT_HOME, 'tests', 'export_pp.json'), 'r') as fp:
            records = json.load(fp)
        print "Loaded records from export_pp.json"
    SolrUpdater.solrUpdate(records)
default=os.path.join("config", "thresholds.json"), help="The configuration file that stores thresholds for " "each combination of detector and username") parser.add_argument("-n", "--numCPUs", default=None, help="The number of CPUs to use to run the " "benchmark. If not specified all CPUs will be used.") args = parser.parse_args() space = cartesian_product(['median', 'mean'], ['std', 'mad'], range(20, 51, 10)) print('mode,std,window,score') for mode, std, window in space: for tup in get_files(data_path): subdir, file = tup d = subdir.split('/')[-1] # print(file) data = pd.read_csv(os.path.join(subdir, file)) values = data['value'] data['anomaly_score'] = detect_anomalies(data=values, window=window, mode=mode, std=std) d = subdir.split('/')[-1] labels = pd.read_csv(os.path.join(os.path.join(null_path, d), 'null_' + file))['label'] data['label'] = labels data.to_csv(os.path.join(os.path.join(res_path, d), '3sigma_' + file), index=False) score = run.main(args) result = str.format('{},{},{},{}', mode, std, window, score) print(result)
def _launch_on_cluster(args, sweep_args, hostname, script, args_str):
    """Build and submit one sbatch job running `python -u <script> <args_str>`.

    Extracted helper: the original main() duplicated this entire command
    construction verbatim in its eval_logp and run.py cluster branches.
    """
    if args.no_repeat:
        sbatch_cmd = "sbatch"
        time_option = "12:0:0"
        # NOTE(review): the trailing '""' reproduces the original command
        # string exactly -- it looks like a stray extra quote; confirm.
        python_cmd = f'--wrap="MKL_THREADING_LAYER=INTEL=1 python -u {script} {args_str}""'
    else:
        sbatch_cmd = "om-repeat sbatch"
        time_option = "2:0:0"
        python_cmd = f"MKL_THREADING_LAYER=INTEL=1 python -u {script} {args_str}"

    # Per-run log directory next to the run's save dir.
    logs_dir = f"{util.get_save_dir(sweep_args)}/logs"
    Path(logs_dir).mkdir(parents=True, exist_ok=True)
    job_name = util.get_save_job_name_from_args(sweep_args)
    partition_option = "--partition=tenenbaum " if args.priority else ""
    gpu_option = ":titan-x" if args.good_gpu else ""
    gpu_memory_gb = 22
    cpu_memory_gb = 16
    # The GPU-memory constraint flag only applies on the OpenMind cluster.
    gpu_memory_option = (f"--constraint={gpu_memory_gb}GB "
                         if "openmind" in hostname else "")
    sbatch_options = (f"--time={time_option} " + "--ntasks=1 " +
                      f"--gres=gpu{gpu_option}:1 " + gpu_memory_option +
                      f"--mem={cpu_memory_gb}G " + partition_option +
                      f'-J "{job_name}" ' +
                      f'-o "{logs_dir}/%j.out" ' +
                      f'-e "{logs_dir}/%j.err" ')
    cmd = " ".join([sbatch_cmd, sbatch_options, python_cmd])
    util.logging.info(cmd)
    subprocess.call(cmd, shell=True)


def _eval_logp_argss(sweep_args):
    """Yield one eval_logp argument namespace per algorithm for this run."""
    for algorithm in ["sleep", "rws", "vimco"]:
        eval_logp_args = eval_logp.get_arg_parser().parse_args([])
        eval_logp_args.algorithm = algorithm
        eval_logp_args.checkpoint_path = util.get_checkpoint_path(sweep_args)
        yield eval_logp_args


def main(args):
    """Launch every sweep configuration, locally or as sbatch jobs.

    Flags: --cluster submits sbatch jobs; --cancel kills this user's queued
    jobs first; --rm wipes save/; --eval_logp evaluates log-likelihood for
    each trained algorithm instead of training.
    """
    if args.cluster:
        hostname = socket.gethostname()
    if args.cancel:
        if args.cluster:
            util.cancel_all_my_non_bash_jobs()
    if args.rm:
        dir_ = "save/"
        if Path(dir_).exists():
            shutil.rmtree(dir_, ignore_errors=True)

    print("-------------------------------")
    print("-------------------------------")
    print("-------------------------------")
    util.logging.info("Launching {} runs on {}".format(
        len(list(get_sweep_argss(test_run=args.test_run))),
        f"cluster ({hostname})" if args.cluster else "local",
    ))
    print("-------------------------------")
    print("-------------------------------")
    print("-------------------------------")

    for sweep_args in get_sweep_argss(test_run=args.test_run):
        if args.cluster:
            if args.eval_logp:
                for eval_logp_args in _eval_logp_argss(sweep_args):
                    _launch_on_cluster(args, sweep_args, hostname,
                                       "eval_logp.py",
                                       args_to_str(eval_logp_args))
            else:
                _launch_on_cluster(args, sweep_args, hostname,
                                   "run.py", args_to_str(sweep_args))
        else:
            if args.eval_logp:
                for eval_logp_args in _eval_logp_argss(sweep_args):
                    eval_logp.main(eval_logp_args)
            else:
                run.main(sweep_args)
""" Created on Sat Nov 2 00:02:43 2019 @author: bochen """ import function import run # change the tempo filename_in = "example/Bach_FrenchSuite_1.mid" filename_out = "example/Bach_FrenchSuite_1_tempo1.5.mid" function.modify_midi_tempo(filename_in, filename_out, 1.5) run.main(filename_out) ''' # change the pitch filename_in = "example/Bach_FrenchSuite_1.mid" filename_out = "example/Bach_FrenchSuite_1_pitch+15.mid" function.modify_midi_pitch(filename_in, filename_out, 15) run.main(filename_out) '''
#!/usr/bin/env python import run run.main(nas=True)
#Magic words for starting the game.. if __name__ == '__main__': from run import main main()
def post(self):
    """HTTP POST handler: compute a prediction via run.main() and write it
    back as the response body."""
    pred = run.main()
    self.write(pred)
pass def teardown(flow): pass ####################### # ####################### case_list = [ #('Contacts3G', 20, 2, test_call_from_contacts_3G, MDevice(EndCall, backto_contacts)), #('Contacts3GLTE', 60, 2, test_call_from_contacts_LTE, MDevice(EndCall, backto_contacts)), #('History3G', 20, 5, Test_Call_from_search, MDevice(EndCall, backto_contacts)), #('History3GLTE',60, 2, test_call_from_history_LTE , MDevice(EndCall, backto_contacts)), #('CallReceive', 100, 2,test_call_receive, SDevice(EndCall, backto_contacts)), #('ContactsAdd', 20, 2, test_contacts_add, MDevice(EndCall, backto_contacts)), #('ContactsDel', 20, 2, test_contacts_remove, MDevice(EndCall, backto_contacts)), ("Send message ways",20,1,Test_send_message_ways,clear), #("Send message from different ways",20,1,Test_send_message_differentways,clear), #("Get differt type message",20,1,Test_get_message_include_differenttype,clear), #("Reply message from talk",20,1,ll,clear), ] module_total_time = sum([item[1] for item in case_list]) module_mini_total_time = sum([item[2] for item in case_list]) run_list = StabilityTestTemplate(case_list, setup, teardown) if __name__ == '__main__': import run run.main(modu='Messaging')
def main(): """Main function in chain.py""" ### Check for existence of .env file in local directory. If not there, ### exit. file_status = str(os.path.exists('.env')) if file_status != 'True': print ('\n The .env file does not exist. Create this file and ' \ 'poplulate it with your API keys per the README.\n') print('Exiting...\n') exit() ### Load Tenable IO API keys from the .env file stored in the current ### directory. Make sure you chmod the file 600 to protect it, and ### assign ownership as necessary. A check below will set the perms to ### 600 if it's not already. Note this check does not run if using the ### -h/--help flag. It only works with create/delete/run args. ### Format of the .env is: # ACCESS_KEY=<access key goes here> # SECRET_KEY=<secret key goe shere> ### load_dotenv() access_key = os.getenv('ACCESS_KEY') secret_key = os.getenv('SECRET_KEY') ### Check for blank API keys or the stub value in the .env file if access_key == '' or secret_key == '' or \ access_key == '<access_key>' or secret_key == '<secret_key>': print ('\n One or more API keys were not declared in the .env file ' \ ' properly.') print(' Exiting...\n') exit() ###Configure command line options using argparse parser = argparse.ArgumentParser(formatter_class=\ argparse.RawTextHelpFormatter) parser.add_argument('--action', choices=[ 'create', 'run', 'delete', 'create-run', 'delete-create', 'delete-create-run', 'info' ], help=textwrap.dedent(''' NOTES: - You cannot delete and then run scans (delete-run), nor create and then delete scans (create-delete). Similarly, create-delete-run is not supported. - The "info" flag will output the folder, scanner, policy, and tag name:ID dictionaries to log.info to assist in buildiling scan definitions in the scans.ini file. Run "./chain.py --action info" to geneate this log. The data is also sent to STDOUT, however it is easier to search/grep from the log.info flat file. - The create/delete/run scripts all log to log.chain in the logs subdirectory. 
The log file is set to rotate when the size reaches 100K bytes, and keeps a history of 5 log files (log.chain.5 being the oldest and log.chain being the current). Successive instantiations of the run.py script will also log to log.chain. The script name is in the log.chain file, such as "create.py" in the second field (fields delimited by double-colons). Note that the log.info file is cleared on every run of the create/delete/run scripts. This is fine as this data is mutable and should be queried every time a scan in scans.ini is configured or updated. ''')) args = parser.parse_args() # Test to see if logs subdir exists, and if not, create/chmod it if not os.path.exists('logs'): os.makedirs('logs') os.chmod('logs', 0o700) # Test if .env file exists, and if so, chmod it. This file contains # your API keys, so needs to be secured as much as file permissions # allow. Note this check is not executed when using the -h/--help # argument. These perms should alrady be 600, but double checking. fix_perms('./.env') # Check for --action arguments. Will import # necessary .py scripts to create .pyc bytecode # files. Also run fix_perms function to chmod file. 
if args.action == 'create': import info info.main(access_key, secret_key) import create create.main(access_key, secret_key, info.folder_dict, \ info.scanner_dict, info.policies_dict, info.tag_dict) fix_perms('./logs/log.chain') elif args.action == 'run': import run run.main(access_key, secret_key) fix_perms('./logs/log.chain') elif args.action == 'delete': import delete delete.main(access_key, secret_key) fix_perms('./logs/log.chain') elif args.action == 'create-run': import info info.main(access_key, secret_key) import create create.main(access_key, secret_key, info.folder_dict, \ info.scanner_dict, info.policies_dict, info.tag_dict) import run run.main(access_key, secret_key) fix_perms('./logs/log.chain') elif args.action == 'delete-create': import delete delete.main(access_key, secret_key) import info info.main(access_key, secret_key) import create create.main(access_key, secret_key, info.folder_dict, \ info.scanner_dict, info.policies_dict, info.tag_dict) fix_perms('./logs/log.chain') elif args.action == 'delete-create-run': import delete delete.main(access_key, secret_key) import info info.main(access_key, secret_key) import create create.main(access_key, secret_key, info.folder_dict, \ info.scanner_dict, info.policies_dict, info.tag_dict) import run run.main(access_key, secret_key) fix_perms('./logs/log.chain') elif args.action == 'info': import info info.main(access_key, secret_key) fix_perms('./logs/log.info') else: print( '\n ERROR. No arguments supplied when runing the chain.py script.' ) print(' Run "./chain.py --help" to see usage info.\n') print(' Exiting...\n') exit()
def test_full_large(self):
    """Smoke-test run.main with the 'large' parameter set."""
    run.main(.5, 4, .1)
def test_run():
    """Train the defect model, then reload the trained model from disk."""
    base_args = ["--goal", "defect"]
    # Test running the training for the bug model.
    run.main(run.parse_args(["--train"] + base_args))
    # Test loading the trained model.
    run.main(run.parse_args(base_args))
def main():
    """Delegate to run.main over the module-level `paths` and return its
    result."""
    return run.main(paths)
def post(self):
    """HTTP POST handler: read `companyName`, run the model, and return
    the prediction serialized as JSON."""
    companyName = self.get_argument('companyName')
    # NOTE(review): companyName is read but never passed to run.main --
    # confirm whether the model should receive it.
    pred = run.main()
    # BUGFIX: json.dump() writes to a file object and raises TypeError
    # when called with a single argument; json.dumps() returns the string.
    data = json.dumps(pred)
    self.write(data)
type=str, help='Which level to run, Eg. 1-1', default='1-1', nargs='?') args = parser.parse_args() if (args.mode.upper() == "TRAIN" or args.mode.upper() == "CONT_TRAIN") and args.gen is None: parser.error("Please specify number of generations!") if args.mode.upper() == "CONT_TRAIN" and args.file is None: parser.error( "Please specify checkpoint file (" "./Files/neat-checkpoint-2492 can be used to start from generation 2492)!" ) if args.mode.upper() == "TRAIN": t = t.Train(args.gen, args.parallel, args.level) t.main(config_file=args.config) elif args.mode.upper() == "CONT_TRAIN": c = ct.Train(args.gen, args.file, args.parallel, args.level) c.main(config_file=args.config) elif args.mode.upper() == "RUN": args.file = "finisher.pkl" if args.file is None else args.file r.main(args.config, args.file, args.level) else: print("Please enter 'train' or 'mode' or 'cont_train")
#!/usr/bin/env python # -*- coding: utf-8 -*- import wx import wx.adv phoenix = ("""\ A phoenix is a mythical bird with a colorful plumage and a tail of gold and scarlet (or purple and blue, according to some sources). It has a 500 to 1,000 year life-cycle, near the end of which it builds itself a nest of myrrh twigs that then ignites; both nest and bird burn fiercely and are reduced to ashes, from which a new, young phoenix or phoenix egg arises, reborn anew to live again. The new phoenix is destined to live as long as its old self. """) class SampleBanners(wx.Panel): def __init__(self, parent): # ... create the frame itself ... wx.Panel.__init__(self, parent) pnxBmp = wx.Bitmap('bitmaps/phoenix_top.png') bmpsz = pnxBmp.GetSize() # Create and initialize the banner. whitePanel = wx.Panel(self, -1, size=(-1, bmpsz[1])) whitePanel.SetBackgroundColour(wx.WHITE) # Create and initialize the 1st banner and define a bitmap. banner1 = wx.adv.BannerWindow(whitePanel, dir=wx.BOTTOM) banner1.SetBitmap(pnxBmp) whiteSizer = wx.BoxSizer(wx.HORIZONTAL) whiteSizer.Add(banner1, 1) whitePanel.SetSizer(whiteSizer) # Create and initialize the 2nd banner and define the gradient text. banner2 = wx.adv.BannerWindow(self, dir=wx.TOP) banner2.SetGradient(start='#FF8000', end='#FFFFFF') banner2.SetText("Phoenix", phoenix)
#!/usr/bin/env python # -*- coding: utf-8 -*- import wx import wx.adv phoenix = ("""\ A phoenix is a mythical bird with a colorful plumage and a tail of gold and scarlet (or purple and blue, according to some sources). It has a 500 to 1,000 year life-cycle, near the end of which it builds itself a nest of myrrh twigs that then ignites; both nest and bird burn fiercely and are reduced to ashes, from which a new, young phoenix or phoenix egg arises, reborn anew to live again. The new phoenix is destined to live as long as its old self. """) class SampleBanners(wx.Panel): def __init__(self, parent): # ... create the frame itself ... wx.Panel.__init__(self, parent) pnxBmp = wx.Bitmap('bitmaps/phoenix_top.png') bmpsz = pnxBmp.GetSize() # Create and initialize the banner. whitePanel = wx.Panel(self, -1, size=(-1, bmpsz[1])) whitePanel.SetBackgroundColour(wx.WHITE) # Create and initialize the 1st banner and define a bitmap. banner1 = wx.adv.BannerWindow(whitePanel, dir=wx.BOTTOM) banner1.SetBitmap(pnxBmp) whiteSizer = wx.BoxSizer(wx.HORIZONTAL) whiteSizer.Add(banner1, 1) whitePanel.SetSizer(whiteSizer) # Create and initialize the 2nd banner and define the gradient text. banner2 = wx.adv.BannerWindow(self, dir=wx.TOP) banner2.SetGradient(start='#FF8000', end='#FFFFFF')
def SvcStop(self):
    """Windows service stop handler: report stop-pending, ask the app to
    shut down in-process, then signal the wait event so the service main
    loop can exit."""
    self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
    run.main(['', '--inproc_stop'])
    win32event.SetEvent(self.hWaitStop)
#!/usr/bin/env python import run run.main(rodinia=True)
def main(arg):
    """Run the pipeline via run.main(arg) and generate 32x32 thumbnails
    from the save path it returns."""
    save_path = run.main(arg)
    thumb_32x32.main(save_path)
    # (removed the dead trailing `pass` statement)
def test_full_small(self):
    """Smoke-test run.main with the 'small' parameter set."""
    run.main(.01, 3, .05)
import run
import network
import updates

# Start from the project's default hyper-parameters and override them for
# this Space Invaders DQN experiment.
# NOTE(review): `d` aliases run.defaults, so these assignments mutate the
# shared defaults dict in place -- confirm nothing else reads it afterwards.
d = run.defaults
d['game'] = 'space_invaders'
d['reshape'] = 'mean'
d['phi'] = 'phi2'
d['screen_size'] = 84
d['no_replay'] = True
d['visualize'] = 'ale'
d['replay_start_size'] = 100
d['log_frequency'] = 10
# Fixed epsilon (no annealing): initial == final == 0.1.
d['final_epsilon'] = 0.1
d['initial_epsilon'] = 0.1
d['weights_dir'] = 'dqn20/weights'
d['network'] = network.build_nature_with_pad
#d['updates'] = updates.deepmind_rmsprop
d['show_mood'] = run.Plot

run.main(**d)
for y in range(10): for x in range(10): dc.DrawCircle(x * 400 + 20, y * 400 + 20, 200) dc.DrawText("Right click and drag in the direction you want to scroll.", 20, 20) dc.DrawText("The distance from the start of the drag determines the speed.", 20, 50) def OnRightDown(self, event): self.scroller.Start(event.GetPosition()) def OnRightUp(self, event): self.scroller.Stop() # ------------------------------------------------------------------------------- overview = """<html><body> <h2>DragScroller</h2> <p> A helper class that adds scrolling to a wx.ScrolledWindow in the direction of the drag. </body></html> """ if __name__ == "__main__": import sys, os import run run.main(["", os.path.basename(sys.argv[0])])
def manual(bot):
    """Manual LED-control mode: poll Telegram updates and switch the three
    LEDs on/off according to the (Persian) keyboard selection; the back
    button returns control to run.main()."""
    global led1
    global led2
    global led3
    global update_id
    # led status: reopen any LED handle that was closed when leaving this
    # mode previously (GPIO pins 12/21/16).
    if led1.closed:
        led1 = LED(12)
    if led2.closed:
        led2 = LED(21)
    if led3.closed:
        led3 = LED(16)
    for update in bot.get_updates(offset=update_id, timeout=10):
        # Advance the offset so this update is not fetched again.
        update_id = update.update_id + 1
        # Keyboard rows: [on buttons], [off buttons], [back], [] spacer.
        action_list_manual = [["یک روشن", "دو روشن" , "سه روشن" , "همه روشن"] ,
                              ["یک خاموش","دو خاموش","سه خاموش","همه خاموش"],
                              ["بازگشت"],[] ]
        update.message.reply_text("لطفا یکی از گزینههای زیر را انتخاب نمائید : ",
                                  reply_markup=telegram.ReplyKeyboardMarkup(action_list_manual, one_time_keyboard=True))
        # NOTE(review): reply_text above runs before the `if update.message`
        # guard below, so an update without a message would raise -- confirm.
        if update.message:
            if update.message.text == "یک روشن":
                led1.on()
                bot.sendMessage(update.message.chat_id , "لامپ شماره یک روشن شد.")
                sleep(1)
            elif update.message.text == "دو روشن":
                led2.on()
                update.message.reply_text("لامپ شماره دو روشن شد.")
                sleep(1)
            elif update.message.text == "سه روشن":
                led3.on()
                update.message.reply_text("لامپ شماره سه روشن شد.")
                sleep(1)
            elif update.message.text == "همه روشن":
                led1.on()
                led2.on()
                led3.on()
                update.message.reply_text("همه ی لامپ ها روشن شدند .")
                sleep(1)
            # Second chain starts with a plain `if` so the "off" commands
            # are matched independently of the "on" chain above.
            if update.message.text == "یک خاموش":
                led1.off()
                update.message.reply_text("لامپ شماره یک خاموش شد.")
                sleep(1)
            elif update.message.text == "دو خاموش":
                led2.off()
                update.message.reply_text("لامپ شماره دو خاموش شد.")
                sleep(1)
            elif update.message.text == "سه خاموش":
                led3.off()
                update.message.reply_text("لامپ شماره سه خاموش شد.")
                sleep(1)
            elif update.message.text == "همه خاموش":
                led1.off()
                led2.off()
                led3.off()
                update.message.reply_text("همه ی لامپ ها خاموش شدند .")
                sleep(1)
            elif update.message.text == "بازگشت":
                # Back button: release the GPIO handles before re-entering
                # the main loop.
                import run
                led1.close()
                led2.close()
                led3.close()
                sleep(1)
                run.main()
                sleep(1)
            else :
                sleep(1)
def test_all(): """ DEBUG = 1, don't go to the document detail page, just fetch documents page """ main()
def run_state_dict(d):
    """Unpack the experiment-config dict `d` as keyword args to run.main."""
    run.main(**d)
## if lines != 0: ## lines = lines * linesPer ## vsx, vsy = self.GetViewStart() ## scrollTo = vsy - lines ## self.Scroll(-1, scrollTo) #--------------------------------------------------------------------------- def runTest(frame, nb, log): win = MyCanvas(nb) return win #--------------------------------------------------------------------------- overview = """ <html> <body> The wx.ScrolledWindow class manages scrolling for its client area, transforming the coordinates according to the scrollbar positions, and setting the scroll positions, thumb sizes and ranges according to the area in view. </body> </html> """ if __name__ == '__main__': import sys, os import run run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
#!/usr/bin/python3 # -*- coding: utf-8 -*- """ Created on Mon Sep 2 21:36:42 2013 @Author: Daddiego Lucas """ #It's up there so no library is imported if not windows. if __name__=="__main__": import run run.main() """ Imports """ import socketserver, threading import win32gui,win32ui, dde #dde library needs win32ui import time """ Classes """ class DdeExecute(): def __init__(self,app,topic): self.conversation = None self.app = app self.topic = topic self._setup_dde() def _setup_dde(self): self.serv = dde.CreateServer() self.serv.Create("TC")
import argparse

import train as t
import run as r

# CLI: a single positional `mode` selects training or inference.
parser = argparse.ArgumentParser(description='Run the program')
parser.add_argument('mode', metavar='mode', type=str,
                    help="Specify 'train' or 'run' to run or train the model")
args = parser.parse_args()

# BUGFIX: the original used two independent `if` statements, so a valid
# "train" request also fell through to the second statement's `else`
# branch and printed the usage error. A single if/elif/else chain fixes it.
mode = args.mode.upper()
if mode == "TRAIN":
    t.main()
elif mode == "RUN":
    r.main()
else:
    # BUGFIX: the original message offered "'train' or 'mode'"; the valid
    # choices are 'train' and 'run'.
    print("Please enter 'train' or 'run'")
import run
import network

# Start from run's default config dict and override it for the
# "simple breakout" DQN experiment.
# NOTE(review): `d` aliases run.d, so these assignments mutate the shared
# dict in place -- confirm nothing else reads it afterwards.
d = run.d
d['game'] = 'simple_breakout'
d['replay'] = 'uniform'
d['visualize'] = 'q'
d['dqn.replay_start_size'] = 50000
d['show_mood'] = None
#d['final_exploration_action'] = 100000
d['dqn.network'] = network.build_simple_breakout

import updates
# DeepMind-style RMSProp with the Nature-paper hyperparameters.
d['dqn.updates'] = lambda loss, params: updates.deepmind_rmsprop(
    loss, params, learning_rate=.00025, rho=.95, epsilon=.01)

d['weights_dir'] = 'weights-sb-2'
#d['weights_dir'] = 'weights-sb-1'
#d['algo'] = 'dqn'
#d['last_action_no'] = 2000000
#d['max_actions_per_game'] = 1000
d['repeat_action'] = 1
d['skip_n_frames_after_lol'] = 0
d['target_network_update_frequency'] = 10000
d['final_exploration_frame'] = 100000
d['dqn.replay_memory_size'] = 400000
d['run_test_every_n'] = 50000

run.main(**d)
def test_CLI(sim_data):
    """
    Validates how args are passed without actually using the CLI
    """
    # The sim_data fixture supplies a pre-built argv list under 'args'.
    run.main(sim_data['args'])
import os, sys
import run

# Configure Paramters
import params as p
p.scram_arch = "slc6_amd64_gcc530"
p.cmssw_ver = "CMSSW_8_0_5"
# Skip the sweepRoot validation step (scripts list left empty).
#p.sweepRoot_scripts = [ "sweepRoot.sh", "sweepRoot.C" ]
p.sweepRoot_scripts = []
p.merging_scripts = [ "mergeScript.sh", "mergeHadoopFiles.C" ]
p.exit_when_done = True

# Main: process the 2017 instruction file with the parameters above.
run.main(instructions="instructions_2017.txt",params=p)
output_file_name = "-".join([c[0] for c in configs] + [c.__name__ for c in model_types] + [n.__name__ for n in controller_types] + [s.__name__ for s in struct_types]) output_file_name = "stacknn-experiments/log-" + output_file_name + ".txt" logger = Logger(output_file_name) for config_name, config in configs: for model_type in model_types: for controller_type in controller_types: for struct_type in struct_types: experiment_name = "-".join([config_name, model_type.__name__, controller_type.__name__, struct_type.__name__]) config_dir = os.path.join(results_dir, experiment_name) os.makedirs(config_dir) final_accs = [] for i in xrange(n_trials): # TODO: Should export figures, results, logs here too. save_path = os.path.join(config_dir, "%i.dat" % i) results = run.main(config, model_type, controller_type, struct_type, save_path=save_path) final_accs.append(results["final_acc"]) print("Trial accuracies:", ["{:.1f}".format(acc) for acc in final_accs]) del logger
logmsg = '' # Start compiling update.status ('COMPILE', SUBID, -1) compret = os.system (COMPILER + ' ' + LANGUAGE) compret /= 256 update.status ('COMPILE', SUBID, compret) if compret == 124: # Compile time limit exceeded, refer to Gnu timeout manual logmsg = 'Compile time limit exceeded' elif compret: # Unspecified Compilation error logmsg = 'Compilation error' else: # Start running update.status ('RUN', SUBID, -1, -1); arr = run.main (CONFIG) charstat = 'CWTMRU' # [ correct, wrong, time, memory, runtime, unexpected ] for status in arr: if status > 5: logmsg += '[' + str(status - 6) + ']' else: logmsg += charstat[status] os.system (LOGGER + ' LOG grading ' + prefix + logmsg) if HOST != "": update.conn.close()
# Script entry stub: delegate everything to run.main().
from run import main

main()
date = TestDateControl(self, -1, pos = (30,30)) #---------------------------------------------------------------------- def runTest(frame, nb, log): win = TestPanel(nb, log) return win #---------------------------------------------------------------------- overview = """<html><body> <h2><center>PopupControl</center></h2> PopupControl is a class that can display a value and has a button that will popup another window similar to how a wx.ComboBox works. The popup window can contain whatever is needed to edit the value. This example uses a wx.CalendarCtrl. <p>Currently a wx.Dialog is used for the popup. Eventually a wx.PopupWindow should be used... </body></html> """ if __name__ == '__main__': import sys,os import run run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
def main():
    """Thin wrapper: delegate to run.main()."""
    run.main()