def run_image(image_path):
    print(image_path)
    image = mpimg.imread(image_path)
    runner = Runner(image.shape)
    runner.run(image)
    plt.figure()
    plt.imshow(image)
    plt.show()

class RunnerWrapper:
    def __init__(self):
        self.runner = None

    def run(self, image):
        if self.runner is None:
            self.runner = Runner(image.shape)
        return self.runner.run(image)

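# A minimal usage sketch of the wrapper above, assuming the same
# matplotlib-based image loading as run_image; the file names are
# placeholders. The point is that the Runner is built once, from the
# first image's shape, and reused for every later call.
import matplotlib.image as mpimg

wrapper = RunnerWrapper()
for path in ["frame_000.png", "frame_001.png"]:  # hypothetical paths
    image = mpimg.imread(path)
    wrapper.run(image)  # Runner(image.shape) is constructed only on the first call
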
def main() -> None:
    args = parse_args()
    exp = Experiment(args.exp_name, args, mode=args.mode)
    if args.cfg is None:
        cfg_path = exp.cfg_path
    else:
        cfg_path = args.cfg
    cfg = Config(cfg_path)
    exp.set_cfg(cfg, override=False)
    device = (
        torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda")
    )
    runner = Runner(cfg, exp, device, resume=args.resume)
    if args.mode == "train":
        try:
            runner.train()
        except KeyboardInterrupt:
            logging.info("Training interrupted.")

def run(self):
    # TODO: add loop here to check taskQueue
    while True:
        try:
            if not self.runnerQueue.empty():
                runner = self.runnerQueue.get()
                if not runner.is_alive():
                    running_result = runner.getResult()
                    err_code = running_result['err_code']
                    retval = running_result['retval']
                    errmess = running_result['errmess']
                    toPut = {"runningResult": running_result, "score": 0}
                    if err_code:
                        toPut['score'] = 0
                    else:
                        # use the similarity ratio to score: two identical outputs score 100
                        print(self.rightAnswerDict[runner.getJudgeId()])
                        toPut['score'] = self._similiar(
                            retval,
                            self.rightAnswerDict[runner.getJudgeId()]) * 100
                    print("TOPUT: ", toPut)
                    self.doneQueue.put(toPut)
                else:
                    self.runnerQueue.put(runner)
                    print("put back")
            if not self.taskQueue.empty():
                toJudge = self.taskQueue.get()
                runner = Runner(toJudge['lang'], toJudge['code'],
                                toJudge['input'], subId=toJudge['judge_id'])
                runner.start()
                self.runnerQueue.put(runner)
        except Exception as e:
            print(e)

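# The scoring branch above assumes a `_similiar` helper that returns a ratio in
# [0, 1], so two identical outputs score 100 after the `* 100`. Its implementation
# is not shown in this snippet; a hypothetical sketch using difflib:
import difflib

def _similiar(self, output, right_answer):
    # Ratio of matching characters between the submitted output and the expected answer.
    return difflib.SequenceMatcher(None, str(output), str(right_answer)).ratio()
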
def main():
    args = parse_args()
    exp = Experiment(args.exp_name, args, mode=args.mode)
    if args.cfg is None:
        cfg_path = exp.cfg_path
    else:
        cfg_path = args.cfg
    cfg = Config(cfg_path)
    exp.set_cfg(cfg, override=False)
    device = (torch.device('cpu') if not torch.cuda.is_available() or args.cpu
              else torch.device('cuda'))
    runner = Runner(cfg, exp, device, view=args.view, resume=args.resume,
                    deterministic=args.deterministic)
    if args.mode == 'train':
        try:
            runner.train()
        except KeyboardInterrupt:
            logging.info('Training interrupted.')
    runner.eval(epoch=args.epoch or exp.get_last_checkpoint_epoch(),
                save_predictions=args.save_predictions)

class TestRunner:
    def setup(self):
        self.mock_props = mock.MagicMock()
        self.mock_git = mock.MagicMock()
        self.mock_release_notes = mock.MagicMock()
        self.mock_index = mock.MagicMock()
        self.sut = Runner()
        self.sut.props = self.mock_props
        self.sut.git = self.mock_git
        self.sut.release_notes = self.mock_release_notes
        self.sut.index = self.mock_index

    @mock.patch.object(Runner, '_Runner__commit_and_push')
    @mock.patch.object(Runner, '_Runner__generate_index')
    @mock.patch.object(Runner, '_Runner__generate_release_notes')
    @mock.patch.object(Runner, '_Runner__update_local_repos')
    def test_run_calls_composites(self, mock_ulr, mock_grn, mock_gi, mock_cap):
        ver_num = "1.2.3.4"
        self.mock_props.validate_version_num.return_value = ver_num

        self.sut.run(ver_num)

        eq_(ver_num, self.sut.version_num)
        mock_ulr.assert_called_once()
        mock_grn.assert_called_once()
        mock_gi.assert_called_once()
        mock_cap.assert_called_once()

    @mock.patch.object(Runner, '_Runner__generate_all_release_notes')
    def test_run_all_calls_composites(self, mock_garn):
        self.sut.run_all()
        mock_garn.assert_called_once()

    def test__update_local_repos_operates_as_expected(self):
        git = self.mock_git
        p = self.mock_props
        assert_false(self.sut.repos_updated)

        self.sut._Runner__update_local_repos()

        git.fetch.assert_called_once_with(p.project_remote, p.project_path)
        git.checkout.assert_called_once_with(p.wiki_branch, p.wiki_path)
        git.pull.assert_called_once_with(
            p.wiki_remote, p.wiki_branch, p.wiki_path)
        ok_(self.sut.repos_updated)

        self.sut._Runner__update_local_repos()

        eq_(1, git.fetch.call_count)
        eq_(1, git.checkout.call_count)
        eq_(1, git.pull.call_count)

    def test__get_commit_log_operates_as_expected(self):
        git = self.mock_git
        p = self.mock_props
        vnum = self.sut._version_num = "2.3.4.5"
        tag = p.get_tag_name_from_version_num.return_value = "tag1.2.3.4"
        tags = git.get_tags_by_pattern.return_value = [
            "tag1.2.3.4", "tag2.3.4.5"]
        crange = git.get_tag_range.return_value = "tag1.2.3.4..tag2.3.4.5"
        log = git.log.return_value = "the log"

        actual = self.sut._Runner__get_commit_log()

        eq_(log, actual)
        p.get_tag_name_from_version_num.assert_called_once_with(vnum)
        git.get_tags_by_pattern.assert_called_once_with(
            p.tag_pattern, p.project_path)
        git.get_tag_range.assert_called_once_with(tag, tags)
        git.log.assert_called_once_with(crange, p.project_path)

    @mock.patch.object(Runner, '_Runner__get_commit_log')
    @mock.patch('lib.runner.util')
    def test__generate_release_notes_operates_as_expected(self, mock_util, mock_gcl):
        p = self.mock_props
        rn = self.mock_release_notes
        git = self.mock_git
        vnum = self.sut._version_num = "2.3.4.5"
        rn_str = rn.__str__.return_value = "rn as string"
        log = mock_gcl.return_value = "the log"

        self.sut._Runner__generate_release_notes()

        mock_gcl.assert_called_once()
        rn.generate.assert_called_once_with(vnum, log)
        mock_util.write_file.assert_called_once_with(
            rn.absolute_filepath, rn_str)
        git.add.assert_called_once_with(rn.absolute_filepath, p.wiki_path)

    @mock.patch('lib.runner.util')
    def test__generate_index_operates_as_expected(self, mock_util):
        p = self.mock_props
        i = self.mock_index
        i_str = i.__str__.return_value = "i as string"
        vnum = self.sut._version_num = "2.3.4.5"

        self.sut._Runner__generate_index()

        self.mock_index.generate.assert_called_once_with(vnum)
        mock_util.write_file.assert_called_once_with(
            i.absolute_filepath, i_str)
        self.mock_git.add.assert_called_once_with(
            i.absolute_filepath, p.wiki_path)

    def test__commit_and_push_operates_as_expected(self):
        p = self.mock_props
        vnum = self.sut._version_num = "2.3.4.5"
        msg = "Add release notes for " + vnum
        self.sut._Runner__commit_and_push()

        self.mock_git.commit.assert_called_once_with(msg, p.wiki_path)
        self.mock_git.push.assert_called_once_with(
            p.wiki_remote, p.wiki_branch, p.wiki_path)

    @mock.patch.object(Runner, 'run')
    def test__generate_all_release_notes_operates_as_expected(self, mock_run):
        tags = ["tag1", "tag2", "tag3", "tag4"]
        self.mock_git.get_tags_by_pattern.return_value = tags
        self.mock_props.get_version_num_from_tag_name.side_effect = [
            "1", "2", "3", "4"]

        self.sut._Runner__generate_all_release_notes()

        calls = [mock.call("1"), mock.call("2"), mock.call("3"), mock.call("4")]
        mock_run.assert_has_calls(calls)

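# Note on the patch targets used in the tests above: Python name-mangles
# double-underscore attributes, which is why the private methods are patched
# and invoked as `_Runner__commit_and_push` rather than `__commit_and_push`.
# A small standalone illustration (the `Widget` class is purely hypothetical):
class Widget:
    def __do_work(self):          # stored on the class as _Widget__do_work
        return "done"

w = Widget()
print(w._Widget__do_work())       # the mangled name that mock.patch.object must target
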
    the previous 5 2000ms updates.
    """

    def start(self, control):
        self.series = pd.DataFrame()
        self.control = control

    def process(self, kline):
        self.series = self.series.append(kline)
        if self.series.shape[0] > 5:
            print("Average price change over past 5 windows: ",
                  pd.to_numeric(self.series[-5:]["PriceChange"]).mean())


if __name__ == "__main__":
    # Example Usage:
    #
    # Instantiate a `Runner`, providing API credentials, the symbol you wish to
    # run your strategy against, and the strategy itself. Then simply call
    # `.run()` to execute the strategy.
    cfg = configparser.ConfigParser()
    cfg.read('bot.ini')

    strategy = ExampleStrategy()
    runner = Runner(apiKey=cfg['credentials']['ApiKey'],
                    apiSecret=cfg['credentials']['ApiSecret'],
                    symbol=cfg['strategy']['Symbol'],
                    runnable=strategy)
    runner.run()

from lib.runner import Runner

Runner()

                   tau_dm=args.tau_dm, target_mode=args.target_mode)

## model
echo_dict = dict(tau=args.tau, dt=1, scale=args.rho_scale, spars_echo=0.1,
                 scale_echo=args.scale_echo, spars_p=0.1, init_mode="mode_a")
model = Echo1(inp_size, args.n_echo, args.num_dm, echo_dict, dm_dict=dm_dict)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
runner = Runner(model, optimizer)

"""
Search the optimal parameters.
tau: runner.model.simple_echo.alpha = 1/tau
rho: runner.model.simple_echo.scale = rho
tau: [tau_begin, tau_end, tau_step]
rho: [rho_begin, rho_end, rho_step]
"""
rho_params = np.arange(args.rho_begin, args.rho_end, args.rho_step).tolist()
tau_params = np.arange(args.tau_begin, args.tau_end, args.tau_step).tolist()
tau_dm_params = np.arange(args.tau_dm_begin, args.tau_dm_end,
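# A hypothetical sweep over the (tau, rho) grid built above, following the
# docstring's mapping (alpha = 1/tau, scale = rho). How each configuration is
# trained and scored is an assumption -- the Runner API is not shown in this
# snippet, so `runner.train()` is a placeholder.
for tau in tau_params:
    for rho in rho_params:
        runner.model.simple_echo.alpha = 1 / tau
        runner.model.simple_echo.scale = rho
        runner.train()
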
from lib.classifier import NaiveBayes, SVM
from lib.runner import Runner
import numpy as np

runner = Runner(verbose=False)

f = open(r'data\params\rbf\paraC.txt', 'w')
accuracies = []
j = 0
for gamma in [2**i for i in range(-6, 6, 1)]:
    f.write('gamma=' + str(gamma) + ': \n a=[')
    for C in np.linspace(0, 1, 101):
        if C != 0:
            j += 1
            svm = SVM(kernel='rbf', C=C, gamma=gamma)
            accuracy, testAccuracy = runner.run(svm)
            accuracies.append((accuracy, testAccuracy, C, gamma))
            # change this when the loop order changes -> write the innermost loop's parameter
            f.write(str(C) + ' ' + str(accuracy) + ';')
            print(str((float(j) / (100 * 12)) * 100) + "%")
            print(repr(accuracies[-1]))
    f.write(']\n')
print(repr(max(accuracies)))

'''
accuracy = runner.run(svm)
f = open(r'data\params\linear\paraC.txt', 'w')
f.write('a=[')
accuracies = []
j = 0
for C in np.linspace(0, 1, 101):
    if C != 0:
        j = j + 1
        svm = SVM(kernel='linear', C=C)
        accuracy, testAccuracy = runner.run(svm)
def main():
    runner = Runner()
    runner.handle_args()
    runner.handle_invalid_input()
    runner.execute()

reload(sys)
sys.setdefaultencoding('utf8')

# project imports
from lib.git import Git
from lib.properties import Properties
from lib.release_notes import ReleaseNotes
from lib.release_notes_index import ReleaseNotesIndex
from lib.arg_loader import ArgLoader
from lib.runner import Runner

script_root = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
props = Properties(script_root, script_root + '/props.json')

runner = Runner()
git = Git()
runner.props = props
runner.git = git
runner.release_notes = ReleaseNotes(props)
runner.index = ReleaseNotesIndex(props)

args = ArgLoader(sys.argv[1:]).get_args()

if args.version_number == "all":
    runner.run_all()
else:
    runner.run(args.version_number)

(options, _) = parser.parse_args()

try:
    if hasattr(pymongo, 'version_tuple') and pymongo.version_tuple[0] >= 2 \
            and pymongo.version_tuple[1] >= 4:
        from pymongo import MongoClient
        from pymongo.read_preferences import ReadPreference
        connection = MongoClient(host=options.server,
                                 read_preference=ReadPreference.SECONDARY)
    else:
        from pymongo.connection import Connection
        connection = Connection(options.server, slave_okay=True)
except AutoReconnect as ex:
    print('Connection to %s failed: %s' % (options.server, str(ex)))
    return -1

runner = Runner(connection, options.delay)
rc = runner.run()
if rc == -3:
    print('Screen size too small')
return rc


if __name__ == '__main__':
    sys.exit(main())

from lib.classifier import NaiveBayes, SVM
from lib.runner import Runner

runner = Runner()

runner.run(NaiveBayes(), "Naive Bayes")
runner.run(SVM(kernel='rbf', C=0.91, gamma=0.03125), "SVM (RBF)")
runner.run(SVM(kernel='linear', C=0.05), "SVM (Linear)")
runner.run(SVM(kernel='poly', C=0.7, degree=2, gamma=0.25), "SVM (Polynomial)")