def test_discount_mask(self):
    """A zero in the mask must stop reward accumulation at that step."""
    updater = Updater(self.net, self.lr,
                      entropy_const=self.entropy_const,
                      value_const=self.value_const,
                      gamma=self.gamma,
                      _lambda=self._lambda)
    rewards = [1, 1, 1]
    done_mask = [0, 1, 0]
    expected = [1.99, 1, 1]
    discounted = updater.discount(rewards, done_mask, self.gamma)
    self.assertTrue(np.array_equal(expected, discounted))
def __check_for_updates(self):
    """Query the update feed and, when a newer version exists, offer to
    open the download page in the system web browser."""
    updater = Updater()
    updater.check_for_updates()
    if not updater.is_latest:
        # Modal yes/no prompt anchored to the main widget.
        response = QMessageBox.question(
            self.__main_widget,
            "Update Available!",
            "A New version of this tool is available, would you like to download it now?",
            (QMessageBox.Yes | QMessageBox.No),
        )
        if response == QMessageBox.Yes:
            webbrowser.open(updater.latest_url)
def test_download_tarball(self):
    """
    download_tarball should download the tarball found at get_tarball_url,
    to get_target_directory
    """
    # Mocks for the urlopen() handle, the target file object, the HTTP
    # headers and the downloaded payload.
    mock_resource = self.mox.CreateMockAnything()
    mock_file = self.mox.CreateMockAnything()
    mock_http_message = self.mox.CreateMockAnything()
    mock_contents = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(Updater, "get_tarball_url")
    self.mox.StubOutWithMock(urllib2, "urlopen")
    self.mox.StubOutWithMock(__builtin__, "open")
    self.mox.StubOutWithMock(os.path, "isdir")
    # Recorded expectations (mox replays them in this exact order).
    # The update directory already exists, so no mkdir is expected.
    os.path.isdir(self.__update_dir).AndReturn(True)
    Updater.get_tarball_url(self.__repo).AndReturn(self.__tarball_url)
    urllib2.urlopen(self.__tarball_url).AndReturn(mock_resource)
    mock_resource.info().AndReturn(mock_http_message)
    # The target filename is derived from the Content-Disposition header.
    mock_http_message.get("Content-Disposition").AndReturn(self.__content_string)
    __builtin__.open(self.__target_filename, "wb").AndReturn(mock_file)
    # The file is used as a context manager and written to exactly once.
    mock_file.__enter__().AndReturn(mock_file)
    mock_file.write(mock_resource.read().AndReturn(mock_contents))
    mock_file.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    updater = Updater(repo = self.__repo, update_dir = self.__update_dir)
    self.assertEquals(self.__target_filename, updater.download_tarball())
def updater_init(self):
    """Emit the updater-script preamble: device check, version banner,
    /system (and optionally /vendor) remount, and ROM version check."""
    print("\nGenerating script...")
    self.us = Updater(self.is_64bit)
    if self.model != "Unknown":
        self.us.check_device(self.model, self.ext_models)
    self.us.blank_line()
    self.us.ui_print("Updating from %s" % self.verify_info[1])
    self.us.ui_print("to %s" % self.verify_info[2])
    self.us.ui_print("It may take several minutes, please be patient.")
    self.us.ui_print(" ")
    self.us.blank_line()
    self.us.ui_print("Remount /system ...")
    # NOTE(review): this unmounts /system when is_mounted != 1 — looks
    # inverted (one would expect to unmount when it IS mounted before
    # remounting); confirm against the recovery's is_mounted semantics.
    self.us.add("[ $(is_mounted /system) == 1 ] || umount /system")
    self.us.mount("/system")
    # Abort early when the mount did not produce a build.prop.
    self.us.add("[ -f /system/build.prop ] || {", end="\n")
    self.us.ui_print(" ", space_no=2)
    self.us.abort("Failed to mount /system!", space_no=2)
    self.us.add("}")
    self.us.blank_line()
    if self.pt_flag:
        # Devices with a separate vendor partition need /vendor as well.
        self.us.ui_print("Remount /vendor ...")
        self.us.add("[ $(is_mounted /vendor) == 1 ] || umount /vendor")
        self.us.mount("/vendor")
        self.us.blank_line()
    self.us.ui_print("Verify Rom Version ...")
    # Abort unless the installed ROM matches the expected source version.
    self.us.add(
        "[ $(file_getprop /system/build.prop %s) == \"%s\" ] || {"
        % (self.verify_info[0], self.verify_info[1]), end="\n")
    self.us.ui_print(" ", space_no=2)
    self.us.abort("Failed! Versions Mismatch!", space_no=2)
    self.us.add("}")
class AP_UpdateScreen(Screen, ConfigListScreen):
    """Enigma2 screen that checks for AirPlayer plugin updates and lets
    the user start one via the red button.

    Fix: the original appended setCustomTitle to onLayoutFinish twice,
    so the title callback ran twice per layout; it is now registered once.
    """
    skin = '<screen name="AP_MainMenu" title="AirPlayer Settings" position="center,center" size="565,370">\n\t\t<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />\n\t\t<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />\n\t\t<widget name="info" position="5,50" size="555,45" halign="center" valign="center" font="Regular;20" />\n\t\t<ePixmap pixmap="skin_default/div-h.png" position="0,95" zPosition="1" size="565,2" />\n\t\t<widget name="changelog" position="5,100" size="555,250" halign="top" valign="left" font="Regular;20" />\n\t</screen>'

    def __init__(self, session, args = None):
        """Build the screen, query the updater and fill the changelog label."""
        self.skin = AP_UpdateScreen.skin
        Screen.__init__(self, session)
        self._session = session
        self._hasChanged = False
        self.updater = Updater(session)
        self['key_red'] = StaticText(_('Start Update'))
        self['actions'] = ActionMap(['OkCancelActions', 'ColorActions'],
                                    {'red': self.keyStartUpdate,
                                     'cancel': self.close}, -2)
        self['info'] = Label()
        self['info'].setText('AirPlayer Enigma2 Plugin\nyou are on Version: %s\n' % config.plugins.airplayer.version.value)
        self['changelog'] = Label()
        self['changelog'].setText('searching for updates...\n')
        # checkForUpdate returns '' or 'up to date' when nothing is newer.
        link = self.updater.checkForUpdate('', 0)
        if link != '' and link != 'up to date':
            self['changelog'].setText('Update Available:\n\n' + self.updater.getChangeLog())
        else:
            self['changelog'].setText('no Updates available you are \nup to date\n')
        # Register the title hook exactly once (was duplicated before).
        self.onLayoutFinish.append(self.setCustomTitle)

    def keyStartUpdate(self):
        """Red button: delegate to the updater."""
        self.updater.startUpdate()

    def _changed(self):
        # ConfigListScreen change hook.
        self._hasChanged = True

    def setCustomTitle(self):
        """Set the window title once the layout is finished."""
        self.setTitle(_('AirPlayer Updates'))
def test_download_tarball_download_directory_does_not_exist(self):
    """
    if the folder in which the tarball is to be stored does not exist, it
    should be created.
    """
    # Mocks for the urlopen() handle, the target file object, the HTTP
    # headers and the downloaded payload.
    mock_resource = self.mox.CreateMockAnything()
    mock_file = self.mox.CreateMockAnything()
    mock_http_message = self.mox.CreateMockAnything()
    mock_contents = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(Updater, "get_tarball_url")
    self.mox.StubOutWithMock(urllib2, "urlopen")
    self.mox.StubOutWithMock(__builtin__, "open")
    self.mox.StubOutWithMock(os.path, "isdir")
    self.mox.StubOutWithMock(os, "mkdir")
    # Key difference to the happy-path test: isdir returns False, so an
    # os.mkdir call on the update directory is expected before download.
    os.path.isdir(self.__update_dir).AndReturn(False)
    os.mkdir(self.__update_dir)
    Updater.get_tarball_url(self.__repo).AndReturn(self.__tarball_url)
    urllib2.urlopen(self.__tarball_url).AndReturn(mock_resource)
    mock_resource.info().AndReturn(mock_http_message)
    mock_http_message.get("Content-Disposition").AndReturn(self.__content_string)
    __builtin__.open(self.__target_filename, "wb").AndReturn(mock_file)
    # The file is used as a context manager and written to exactly once.
    mock_file.__enter__().AndReturn(mock_file)
    mock_file.write(mock_resource.read().AndReturn(mock_contents))
    mock_file.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    updater = Updater(repo = self.__repo, update_dir = self.__update_dir)
    self.assertEquals(self.__target_filename, updater.download_tarball())
def __init__(self, download_path, url, title=u"自动更新", kill_process_name="MyClient.exe"):
    """Update dialog wiring progress bars and buttons to an Updater(url).

    Downloaded files are staged under <download_path>/update;
    kill_process_name is the client process to stop before applying.
    """
    QDialog.__init__(self)
    self.setupUi(self)
    self.setWindowTitle(title)
    self.download_path = os.path.join(download_path, "update")
    if not os.path.exists(self.download_path):
        os.mkdir(self.download_path)
    self.download_files = []
    self.updater = Updater(url)
    self.kill_process_name = kill_process_name
    # Both progress bars report percentages (0-100).
    self.total_progressbar.setValue(0)
    self.total_progressbar.setMaximum(100)
    self.progressbar.setValue(0)
    self.progressbar.setMaximum(100)
    self.btn.clicked.connect(self.check_update)
    # Cross-thread UI updates arrive via these signals.
    self.update_progressbar_signal.connect(self.on_update_progressbar)
    self.finish_update_signal.connect(self.on_finish_update)
def __init__(self, session, args=None):
    """Build the update screen, query the updater and fill the changelog.

    Fix: setCustomTitle was appended to onLayoutFinish twice, making the
    title callback run twice per layout; it is now registered once.
    """
    self.skin = AP_UpdateScreen.skin
    Screen.__init__(self, session)
    self._session = session
    self._hasChanged = False
    self.updater = Updater(session)
    self['key_red'] = StaticText(_('Start Update'))
    self['actions'] = ActionMap(['OkCancelActions', 'ColorActions'], {
        'red': self.keyStartUpdate,
        'cancel': self.close
    }, -2)
    self['info'] = Label()
    self['info'].setText(
        'AirPlayer Enigma2 Plugin\nyou are on Version: %s\n' %
        config.plugins.airplayer.version.value)
    self['changelog'] = Label()
    self['changelog'].setText('searching for updates...\n')
    # checkForUpdate returns '' or 'up to date' when nothing is newer.
    link = self.updater.checkForUpdate('', 0)
    if link != '' and link != 'up to date':
        self['changelog'].setText('Update Available:\n\n' +
                                  self.updater.getChangeLog())
    else:
        self['changelog'].setText(
            'no Updates available you are \nup to date\n')
    # Register the title hook exactly once (was duplicated before).
    self.onLayoutFinish.append(self.setCustomTitle)
def brute_force(self, current_season):
    """Grid-search the form-difference and home-advantage multipliers
    (25x25 points) and report the combination with the best prediction
    accuracy against the season's actual scores."""
    actual_scores = self.get_actual_scores(current_season)
    updater = Updater(current_season)
    updater.update_all(request_new=True)
    progress = 0
    # n points per axis -> n*n evaluated combinations.
    n = 25
    best_accuracy = -1
    best = None
    for form_diff_multiplier in np.linspace(0, 5, n):
        for home_advantage_multiplier in np.linspace(0, 30, n):
            predictor = Predictor(current_season, home_advantage_multiplier, form_diff_multiplier)
            accuracy, results_accuracy = self.score_predictions(
                predictor, actual_scores, updater.json_data,
                updater.data.team_names, updater.data.form,
                updater.data.home_advantages)
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best = ('form:', form_diff_multiplier,
                        'home advantage:', home_advantage_multiplier)
                print('New best found:', best)
                print(' Accuracy:', accuracy)
                print(' Results accuracy:', results_accuracy)
            # Progress in percent of the n*n grid, printed every iteration.
            print(round((progress / (n**2)) * 100, 2), '%')
            progress += 1
    print('FINAL BEST:', best)
    print(' Accuracy:', best_accuracy)
def test_set_results(self):
    """set_results() should produce one result per configured test, each
    with a zero score and the given status."""
    updater = Updater("localhost", 1337, TESTS)
    results = updater.set_results(TestStatus.RUNTIME_ERROR)
    self.assertEqual(len(results), 3, "There must be exactly three results")
    # NOTE(review): this loop starts at 1, so results[0] is never checked —
    # looks like an off-by-one (range(3) would cover all three). Confirm
    # whether the first result is intentionally exempt.
    for i in range(1, 3):
        self.assertEqual(results[i]["score"], 0)
        self.assertEqual(results[i]["status"], TestStatus.RUNTIME_ERROR.name)
def runUpdate(self, projects=None, force=False):
    """Run the updater over configured projects.

    projects: optional list of project names to restrict the run to
              (None means all projects).
    force:    ignore per-project "hold" flags and force a re-check.

    For each selected project an Updater is run; on a new version the
    stored currentVersion is persisted and the project's post-cmds are
    executed with %PATH/%NAME placeholders substituted.
    """
    for pro in self.config["projects"]:
        # Held projects are skipped unless the run is forced.
        if pro.get("hold") and not force:
            continue
        # Ensure the key exists so Updater.run always gets a string.
        pro.setdefault("currentVersion", "")
        if projects is None or pro["name"] in projects:
            # Per-project proxy overrides the global requests proxy.
            if "proxy" in pro:
                pro_proxy = pro["proxy"]
            else:
                pro_proxy = self.config["requests"]["proxy"]
            obj = Updater(pro["name"], pro["path"], pro_proxy)
            new_version = obj.run(force, pro["currentVersion"])
            if new_version:
                # Persist the new version immediately.
                pro_index = self.config["projects"].index(pro)
                self.config["projects"][pro_index].update(
                    {"currentVersion": new_version})
                self.config.dumpconfig()
                # Run optional post-update commands with placeholders
                # substituted; %PATH is quoted for the shell.
                for line in pro.get("post-cmds", []):
                    line = line.replace("%PATH", '"%s"' % pro["path"])
                    line = line.replace("%NAME", pro["name"])
                    os.system(line)
def testIncrementRemoteSetup(self):
    """Round-trip: read setup.py from Git ('develop'), bump its version,
    push the change, re-read it and verify the stored version matches."""
    path = '/setup.py'
    # Raw string: \s and \d are regex escapes, not string escapes — the
    # non-raw form triggers invalid-escape warnings on Python >= 3.6.
    pattern = r"\s+version='(\d+\.\d+\.\d+)',"
    u = Updater(gittoken=self.gh_token, config=self.config,
                branch='develop', file=path, patterns=pattern,
                repo=self.repo)
    setup = u.get_file_from_git()
    version = u.get_version_from_string(pattern, setup)
    incremented = u.increment_version(version)
    newsetup = u.increment_version_in_content(setup)
    u.update_file(newsetup)
    # Read back from Git and confirm the bump was persisted.
    brand_new_setup = u.get_file_from_git()
    newversion = u.get_version_from_string(pattern, brand_new_setup)
    self.assertEqual(newversion, incremented,
                     "Version from Git matches expected value")
def test_mergeListToEntries(self):
    """Merging parsed entries into a step list keeps the first step intact."""
    plan, _ = TestHelper.createPlan(self, "R")
    steps = plan.step_list
    parsed = ParseText.planText_toEntries(
        load.loadText(join("material/test/test_update1.txt")))
    merged = Updater(steps, parsed).mergeListToEntries()
    self.assertEqual(steps[0], merged[0])
def main():
    """Train the GAN: set up output directories, dataset, model, optimizers
    and the Updater, then run the training loop with periodic progress
    reports and snapshots (preview image + G/D weight files)."""
    args = get_args()
    if not args.silent:
        # Create <save_path>/<name>/preview for checkpoints and samples.
        save_path = os.path.abspath(script_path + args.save_path)
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        save_path = os.path.abspath(save_path + "/" + args.name)
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        preview_path = os.path.abspath(save_path + "/preview")
        if not os.path.exists(preview_path):
            os.mkdir(preview_path)
    # NOTE(review): save_path is only defined when not args.silent, but the
    # np.save call in the loop below uses it unconditionally — silent runs
    # would raise NameError there; confirm intended behavior.
    dataset = Dataset(args)
    if args.max_epoch is not None:
        # Convert a max-epoch budget into an iteration budget (ceiling div).
        epoch_iter = dataset.train_data_len // args.batch_size
        if dataset.train_data_len % args.batch_size != 0:
            epoch_iter += 1
        args.max_iter = args.max_epoch * epoch_iter
    progress = print_progress(args.max_iter, args.batch_size, dataset.train_data_len)
    if args.gpu_num != 0:
        cuda.get_device_from_array(xp.array([i for i in range(args.gpu_num)])).use()
    model = make_model(args, dataset)
    netG_opt = make_optimizer(model.netG_0, args.adam_alpha, args.adam_beta1, args.adam_beta2)
    netD_opt = make_optimizer(model.netD_0, args.adam_alpha, args.adam_beta1, args.adam_beta2)
    updater = Updater(model, netG_opt, netD_opt, args.n_dis, args.batch_size,
                      args.gpu_num, args.KL_loss_iter, args.KL_loss_conf,
                      args.epoch_decay, args.max_iter)
    print("==========================================")
    print("Info:start train")
    start = time.time()
    for i in range(args.max_iter):
        data = toGPU(dataset.next(), args.gpu_num)
        updater.update(data, dataset.now_epoch)
        if dataset.now_iter % args.display_interval == 0:
            # Periodic progress report and loss-history dump.
            elapsed = time.time() - start
            progress(elapsed, dataset.get_state)
            np.save(save_path + "/loss_hist.npy", updater.loss_hist)
            start = time.time()
        if dataset.now_iter % args.snapshot_interval == 0 and not args.silent:
            # Periodic snapshot: preview image plus G/D weights.
            data = dataset.sampling(args.sample_size)
            sample = sample_generate(model.netG_0, data, args.noise_dim, args.noise_dist)
            Image.fromarray(sample).save(preview_path + f"/image_{dataset.now_iter:08d}.png")
            serializers.save_npz(save_path + f"/Generator_{dataset.now_iter:08d}.npz", model.netG_0)
            serializers.save_npz(save_path +
                                 f"/Discriminator_{dataset.now_iter:08d}.npz", model.netD_0)
    if not args.silent:
        # Final sample + checkpoint after training completes.
        data = dataset.sampling(args.sample_size)
        sample = sample_generate(model.netG_0, data, args.noise_dim, args.noise_dist)
        Image.fromarray(sample).save(preview_path + f"/image_{dataset.now_iter:08d}.png")
        serializers.save_npz(save_path + f"/Generator_{dataset.now_iter:08d}.npz", model.netG_0)
        serializers.save_npz(save_path + f"/Discriminator_{dataset.now_iter:08d}.npz", model.netD_0)
    print("\n\n\n\n==========================================")
    print("Info:finish train")
def test_doesEntryListContain(self):
    """An existing step must be found at index 0 of the entry list."""
    plan, _ = TestHelper.createPlan(self, "R")
    steps = plan.step_list
    parsed = ParseText.planText_toEntries(
        load.loadText(join("material/test/test_update1.txt")))
    contains, index = Updater(steps, parsed).doesEntryListContain(steps[0])
    self.assertEqual(index, 0)
def __init__(self, words, steps=(2, 3, 4)):
    """Build one Updater per step size and record word-length bounds.

    words: collection of words fed to every Updater via main_update().
    steps: iterable of step sizes (default (2, 3, 4)).

    Fix: the default was the mutable list [2, 3, 4], which is shared
    between calls; an immutable tuple avoids that pitfall and iterates
    identically, so callers are unaffected.
    """
    self.mus = []
    for step in steps:
        mu = Updater(step)
        mu.main_update(words)
        self.mus.append(mu)
    self.words = words
    # Longest and shortest word lengths, used as search bounds.
    self.max_length = max(map(len, self.words))
    self.min_length = min(map(len, self.words))
async def get(self):
    """Handle GET: build an Updater from the requestType query argument,
    run it over the 'ign' arguments and write the result as the body."""
    print(f'GET {self.request.uri}, version 0.1')
    self.set_status(200, "data recv")
    u = Updater(
        self.get_query_argument("requestType"))  # initialize updater
    args = self.get_arguments("ign")
    resp = u.run(args)
    print(
        f'**************RESPONSE {resp}*************************************************************'
    )
    self.write(resp)
def test_download_update(self):
    """
    download_update should default to calling download_tarball. This
    allows for nifty override, e.g. pullin with Git by subtype.
    """
    self.mox.StubOutWithMock(Updater, "download_tarball")
    # Expect exactly one delegation to download_tarball.
    Updater.download_tarball()
    self.mox.ReplayAll()
    updater = Updater(self.__repo)
    updater.download_update()
def updater__liquidate_assets(self, args):
    """Run Updater phases 1 and 2 with banks 1 and 2 deactivated, to
    exercise the asset-liquidation path (Python 2 test driver).

    args: [_, environment_directory, identifier, log_directory]
    """
    from environment import Environment
    from updater import Updater
    #
    # INITIALIZATION
    #
    environment_directory = str(args[1])
    identifier = str(args[2])
    log_directory = str(args[3])
    # Configure logging parameters so we get output while the program runs
    logging.basicConfig(
        format="%(asctime)s %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        filename=log_directory + identifier + ".log",
        level=logging.INFO,
    )
    logging.info(
        "START logging for test updater__liquidate_assets in run: %s",
        environment_directory + identifier + ".xml"
    )
    #
    # TEST CODE
    #
    environment = Environment(environment_directory, identifier)
    # create a test environment with standardised banks
    print environment.banks[0]
    # print environment.banks[1]
    # print environment.banks[2]
    updater = Updater(environment)
    # Deactivate banks 1 and 2 so the update must liquidate their assets.
    environment.banks[1].active = -1
    environment.banks[2].active = -1
    #
    # execute the update code
    #
    updater.do_update_phase1(environment, 0, "debug")
    updater.do_update_phase2(environment, 0, "info")
    print environment.banks[0]
    # print environment.banks[1]
    # print environment.banks[2]
    #
    # MEASUREMENT AND LOGGING
    #
    logging.info(
        "FINISHED logging for test updater__liquidate_assets in run: %s \n",
        environment_directory + identifier + ".xml",
    )
def run(self):
    """Master server loop: optionally start the Updater thread, then
    accept client connections forever, handling each query in its own
    thread via on_new_query."""
    if self.update:
        from updater import Updater
        updater = Updater(addrs=slaveUpdateAddrs)
        updater.start()
    listen = socket.socket()
    # Allow quick restarts on the same port.
    listen.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen.bind((socket.gethostname(), masterPort))
    # Backlog of 20 pending connections.
    listen.listen(20)
    while True:
        t, addr = listen.accept()
        threading.Thread(target=self.on_new_query, args=(t, addr)).start()
def main(args):
    """Entry point: print the banner, run the self-updater and dispatch
    the target to scan_list (file of targets) or scan (single target)."""
    print(logo, flush=True)
    # Guard clause: without a target there is nothing to do.
    if not args.target:
        parser.print_help()
        return
    Updater.run()
    handler = scan_list if os.path.isfile(args.target) else scan
    handler(args)
def testGetSetup(self):
    """get_file_from_git() should return the setup.py fixture verbatim."""
    expected = fixtures.setup()
    updater = Updater(gittoken=self.gh_token, config=self.config,
                      branch='master', file='/setup.py', repo=self.repo)
    self.assertEqual(updater.get_file_from_git(), expected,
                     'Setup from Git equals test setup')
def testGetMetadata(self):
    """get_file_from_git() should return the metadata.rb fixture verbatim."""
    expected = fixtures.metadata()
    updater = Updater(gittoken=self.gh_token, config=self.config,
                      branch='master', file='/metadata.rb', repo=self.repo)
    self.assertEqual(updater.get_file_from_git(), expected,
                     'Metadata from Git equals test metadata')
def load_info(self):
    """
    Get deputy's information if the file is json formatted, else update
    the file and return the information.
    :return:
    """
    # Guard: a badly formatted file is refreshed first, then re-read.
    if not self.check_format():
        refresher = Updater()
        refresher.update()
        return self.load_info()
    deputies = Updater().get_list()
    self.info = deputies[self.json_index]
def __init__(self, conf="config.json"):
    """Load (or create) the updater-rpc configuration, migrate legacy
    layouts, and push the settings into the Updater class-level state."""
    os.chdir(sys.path[0])
    self.configpath = conf
    if not os.path.exists(self.configpath):
        # No local config: fall back to a per-user config directory
        # (%APPDATA% on Windows, ~/.config elsewhere).
        configdir_upper = os.getenv("APPDATA")
        if configdir_upper == None:
            configdir_upper = os.path.join(os.getenv("HOME"), ".config")
        configdir = os.path.join(configdir_upper, "updater-rpc")
        try:
            os.makedirs(configdir)
        except FileExistsError:
            pass
        self.configpath = os.path.join(configdir, "config.json")
    self.config = JsonConfig(self.configpath)
    # Migrate the legacy dict-shaped "projects" entry into a list of
    # {"name": ..., "path": ...} records.
    if "projects" in self.config and type(self.config["projects"]) == dict:
        new_projects = []
        for pro in self.config["projects"]:
            new_pro = {"name": pro, "path": self.config["projects"][pro]}
            new_projects.append(new_pro)
        self.config.update({"projects": new_projects})
    # Migrate the legacy top-level "proxy" key under "requests".
    if "proxy" in self.config:
        self.config["requests"]["proxy"] = self.config["proxy"]
        self.config.pop("proxy")
    self.config.set_defaults(self.default)
    self.config.dumpconfig()
    # Push settings into Updater's class-level configuration.
    Updater.setBins(self.config["binarys"]["aria2c"],
                    self.config["binarys"]["7z"])
    Updater.setAria2Rpc(self.config["aria2"]["ip"],
                        self.config["aria2"]["rpc-listen-port"],
                        self.config["aria2"]["rpc-secret"])
    Updater.setDefaults(self.config["defaults"])
    Updater.setRequestsArgs(self.config["requests"]["retry"],
                            self.config["requests"]["timeout"])
    # Remote aria2 instances additionally need a remote/local dir mapping.
    if self.config["aria2"]["ip"] == "127.0.0.1" or self.config["aria2"][
            "ip"] == "localhost" or self.config["aria2"]["ip"] == "127.1":
        pass
    else:
        try:
            Updater.setRemoteAria2(self.config["aria2"]["remote-dir"],
                                   self.config["aria2"]["local-dir"])
        except KeyError:
            raise KeyError(
                "you must set remote-dir and local-dir to use remote aria2"
            )
def updater__updater1(self, args):
    """Run Updater phase 1 over a freshly initialized environment and
    print bank states before and after (Python 2 test driver).

    args: [_, environment_directory, identifier, log_directory]
    """
    from environment import Environment
    from updater import Updater
    #
    # INITIALIZATION
    #
    environment_directory = str(args[1])
    identifier = str(args[2])
    log_directory = str(args[3])
    # Configure logging parameters so we get output while the program runs
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        filename=log_directory + identifier + ".log",
                        level=logging.INFO)
    logging.info('START logging for test updater__updater1 in run: %s',
                 environment_directory + identifier + ".xml")
    #
    # TEST CODE
    #
    environment = Environment()
    environment.initialize(environment_directory, identifier)
    # create a test environment with standardised banks
    print environment.banks[0]
    print environment.banks[1]
    print environment.banks[2]
    updater = Updater(environment)
    #
    # execute the update code
    #
    updater.do_update_phase1(environment.get_state(0), environment.network,
                             environment.network.contracts.nodes(), 0,
                             "info")
    print environment.banks[0]
    print environment.banks[1]
    print environment.banks[2]
    #
    # MEASUREMENT AND LOGGING
    #
    logging.info(
        'FINISHED logging for test updater__updater1 in run: %s \n',
        environment_directory + identifier + ".xml")
def testGetPom(self):
    """get_file_from_git() should return the pom.xml fixture verbatim."""
    expected = fixtures.pom()
    updater = Updater(gittoken=self.gh_token, config=self.config,
                      branch='master', file='/pom.xml', patterns='',
                      repo=self.repo)
    self.assertEqual(updater.get_file_from_git(), expected,
                     'Pom from Git equals test pom')
def run(self): try: if self._workload is None: print 'Assign workload before invoking run method - cannot proceed' self._logger.info( 'No workload assigned currently, please check your script') raise ValueError(expected_value='set of pipelines', actual_value=None) else: populator = Populator(workload=self._workload, pending_queue=self._pending_queue) populator.start_population() helper = Helper(pending_queue=self._pending_queue, executed_queue=self._executed_queue) helper.start_helper() updater = Updater(workload=self._workload, executed_queue=self._executed_queue) updater.start_update() pipe_count = len(self._workload) while pipe_count > 0: time.sleep(1) for pipe in self._workload: if pipe.completed: pipe_count -= 1 # Terminate threads self._logger.info('Closing populator thread') populator.terminate() self._logger.info('Populator thread closed') self._logger.info('Closing updater thread') updater.terminate() self._logger.info('Updater thread closed') self._logger.info('Closing helper thread') helper.terminate() self._logger.info('Helper thread closed') except Exception, ex: self._logger.error('Fatal error while running appmanager') # Terminate threads self._logger.info('Closing populator thread') populator.terminate() self._logger.info('Populator thread closed') self._logger.info('Closing updater thread') updater.terminate() self._logger.info('Updater thread closed') self._logger.info('Closing helper thread') helper.terminate() self._logger.info('Helper thread closed') raise UnknownError(text=ex)
def VideoMainMenu():
    """Top-level video menu (Plex channel); requires prior authentication."""
    if not Dict['auth']:
        return BadAuthMessage()
    oc = ObjectContainer(title2=TITLE, no_cache=True)
    # Injects a channel-update entry into the container when available.
    Updater(PREFIX_V + '/update', oc)
    oc.add(
        DirectoryObject(key=Callback(VideoListChannels, uid=Prefs['username']),
                        title=u'%s' % L('My channels')))
    oc.add(
        DirectoryObject(key=Callback(VideoListGroups, uid=Prefs['username']),
                        title=u'%s' % L('My groups')))
    oc.add(
        DirectoryObject(key=Callback(VideoListFriends, uid=Prefs['username']),
                        title=u'%s' % L('My friends')))
    oc.add(
        DirectoryObject(key=Callback(VideoListChannels),
                        title=u'%s' % L('All channels')))
    oc.add(
        DirectoryObject(key=Callback(VideoCatalogueGroups),
                        title=u'%s' % L('Catalogue')))
    oc.add(
        InputDirectoryObject(key=Callback(VideoSearch,
                                          title=u'%s' % L('Search Video')),
                             title=u'%s' % L('Search'),
                             prompt=u'%s' % L('Search Video')))
    # The user's own video albums are appended before returning.
    return AddVideoAlbums(oc, Prefs['username'])
def exec_helper(self, add_info, path_source, tests, run_config, expected_results):
    """Compile path_source, run every test through execute_problem and
    check the reported statuses match expected_results (order-free).

    add_info is a mock whose calls are captured into updater_results;
    each test is expected to produce two updates (a TESTING notification
    plus a final status).
    """
    updater = Updater("fake/endpoint", 42, [])
    updater_results = []
    add_info.side_effect = lambda result: updater_results.append(result)
    # Configure fake paths to the solution and its executable and compile it
    language = common.get_language_by_source_name(path_source)
    path_executable = os.path.join(
        config.PATH_SANDBOX,
        "solution.{}".format(common.get_executable_extension(language)))
    compilation_status = Compiler.compile(
        language=language,
        path_source=path_source,
        path_executable=path_executable
    )
    self.assertEqual(compilation_status, "")
    run_config.executable_path = path_executable
    try:
        for test in tests:
            execute_problem(updater=updater, submit_id=42, result_id=0,
                            test=test, run_config=run_config)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # SystemExit; `except Exception` would be safer here.
        self.fail("Failed during execution of tests.")
    self.assertEqual(add_info.call_count, len(tests) * 2)
    for result in updater_results:
        if result["status"] != TestStatus.TESTING.name:
            # print(result)
            # Match each final status against the expected multiset,
            # consuming expected entries as they are found.
            found = False
            for i in range(len(expected_results)):
                if result["status"] == expected_results[i].name:
                    found = True
                    del expected_results[i]
                    break
            self.assertTrue(
                found,
                msg="Status '{}' not among expected results.".format(result["status"]))
def MainMenu():
    """Setup Main Menu, Includes Updater"""
    oc = ObjectContainer(title2=TITLE, no_cache=True)
    mhref = '/movie'
    # Injects a channel-update entry into the container when available.
    Updater(PREFIX + '/updater', oc)
    oc.add(
        DirectoryObject(key=Callback(DirectoryList, title='Most Recent',
                                     href='%s?sort=published' % mhref, page=1),
                        title='Most Recent', thumb=R(ICON_RECENT)))
    oc.add(
        DirectoryObject(key=Callback(SortList, title='Most Viewed', href=mhref),
                        title='Most Viewed', thumb=R(ICON_VIEWS)))
    oc.add(
        DirectoryObject(key=Callback(SortList, title='Top Rated', href=mhref),
                        title='Top Rated', thumb=R(ICON_LIKE)))
    oc.add(
        DirectoryObject(key=Callback(CategoryList),
                        title='Categories', thumb=R(ICON_CAT)))
    oc.add(
        DirectoryObject(key=Callback(SortListC, title='Pornstars', href='/pornstar'),
                        title='Pornstars', thumb=R(ICON_STAR)))
    oc.add(
        DirectoryObject(key=Callback(MyBookmarks),
                        title='My Bookmarks', thumb=R(ICON_BM)))
    # Clients that cannot render native prefs/keyboard get the Dumb*
    # fallbacks instead of the standard objects.
    if Client.Product in DumbPrefs.clients:
        DumbPrefs(PREFIX, oc, title='Preferences', thumb=R('icon-prefs.png'))
    else:
        oc.add(PrefsObject(title='Preferences', thumb=R('icon-prefs.png')))
    if Client.Product in DumbKeyboard.clients:
        DumbKeyboard(PREFIX, oc, Search,
                     dktitle='Search', dkthumb=R('icon-search.png'))
    else:
        oc.add(
            InputDirectoryObject(key=Callback(Search),
                                 title='Search',
                                 summary='Search JavHiHi',
                                 prompt='Search for...',
                                 thumb=R('icon-search.png')))
    return oc
def VideoMainMenu():
    """Main menu for the SRF channel: per-station submenus, preferences,
    and an updater entry when one is available."""
    oc = ObjectContainer(title1=L('Title'))
    oc.add(
        DirectoryObject(key=Callback(SubMenu, title='Alle TV-Sendungen',
                                     url='pt-tv'),
                        title='Alle TV-Sendungen'))
    oc.add(
        DirectoryObject(key=Callback(SubMenu, title='SRF 1', url='pr-srf-1'),
                        title='SRF 1'))
    oc.add(
        DirectoryObject(key=Callback(SubMenu, title='SRF zwei', url='pr-srf-2'),
                        title='SRF zwei'))
    oc.add(
        DirectoryObject(key=Callback(SubMenu, title='SRF info',
                                     url='pr-srf-info'),
                        title='SRF info'))
    # Add Preferences to main menu.
    oc.add(PrefsObject(title=L('Preferences')))
    # Show update item, if available
    try:
        Updater(PREFIX + '/updater', oc)
    except Exception as e:
        Log.Error(e)
    return oc
def VideoMainMenu():
    """Top-level video menu (token-authenticated variant)."""
    if not Dict['token']:
        return BadAuthMessage()
    oc = ObjectContainer(title2=TITLE, no_cache=True)
    # Injects a channel-update entry into the container when available.
    Updater(PREFIX_V + '/update', oc)
    oc.add(
        DirectoryObject(key=Callback(VideoListGroups, uid=Dict['user_id']),
                        title=u'%s' % L('My groups')))
    oc.add(
        DirectoryObject(key=Callback(VideoListFriends, uid=Dict['user_id']),
                        title=u'%s' % L('My friends')))
    oc.add(
        DirectoryObject(key=Callback(VideoListSubscriptions,
                                     uid=Dict['user_id']),
                        title=u'%s' % L('My subscriptions')))
    oc.add(
        InputDirectoryObject(key=Callback(Search, search_type='video',
                                          title=u'%s' % L('Search Video')),
                             title=u'%s' % L('Search'),
                             prompt=u'%s' % L('Search Video')))
    # The user's own video albums are appended before returning.
    return AddVideoAlbums(oc, Dict['user_id'])
def MainMenu():
    """
    Setup Main menu
    Free Cams', Free Cams by Age', Free Cams by Region, Free Cams by Status
    """
    oc = ObjectContainer(title2=TITLE, art=R(ART), no_cache=True)
    # Injects a channel-update entry into the container when available.
    Updater(PREFIX + '/updater', oc)
    # One submenu per configured category.
    for t in CAT_LIST:
        oc.add(DirectoryObject(key=Callback(SubList, title=t), title=t))
    # Clients that cannot render a native keyboard get the DumbKeyboard
    # fallback for both search and hashtag lookup.
    if Client.Product in DumbKeyboard.clients:
        DumbKeyboard(PREFIX, oc, Search,
                     dktitle='Search',
                     dkthumb=R('icon-search.png'))
        DumbKeyboard(PREFIX, oc, Hashtag,
                     dktitle='#Hashtag',
                     dkthumb=R('icon-search.png'))
    else:
        oc.add(InputDirectoryObject(
            key=Callback(Search),
            title='Search',
            summary='Search Chaturbate',
            prompt='Search for...',
            thumb=R('icon-search.png')
        ))
        oc.add(InputDirectoryObject(
            key=Callback(Hashtag),
            title='#Hashtag',
            summary='Hashtag Chaturbate',
            prompt='Search hashtag...',
            thumb=R('icon-search.png')
        ))
    return oc
def get_ui(self, cfg, id=None):
    """Render the news widget: at most three items from the update feed."""
    ui = self.app.inflate('core:news')
    feed = Updater.get().get_feed()
    # The feed is None until the first successful fetch.
    if feed is not None:
        for item in feed[:3]:
            ui.append('list', UI.CustomHTML(html='<li>%s</li>' % item['text']))
    return ui
def MusicMainMenu():
    """Top-level music menu (Plex channel); requires prior authentication."""
    if not Dict['auth']:
        return BadAuthMessage()
    oc = ObjectContainer(title2=TITLE, no_cache=True)
    # Injects a channel-update entry into the container when available.
    Updater(PREFIX_M + '/update', oc)
    oc.add(
        DirectoryObject(key=Callback(MusicListGroups, uid=Prefs['username']),
                        title=u'%s' % L('My groups')))
    oc.add(
        DirectoryObject(key=Callback(MusicListFriends, uid=Prefs['username']),
                        title=u'%s' % L('My friends')))
    oc.add(
        DirectoryObject(key=Callback(MusicList, uid=Prefs['username'],
                                     title=L('My music')),
                        title=u'%s' % L('My music')))
    oc.add(
        DirectoryObject(key=Callback(MusicRecomendations,
                                     uid=Prefs['username'],
                                     title=L('Recomendations')),
                        title=u'%s' % L('Recomendations')))
    oc.add(
        DirectoryObject(key=Callback(MusicCollections),
                        title=u'%s' % L('Collections')))
    oc.add(
        InputDirectoryObject(key=Callback(MusicSearch,
                                          title=u'%s' % L('Search Music')),
                             title=u'%s' % L('Search'),
                             prompt=u'%s' % L('Search Music')))
    return oc
def test_download_tarball_invalid_url(self):
    """
    if urlopen raises URLError, None shall be returned from
    download_tarball.
    """
    self.mox.StubOutWithMock(Updater, "get_tarball_url")
    self.mox.StubOutWithMock(urllib2, "urlopen")
    self.mox.StubOutWithMock(os.path, "isdir")
    # Recorded expectations: the directory exists and the URL resolves,
    # but the download itself fails with URLError.
    os.path.isdir(self.__update_dir).AndReturn(True)
    Updater.get_tarball_url(self.__repo).AndReturn(self.__tarball_url)
    urllib2.urlopen(self.__tarball_url).AndRaise(urllib2.URLError("Nah-ah"))
    self.mox.ReplayAll()
    updater = Updater(repo = self.__repo, update_dir = self.__update_dir)
    self.assertEquals(None, updater.download_tarball())
def test_download_tarball_no_handler(self):
    """
    if urlopen returns None (i.e. no handler), None shall be returned
    from download_tarball.
    """
    self.mox.StubOutWithMock(Updater, "get_tarball_url")
    self.mox.StubOutWithMock(urllib2, "urlopen")
    self.mox.StubOutWithMock(os.path, "isdir")
    # Recorded expectations: the URL resolves but urlopen yields no
    # handler at all.
    os.path.isdir(self.__update_dir).AndReturn(True)
    Updater.get_tarball_url(self.__repo).AndReturn(self.__tarball_url)
    urllib2.urlopen(self.__tarball_url).AndReturn(None)
    self.mox.ReplayAll()
    updater = Updater(repo = self.__repo, update_dir = self.__update_dir)
    self.assertEquals(None, updater.download_tarball())
def __init__(self, game):
    """Initialize the world-layer containers for the given game."""
    self.game = game
    # Updater that manages all floating objects for this layer.
    self.floaters = Updater(self)
    self.ships = []
    self.planets = []
    self.specialOperations = []
    # Objects currently visible on screen.
    self.onScreen = []
    self.bg = BG(self.game)  # the background layer
    # NOTE(review): semantics of the False flag come from playMusic();
    # presumably it starts the music in a non-playing/non-looping state —
    # confirm against its definition.
    self.playMusic(False)
class Runner(object):
    """Drives a simulation: applies the Updater for every sweep, applies
    scheduled shocks, and records measurements each step."""
    # from environment import Environment
    #
    # VARIABLES
    #
    #
    # METHODS
    #
    #-------------------------------------------------------------------------
    # __init__
    #-------------------------------------------------------------------------
    def __init__(self):
        pass
    #-------------------------------------------------------------------------

    #-------------------------------------------------------------------------
    # initialize()
    #-------------------------------------------------------------------------
    def initialize(self, environment):
        # Bind the environment and create the updater/shocker collaborators.
        self.environment = environment
        self.updater = Updater(self.environment)
        self.shocker = Shock()
    #-------------------------------------------------------------------------

    #-------------------------------------------------------------------------
    # do_run
    #-------------------------------------------------------------------------
    def do_run(self, measurement, debug):
        # loop over all time steps and do the updating
        for i in range(self.environment.parameters.numSweeps):
            # the update step
            self.updater.do_update(self.environment, i, debug)
            # check if there is a shock at the current time step
            if (int(self.environment.get_state(i).shockType) != 0):
                self.shocker.do_shock(self.environment, int(i))
                # Clear the flag so the same shock is not applied twice.
                self.environment.get_state(i).shockType = 0
            # do the measurement
            measurement.do_measurement(self.environment.banks)
def updater__updater2(self, args):
    """Run a full Updater step after shifting deposits between banks 0
    and 1 (Python 2 test driver).

    args: [_, environment_directory, identifier, log_directory]
    """
    from environment import Environment
    from updater import Updater
    #
    # INITIALIZATION
    #
    environment_directory = str(args[1])
    identifier = str(args[2])
    log_directory = str(args[3])
    # Configure logging parameters so we get output while the program runs
    logging.basicConfig(
        format="%(asctime)s %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        filename=log_directory + identifier + ".log",
        level=logging.INFO,
    )
    logging.info("START logging for test updater__updater2 in run: %s",
                 environment_directory + identifier + ".xml")
    #
    # TEST CODE
    #
    environment = Environment(environment_directory, identifier)
    # create a test environment with standardised banks
    # Move one unit of deposits from bank 1 to bank 0.
    environment.banks[0].change_deposits(1.0)
    environment.banks[1].change_deposits(-1.0)
    updater = Updater(environment)
    #
    # execute the update code
    #
    updater.do_update(
        environment.get_state(0), environment.network,
        environment.network.contracts.nodes(), 0, "info"
    )
    #
    # MEASUREMENT AND LOGGING
    #
    logging.info(
        "FINISHED logging for test updater__updater2 in run: %s \n",
        environment_directory + identifier + ".xml"
    )
class TestUpdater(unittest.TestCase):
    # NOTE(review): Python 2 syntax (print statements) -- this module cannot
    # run under Python 3 as written.

    def setUp(self):
        # Fresh Updater pointed at the local check endpoint before every test.
        self.updater = Updater("http://localhost/check.php")

    def test_check(self):
        # Smoke test: check() should complete without raising.
        self.updater.check()

    @staticmethod
    def next_file(remote_file):
        # Progress callback passed to update(): announces each file as its
        # download starts.
        print "download file %s" % remote_file

    @staticmethod
    def download_progress(read_size, total_size):
        # Progress callback passed to update(): reports bytes read.  The
        # zero-total branch guards against division by zero below.
        if total_size == 0:
            print "read size %d" % read_size
            return
        percent = float(read_size) / total_size
        print "read size %d of %d (%0.2f)" % (read_size, total_size, round(percent * 100, 2))

    def test_update(self):
        # End-to-end: check for updates, then download into this file's directory.
        self.updater.check()
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        print cur_dir
        self.updater.update(cur_dir, self.next_file, self.download_progress)
def __init__(self, session, args = None):
    """Update screen for the AirPlayer Enigma2 plugin.

    Shows the installed version and, when the Updater reports a newer one,
    its changelog.  `args` is accepted for Screen-instantiation
    compatibility but is unused.
    """
    self.skin = AP_UpdateScreen.skin
    Screen.__init__(self, session)
    self._session = session
    self._hasChanged = False
    self.updater = Updater(session)
    self['key_red'] = StaticText(_('Start Update'))
    self['actions'] = ActionMap(['OkCancelActions', 'ColorActions'],
                                {'red': self.keyStartUpdate,
                                 'cancel': self.close}, -2)
    self['info'] = Label()
    self['info'].setText('AirPlayer Enigma2 Plugin\nyou are on Version: %s\n' % config.plugins.airplayer.version.value)
    self['changelog'] = Label()
    self['changelog'].setText('searching for updates...\n')
    # checkForUpdate result is compared against '' and 'up to date' below --
    # anything else is treated as "an update exists".
    link = self.updater.checkForUpdate('', 0)
    if link != '' and link != 'up to date':
        self['changelog'].setText('Update Available:\n\n' + self.updater.getChangeLog())
    else:
        self['changelog'].setText('no Updates available you are \nup to date\n')
    # BUG FIX: the original appended setCustomTitle to onLayoutFinish twice
    # (before and after the changelog setup), so the callback ran twice on
    # layout finish.  Register it exactly once.
    self.onLayoutFinish.append(self.setCustomTitle)
def __init__(self, data):
    """Capture a decoded submission payload and prepare paths + updater.

    `data` is the JSON-decoded request body describing one submission.
    """
    # Let werkzeug flush its own request log line before we start printing.
    sleep(0.01)

    # Core submit information.
    self.id = data["id"]
    self.source = data["source"]
    self.language = data["language"]
    self.time_limit = data["timeLimit"]
    self.memory_limit = data["memoryLimit"] * 1048576  # MiB -> bytes

    # Front-end endpoint for progress updates.
    self.update_url = data["updateEndpoint"]

    # Tests and where to download them from.
    self.tests = data["tests"]
    self.tests_url = data["testsEndpoint"]

    # Optional checker (missing key or empty string both mean "no checker").
    self.checker = data["checker"] if data.get("checker", "") != "" else None
    self.checker_url = data.get("checkerEndpoint")

    # Optional game tester plus the opponents' matches.
    self.tester = data["tester"] if data.get("tester", "") != "" else None
    self.tester_url = data.get("testerEndpoint")
    self.matches = data.get("matches")

    # Relative vs. absolute floating point comparison.
    self.floats = data["floats"]

    # Sandbox layout for this submit.
    self.path_sandbox = config.PATH_SANDBOX + "submit_{:06d}/".format(self.id)
    self.path_source = (self.path_sandbox + config.SOURCE_NAME
                        + common.get_source_extension(self.language))
    self.path_executable = (self.path_sandbox + config.EXECUTABLE_NAME
                            + common.get_executable_extension(self.language))

    # Pushes status back to the frontend server.
    self.updater = Updater(self.update_url, self.id, self.tests)

    self.logger = logging.getLogger("evltr")
def update():
    """Fetch the manifest and apply any pending update; on HTTP failure,
    report the error and carry on without updating."""
    try:
        base_dir = os.path.dirname(os.path.abspath(__file__))
        Updater(manifest_url=MANIFEST_URL, base_dir=base_dir).update()
    except HTTPError as e:
        print("Unable to download manifest file from " + MANIFEST_URL + " - skipping update. Error: " + str(e))
class QtUI(QDialog, Ui_Dialog):
    # Updater dialog: downloads new files into <download_path>/update on a
    # worker thread, then (on confirmation) kills the target process and
    # copies the files over the running install.
    #
    # Qt signals: per-file progress (0-100) and the final downloaded-file
    # list -- used because the download runs off the UI thread.
    update_progressbar_signal = pyqtSignal(int)
    finish_update_signal = pyqtSignal(list)

    def __init__(self, download_path, url, title=u"自动更新", kill_process_name="MyClient.exe"):
        QDialog.__init__(self)
        self.setupUi(self)
        self.setWindowTitle(title)
        # All downloads land in a dedicated "update" subdirectory.
        self.download_path = os.path.join(download_path, "update")
        if not os.path.exists(self.download_path):
            os.mkdir(self.download_path)
        self.download_files = []
        self.updater = Updater(url)
        self.kill_process_name = kill_process_name
        self.total_progressbar.setValue(0)
        self.total_progressbar.setMaximum(100)
        self.progressbar.setValue(0)
        self.progressbar.setMaximum(100)
        self.btn.clicked.connect(self.check_update)
        # Cross-thread UI updates must go through signals.
        self.update_progressbar_signal.connect(self.on_update_progressbar)
        self.finish_update_signal.connect(self.on_finish_update)

    def next_file(self, remote_file):
        # Callback from the updater as each file's download begins; the total
        # bar counts files (its maximum is set in check_update).
        self.info.appendPlainText(u"开始下载%s\n" % remote_file)
        self.total_progressbar.setValue(self.total_progressbar.value() + 1)

    def on_update_progressbar(self, value):
        # Slot: per-file progress percentage.
        self.progressbar.setValue(value)

    def on_finish_update(self, download_files):
        # Slot: all downloads done; offer to restart into the new files.
        self.download_files = download_files
        self.info.appendPlainText(u"更新完毕\n")
        if QMessageBox.question(self, u"是否重启", u"更新完毕是否重启应用?", QMessageBox.Yes,
                                QMessageBox.No) == QMessageBox.Yes:
            self.kill_process()
            self.copy_to_new()
            self.info.appendPlainText(u"应用新文件")
            sys.exit(0)

    def notify_process(self, read_size, total_size):
        # Download progress callback (worker thread) -> forwarded as a signal
        # so the UI thread updates the bar.
        self.update_progressbar_signal.emit(round(float(read_size) / total_size * 100))

    def download(self):
        # Worker-thread entry point: run the update and emit the result list.
        download_files = self.updater.update(self.download_path, self.next_file, self.notify_process)
        self.finish_update_signal.emit(download_files)

    def check_update(self):
        # Ask the updater which files changed; if any, confirm with the user
        # and start the download on a background thread.
        self.info.appendPlainText(u"检查更新..\n")
        remote_file_list = self.updater.check()
        if len(remote_file_list) == 0:
            self.info.appendPlainText(u"所有文件都是最新的,您不需要更新\n")
            return
        self.info.appendPlainText(u"需要更新文件的列表\n")
        for remote_file in remote_file_list:
            self.info.appendPlainText(str(remote_file) + "\n")
        self.total_progressbar.setMaximum(len(remote_file_list))
        if QMessageBox.question(self, u"是否更新", u"发现新版本是否更新", QMessageBox.Yes,
                                QMessageBox.No) == QMessageBox.Yes:
            self.btn.setEnabled(False)
            Thread(target=self.download).start()

    def kill_process(self):
        # Force-kill the running client (Windows only) so its files can be
        # overwritten by copy_to_new().
        if os.name == "nt":
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.call("taskkill /F /IM " + self.kill_process_name, startupinfo=startupinfo, shell=True)

    def copy_to_new(self):
        # Copy every downloaded file over this script's directory, then drop
        # the temporary download directory.
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        for download_file in self.download_files:
            shutil.copy(download_file, cur_dir)
        shutil.rmtree(self.download_path)
def runupdates(self):
    """Fire the update request and stash its outcome on self.result."""
    runner = Updater()
    runner.runrequest()
    self.result = runner.getResult()
def setUp(self):
    """Build the Updater under test, pointed at the local check endpoint."""
    check_url = "http://localhost/check.php"
    self.updater = Updater(check_url)
class SolarSystem:
    """A SolarSystem holds ships and other floaters, music, the background.
    It calls update() and draw() on its members and handles collisions.."""
    boundries = 1e8
    # False, or a countdown (in seconds) while the edge warning is displayed.
    drawEdgeWarning = False
    calmMusic = "res/sound/music simple.ogg"
    alertMusic = "res/sound/music alert.ogg"
    musicDuration = 98716  # track length in milliseconds
    musicPos = 0
    sun = None

    def __init__(self, game):
        self.game = game
        self.floaters = Updater(self)  # optimized container for all floaters
        self.ships = []
        self.planets = []
        self.specialOperations = []
        self.onScreen = []
        self.bg = BG(self.game)  # the background layer
        self.playMusic(False)

    def playMusic(self, alert = False):
        """Switch between calm and alert music, resuming from the saved
        position so the track does not restart on every alert change."""
        self.musicPos = ((self.musicPos + pygame.mixer.music.get_pos())
                         % self.musicDuration)
        pygame.mixer.music.stop()
        if alert:
            pygame.mixer.music.load(self.alertMusic)
        else:
            pygame.mixer.music.load(self.calmMusic)
        pygame.mixer.music.play(-1, self.musicPos / 1000.)
        pygame.mixer.music.set_volume(.15)

    def update(self, dt):
        """Runs the game."""
        # note that self.floaters is an Updater, which provides optimizations.
        # update floaters:
        screen = Rect((self.game.player.x - self.game.width / 2,
                       self.game.player.y - self.game.height / 2),
                      (self.game.width, self.game.height))
        self.floaters.update(dt, screen)
        # check collisions:
        collisions = self.floaters.collisions()
        for f1, f2 in collisions:
            collide(f1, f2)
        # keep ships inside system boundaries for now:
        if self.drawEdgeWarning:
            self.drawEdgeWarning -= 1. * self.game.dt
            if self.drawEdgeWarning <= 0:
                self.drawEdgeWarning = False
        for floater in self.floaters.frame:
            if (floater.x ** 2 + floater.y ** 2) > self.boundries ** 2:
                if isinstance(floater, Ship):
                    # nudge the ship back inside the boundary and stop it
                    floater.x -= floater.dx / 4
                    floater.y -= floater.dy / 4
                    floater.dx, floater.dy = 0, 0
                    if floater == self.game.player:
                        self.drawEdgeWarning = 1.
                else:
                    floater.kill()
        # list floaters that are on screen now:
        self.onScreen = []
        offset = (self.game.player.x - self.game.width / 2,
                  self.game.player.y - self.game.height / 2)
        for floater in self.floaters.sprites():
            r = floater.radius
            if (floater.x + r > offset[0]
                    and floater.x - r < offset[0] + self.game.width
                    and floater.y + r > offset[1]
                    and floater.y - r < offset[1] + self.game.height):
                self.onScreen.append(floater)

    def draw(self, surface, offset):
        """Draw the background, then every floater currently on screen."""
        self.bg.draw(surface, self.game.player)
        for floater in self.onScreen:
            floater.draw(surface, offset)

    def add(self, floater):
        """adds a floater to this game."""
        floater.system = self
        self.floaters.add(floater)
        if isinstance(floater, Ship):
            self.ships.append(floater)
        if isinstance(floater, Planet):
            self.planets.append(floater)

    def remove(self, floater):
        """Remove a floater from the system and from any type-specific list."""
        self.floaters.remove(floater)
        if floater in self.planets:
            self.planets.remove(floater)
        if floater in self.ships:
            self.ships.remove(floater)

    def empty(self):
        """Remove everything from the system."""
        # BUG FIX: self.ships and self.planets are plain lists, which have no
        # .empty() method (that is the pygame.sprite.Group API) -- the original
        # raised AttributeError here.  Clear the lists in place instead.
        del self.ships[:]
        self.floaters.empty()
        del self.planets[:]
from updater import Updater
import time

# Poll for updates forever, once every five seconds.
poller = Updater()
while True:
    poller.update()
    time.sleep(5)
def assertUpdate(self, file, result):
    """Assert that updating *file* (dry run, write flag False) yields *result*."""
    # DEBUG console logging so a failing assertion shows the updater's trace.
    logging.basicConfig(level=logging.DEBUG)
    actual = Updater().updateFile(file, False)
    self.assertEqual(result, actual)
from updater import Updater
import pprint
import time
from misc import about_times
from plotter import plot_time_per_rule

# Load the run configuration; abort early if it is missing.
try:
    with open('config.json') as config_file:
        config = json.load(config_file)
except FileNotFoundError:
    print("Can not found config file")
    exit()

# Wire up the pipeline: rule generator, sender, and switch/IP updater.
generator = Generator(config)
sender = Sender(config)
updater = Updater(config)
updater.update_switches()
updater.update_ips()

pp = pprint.PrettyPrinter(indent=4)

# Run counters.
installed_counter = 0
deleted_counter = 0
number_of_rules = 0

# Rules either come from a log file or are generated (branch below).
from_file = config["rule_generator"]["parameters"]["from_file"]
if from_file:
    log_name = config["rule_generator"]["parameters"]["file_name"]
    try:
        rule_list = generator.create_from_file(log_name)
        number_of_rules = len(rule_list)
        # NOTE(review): this chunk is truncated mid-try; the except clause and
        # the remainder of the script lie beyond this view.
class Evaluator:
    """Orchestrates the full evaluation of one submission: sandbox setup,
    test/checker/tester downloads, compilation, test or game execution, and
    progress updates pushed to the frontend via self.updater."""

    def __init__(self, data):
        # Sleep for a very short while so werkzeug can print its log BEFORE we start printing from here
        sleep(0.01)

        # Submit information
        self.id = data["id"]
        self.source = data["source"]
        self.language = data["language"]
        self.time_limit = data["timeLimit"]
        self.memory_limit = data["memoryLimit"] * 1048576  # Given in MiB, convert to bytes

        # Front-end endpoint
        self.update_url = data["updateEndpoint"]

        # List of tests and endpoint where to download them from
        self.tests = data["tests"]
        self.tests_url = data["testsEndpoint"]

        # If a task with checker, there should also be an endpoint where to download it from
        self.checker = data["checker"] if ("checker" in data and data["checker"] != "") else None
        self.checker_url = data["checkerEndpoint"] if "checkerEndpoint" in data else None

        # If a game, there should also be a tester and a list of matches (opponents' names and solutions)
        self.tester = data["tester"] if ("tester" in data and data["tester"] != "") else None
        self.tester_url = data["testerEndpoint"] if "testerEndpoint" in data else None
        self.matches = data["matches"] if "matches" in data else None

        # Whether to use relative or absolute floating point comparison
        self.floats = data["floats"]

        # Path to sandbox and files inside
        self.path_sandbox = config.PATH_SANDBOX + "submit_{:06d}/".format(self.id)
        self.path_source = self.path_sandbox + config.SOURCE_NAME + common.get_source_extension(self.language)
        self.path_executable = self.path_sandbox + config.EXECUTABLE_NAME + common.get_executable_extension(self.language)

        # Frontend server update logic
        self.updater = Updater(self.update_url, self.id, self.tests)

        # Configure logger
        self.logger = logging.getLogger("evltr")

    def __del__(self):
        # Clean up remaining files
        self.cleanup()

    def evaluate(self):
        """Run the whole pipeline; every early return first reports the
        failure to the frontend through the updater."""
        # Send an update that preparation has been started for executing this submission
        self.logger.info("[Submission {}] Evaluating submission {}".format(self.id, self.id))
        self.updater.add_info("", None, TestStatus.PREPARING)

        # Create sandbox directory
        self.logger.info("[Submission {}] >> creating sandbox directory...".format(self.id))
        if not self.create_sandbox_dir():
            self.updater.add_info("Error while creating sandbox directory!", None, TestStatus.INTERNAL_ERROR)
            return

        # Download the test files (if not downloaded already)
        self.logger.info("[Submission {}] >> downloading test files...".format(self.id))
        if not self.download_tests():
            self.updater.add_info("Error while downloading test files!", None, TestStatus.INTERNAL_ERROR)
            return

        # Download and compile the checker (if not already available)
        if self.checker is not None and not path.exists(config.PATH_CHECKERS + self.checker):
            self.logger.info("[Submission {}] >> updating checker file...".format(self.id))
            if not self.download_and_compile_utility_file(config.PATH_CHECKERS, self.checker, self.checker_url):
                self.updater.add_info("Error while setting up checker!", None, TestStatus.INTERNAL_ERROR)
                return

        # Download and compile the tester (if not already available)
        if self.tester is not None and not path.exists(config.PATH_TESTERS + self.tester):
            self.logger.info("[Submission {}] >> updating tester file...".format(self.id))
            if not self.download_and_compile_utility_file(config.PATH_TESTERS, self.tester, self.tester_url):
                self.updater.add_info("Error while setting up tester!", None, TestStatus.INTERNAL_ERROR)
                return

        # Save the source to a file so we can compile it later
        self.logger.info("[Submission {}] >> writing source code to file...".format(self.id))
        if not self.write_source(self.source, self.path_source):
            self.updater.add_info("Error while writing the source to a file!", None, TestStatus.INTERNAL_ERROR)
            return

        # Send an update that the compilation has been started for this submission
        self.updater.add_info("", None, TestStatus.COMPILING)

        # Compile
        self.logger.info("[Submission {}] >> compiling solution...".format(self.id))
        compilation_status = self.compile(self.language, self.path_source, self.path_executable)
        if compilation_status != "":
            self.logger.info("[Submission {}] Compilation error! Aborting...".format(self.id))
            self.updater.add_info(compilation_status, None, TestStatus.COMPILATION_ERROR)
            return

        # If a standard task, just run the solution on the given tests
        self.logger.info("[Submission {}] >> starting evaluation of solution...".format(self.id))
        if not self.run_solution():
            self.logger.info("[Submission {}] Error while processing the solution! Aborting...".format(self.id))
            self.updater.add_info("Error while processing the solution!", None, TestStatus.INTERNAL_ERROR)
            return

        # Finished with this submission
        self.logger.info("[Submission {}] >> done with {}!".format(self.id, self.id))
        self.updater.add_info("DONE", None, None)

    def create_sandbox_dir(self):
        """Create a fresh sandbox directory; returns False on OS errors."""
        try:
            # Delete if already present (maybe regrade?)
            if path.exists(self.path_sandbox):
                shutil.rmtree(self.path_sandbox)
            # Create the submit testing directory
            if not path.exists(self.path_sandbox):
                makedirs(self.path_sandbox)
        except OSError as ex:
            self.logger.error("[Submission {}] Could not create sandbox directory. Error was: {}".format(self.id, str(ex)))
            return False
        return True

    def download_test(self, test_name, test_hash):
        # Tests are cached by content hash under config.PATH_TESTS.
        test_path = config.PATH_TESTS + test_hash
        # Download only if the file doesn't already exist
        if not path.exists(test_path):
            self.logger.info("[Submission {}] Downloading file {} with hash {} from URL: {}".format(
                self.id, test_name, test_hash, self.tests_url + test_name))
            common.download_file(self.tests_url + test_name, test_path)

    def download_tests(self):
        """Fetch every input/solution file; returns False on any failure."""
        # In case the directory for the tests does not exist, create it
        if not path.exists(config.PATH_DATA):
            makedirs(config.PATH_DATA)
        if not path.exists(config.PATH_TESTS):
            makedirs(config.PATH_TESTS)
        try:
            for test in self.tests:
                self.download_test(test["inpFile"], test["inpHash"])
                self.download_test(test["solFile"], test["solHash"])
        except Exception as ex:
            # NOTE(review): message text reconstructed across a mangled line
            # break in this chunk -- confirm exact wording against upstream.
            self.logger.error("[Submission {}] Could not download tests properly. Error was: {}".format(self.id, str(ex)))
            return False
        return True

    def compile(self, language, path_source, path_executable):
        """Compile via the shared executor; returns "" on success, otherwise
        an error message suitable for the user."""
        try:
            return common.executor.submit(Compiler.compile, language, path_source, path_executable).result()
        except ValueError as ex:
            # If a non-compiler error occurred, log the message in addition to sending it to the user
            self.logger.error("[Submission {}] Could not compile file {}! Error was: {}".format(
                self.id, path_source, str(ex)))
            return "Internal Error: " + str(ex)

    def compile_utility_file(self, path_source, path_executable):
        # Only compile if not already compiled
        if not path.exists(path_executable):
            self.logger.info("[Submission {}] >> compiling utility file {}...".format(
                self.id, path.basename(path_source)))
            return self.compile("C++", path_source, path_executable) == ""
        return True

    def download_utility_file(self, url, destination):
        # Only download if not downloaded already
        if not path.exists(destination):
            self.logger.info("[Submission {}] >> downloading utility file {}".format(self.id, url.split('/')[-1]))
            try:
                common.download_file(url, destination)
            except RuntimeError:
                return False
        return True

    def download_and_compile_utility_file(self, directory, file_hash, url):
        """Fetch a checker/tester source by hash and compile it as C++."""
        path_source = directory + file_hash + config.SOURCE_EXTENSION_CPP
        path_executable = directory + file_hash + config.EXECUTABLE_EXTENSION_CPP
        if not self.download_utility_file(url, path_source):
            return False
        if not self.compile_utility_file(path_source, path_executable):
            return False
        return True

    def write_source(self, source, destination):
        """Write the submitted source to disk; returns False on OS errors."""
        try:
            with open(destination, "w") as file:
                file.write(source)
        except OSError as ex:
            # NOTE(review): this message has no {} placeholders, so both
            # .format() arguments are silently dropped -- likely meant
            # "... Error: {}" with the exception text included.
            self.logger.error("[Submission {}] Could not write source file. Error: ".format(self.id, str(ex)))
            return False
        return True

    def process_tests(self):
        """Run the solution on every test concurrently; returns accumulated
        error text ("" means success)."""
        start_time = perf_counter()
        runner = Runner(self)
        errors = ""
        test_futures = []

        for result_id in range(len(self.tests)):
            test_futures.append([self.tests[result_id],
                                 common.executor.submit(runner.run, result_id, self.tests[result_id])])

        for test, future in test_futures:
            try:
                # Wait for the test to be executed
                future.result()
            except Exception as ex:
                errors += "Internal error on test " + test["inpFile"] + "(" + test["inpHash"] + "): " + str(ex)
                self.logger.error("[Submission {}] Got exception: {}".format(self.id, str(ex)))
                break

        self.logger.info("[Submission {}] -- executed {} tests in {:.3f}s.".format(
            self.id, len(self.tests), perf_counter() - start_time))
        return errors

    def process_games(self):
        """Play every match (forward and reversed) against each opponent;
        returns accumulated error text ("" means success)."""
        start_time = perf_counter()
        runner = Runner(self)
        errors = ""
        result_id = 0
        for match in self.matches:
            self.logger.info("[Submission {}] -- running game {} vs {}...".format(
                self.id, match["player_one_name"], match["player_two_name"]))

            # Get and compile the opponent's solution
            opponent_language = match["language"]
            opponent_path_source = self.path_sandbox + config.OPPONENT_SOURCE_NAME +\
                common.get_source_extension(opponent_language)
            opponent_path_executable = self.path_sandbox + config.OPPONENT_EXECUTABLE_NAME +\
                common.get_executable_extension(opponent_language)

            self.logger.info("[Submission {}] ++ writing opponent's source...".format(self.id))
            if not self.write_source(match["source"], opponent_path_source):
                self.logger.error("[Submission {}] Could not write opponent's source!".format(self.id))
                continue

            self.logger.info("[Submission {}] ++ compiling opponent's source...".format(self.id))
            if self.compile(opponent_language, opponent_path_source, opponent_path_executable) != "":
                self.logger.error("[Submission {}] Could not compile opponent's source!".format(self.id))
                continue

            # Run all of the game's tests for this pair of solutions
            test_futures = []
            for test in self.tests:
                # Play forward game
                future = common.executor.submit(runner.play, result_id, test, self.tester,
                                                match["player_one_id"], match["player_one_name"], self.path_executable,
                                                match["player_two_id"], match["player_two_name"], opponent_path_executable)
                test_futures.append([test, future])
                result_id += 1

                # Play also reversed game (first player as second) so it is fair
                future = common.executor.submit(runner.play, result_id, test, self.tester,
                                                match["player_two_id"], match["player_two_name"], opponent_path_executable,
                                                match["player_one_id"], match["player_one_name"], self.path_executable)
                test_futures.append([test, future])
                result_id += 1

            for test_future in test_futures:
                test, future = test_future
                try:
                    # Wait for the test to be executed
                    future.result()
                except ValueError as ex:
                    errors += "Internal error on test " + test["inpFile"] + "(" + test["inpHash"] + "): " + str(ex)
                    self.logger.error("[Submission {}] {}".format(self.id, str(ex)))
                    break
                except Exception as ex:
                    self.logger.error("[Submission {}] Got exception: {}".format(self.id, str(ex)))

        self.logger.info("[Submission {}] -- executed {} matches in {:.3f}s.".format(
            self.id, len(self.matches), perf_counter() - start_time))
        return errors

    def run_solution(self):
        """Dispatch to test processing (standard task) or game simulation."""
        if self.tester is None:
            run_status = self.process_tests()
            if run_status != "":
                self.logger.info("[Submission {}] Error while processing the tests: {}!".format(self.id, run_status))
                return False
        # If a game, set-up the runner and opponents' solutions, then simulate the game
        else:
            run_status = self.process_games()
            if run_status != "":
                self.logger.info("[Submission {}] Error while processing the games: {}!".format(self.id, run_status))
                return False
        return True

    def cleanup(self):
        """Remove this submission's sandbox directory if it exists."""
        self.logger.info("[Submission {}] Cleaning up sandbox...".format(self.id))
        if path.exists(self.path_sandbox):
            shutil.rmtree(self.path_sandbox)
# NOTE(review): this chunk begins inside a helper whose `def` line (taking
# _json_data) lies before this view -- it collects the addon names.
result = []
for _addon in _json_data['addons']:
    result.append({'name': _addon['name']})
    pass
return result

# --- top-level script: update addons listed in config.json, then write a
# --- BBCode report (Python 2: print statements, json.dump(encoding=...)).
working_path = os.getcwd()

json_filename = os.path.join(working_path, 'config.json')
#json_filename = os.path.join(working_path, 'debug_config.json')

json_file = open(json_filename)
json_data = json.load(json_file)
json_file.close()

# Run the updater and persist the (possibly modified) config back to disk.
updater = Updater(working_path, json_data)
json_data = updater.execute()
with open(json_filename, 'w') as json_file:
    json.dump(json_data, json_file, sort_keys=True, indent=4, encoding='utf-8')
    pass

updated_addons = updater.get_updated_addons()
#updated_addons = [{'name': 'plugin.video.youtube'}]
#updated_addons = _collect_all(json_data)

# Write the forum-post report for whatever actually changed.
bb_code_writer = BBCodeWriter(working_path, json_data, updated_addons)
bb_code_writer.write()

print '================================================================================'
print 'DONE'
class Parser:
    """Parses IDS configuration artefacts (rule files, classifications,
    generator maps, reference configs, sid-msg maps, and filter files) and
    feeds the extracted data to an Updater bound to one update run.
    NOTE(review): Python 2 syntax (`except X, e:`) further down."""

    def __init__(self, update):
        self.update = update
        self.updater = Updater(update)

    def save(self):
        # Persist everything the updater accumulated during parsing.
        self.updater.saveAll()

    def parseRuleFile(self, paths):
        """Method to initiate parsing of a rule file. parseFile returns
        a method which feeds the file line by line to the updateRule method.
        paths argument contains a tuple with absolute and relative file path."""
        # Get the absolute file path
        filename = path.basename(paths[0])
        self.parseFile(self.updateRule, paths, filename=filename)()

    def parseClassificationFile(self, paths):
        """Method to initiate parsing of a classifications file. parseFile
        returns a method which feeds the file line by line to the
        updateClassification method. paths argument contains a tuple with
        absolute and relative file path."""
        self.parseFile(self.updateClassification, paths)()

    def parseGenMsgFile(self, paths):
        """Method to initiate parsing of a gen-msg file. parseFile returns
        a method which feeds the file line by line to the updateGenMsg method.
        paths argument contains a tuple with absolute and relative file path."""
        self.parseFile(self.updateGenMsg, paths)()

    def parseReferenceConfigFile(self, paths):
        """Method to initiate parsing of a reference config file. parseFile
        returns a method which feeds the file line by line to the
        updateReferenceConfig method. paths argument contains a tuple with
        absolute and relative file path."""
        self.parseFile(self.updateReferenceConfig, paths)()

    def parseSidMsgFile(self, paths):
        """Method to initiate parsing of a sid-msg file. parseFile returns
        a method which feeds the file line by line to the updateSidMsg method.
        paths argument contains a tuple with absolute and relative file path."""
        self.parseFile(self.updateSidMsg, paths)()

    def parseFilterFile(self, paths):
        """Method to initiate parsing of a threshold.conf/event_filter file.
        parseFile returns a method which feeds the file line by line to the
        updateFilter method. paths argument contains a tuple with absolute
        and relative file path."""
        self.parseFile(self.updateFilter, paths)()

    def parseConfigFile(self, path, storeHash=True, **kwargs):
        """Method to parse an ASCII file with undefined content. Each line of
        file is sent to updateConfig, which tries to identify the content by
        matching the line to regex patterns defined in patterns-list."""
        # Compile the re-patterns
        patterns = {}
        patterns["rule"] = re.compile(ConfigPatterns.RULE)
        patterns["reference"] = re.compile(ConfigPatterns.REFERENCE)
        patterns["class"] = re.compile(ConfigPatterns.CLASS)
        patterns["genmsg"] = re.compile(ConfigPatterns.GENMSG)
        patterns["sidmsg"] = re.compile(ConfigPatterns.SIDMSG)
        patterns["filter"] = re.compile(ConfigPatterns.EVENT_FILTER)

        # NOTE(review): the parameter `path` shadows the `path` module the
        # sibling methods use, so `path.basename` here is an attribute lookup
        # on the tuple argument and should raise AttributeError -- confirm
        # and rename the parameter (e.g. to `paths`).
        filename = path.basename(path[0])
        self.parseFile(self.updateConfig, path, storeHash, filename=filename,
                       patterns=patterns, **kwargs)()

    def updateRule(self, raw, filename):
        """This method takes a raw rulestring, parses it, and sends each
        valid rule to the updater."""
        logger = logging.getLogger(__name__)

        try:
            # Snowman is currently only handling rules with GID=1.
            # If we find a GID element with a value other than 1, we are parsing the wrong file.
            ruleGID = int(re.match(ConfigPatterns.GID, raw).group(1))
            if ruleGID != 1:
                raise AbnormalRuleError
        except AttributeError:
            # If no GID element is found, GID is 1.
            ruleGID = 1
        except ValueError:
            raise BadFormatError("Bad rule in file '" + filename + "': GID is not numeric! Rulestring: " + raw)

        # Construct a regex to match all elements a raw rulestring
        # must have in order to be considered a valid rule
        # (sid, rev, message and classtype):
        matchPattern = ConfigPatterns.RULE
        pattern = re.compile(matchPattern)

        # Match optional options:
        ruleset = re.match(ConfigPatterns.RULESET, raw)
        priority = re.match(ConfigPatterns.PRIORITY, raw)
        references = re.findall(ConfigPatterns.RULEREFERENCE, raw)
        eventFilter = re.match(ConfigPatterns.THRESHOLD, raw)
        detectionFilter = re.match(ConfigPatterns.DETECTION_FILTER, raw)

        # If the raw rule matched the regex:
        result = pattern.match(raw)
        if result:
            # Assign some helpful variable-names:
            # A leading "#" means the rule is present but commented out (inactive).
            if "#" in result.group(1):
                raw = raw.lstrip("# ")
                ruleActive = False
            else:
                ruleActive = True
            try:
                ruleSID = int(result.group(2))
                ruleRev = int(result.group(3))
            except ValueError:
                raise BadFormatError("Bad rule in '" + filename + "': SID or rev is not numeric! Rulestring: " + raw)
            ruleMessage = result.group(4)
            ruleClassName = result.group(5)

            # Ruleset name set to filename if not found in raw string:
            try:
                rulesetName = ruleset.group(1)
            except AttributeError:
                rulesetName = re.sub("\.rules$", "", filename)

            if priority:
                try:
                    rulePriority = int(priority.group(0))
                except ValueError:
                    raise BadFormatError("Bad rule in '" + filename + "': priority is not numeric! Rulestring: " + raw)
            else:
                rulePriority = None

            # Remove filters from raw string before storage:
            replace = Replace("")
            filters = ""
            raw = re.sub(r"detection_filter:.*?;", replace, raw)
            filters += replace.matched or ""
            raw = re.sub(r"threshold:.*?;", replace, raw)
            filters += replace.matched or ""
            raw = " ".join(raw.split())

            self.updater.addRuleSet(rulesetName)
            self.updater.addRule(ruleSID, ruleRev, raw, ruleMessage, ruleActive,
                                 rulesetName, ruleClassName, rulePriority, ruleGID)

            if detectionFilter:
                dfTrack = detectionFilter.group(1)
                dfCount = int(detectionFilter.group(2))
                dfSeconds = int(detectionFilter.group(3))
                self.checkFilter(ruleGID, ruleSID, dfTrack, dfCount, dfSeconds)
                self.updater.addFilter(ruleSID, dfTrack, dfCount, dfSeconds)

            if eventFilter:
                efType = eventFilter.group(1)
                efTrack = eventFilter.group(2)
                efCount = int(eventFilter.group(3))
                efSeconds = int(eventFilter.group(4))
                self.checkFilter(ruleGID, ruleSID, efTrack, efCount, efSeconds, efType)
                self.updater.addFilter(ruleSID, efTrack, efCount, efSeconds, efType)

            if references:
                for reference in references:
                    try:
                        referenceTypeName = reference[0]
                        referenceData = reference[1]
                        self.updater.addReference(referenceTypeName, referenceData, ruleSID)
                    except IndexError:
                        # NOTE(review): ruleSID is an int here; "+" between
                        # str and int raises TypeError -- should be str(ruleSID).
                        logger.warning(
                            "Skipping badly formatted reference for rule sid=" + ruleSID
                            + " in file '" + filename + "': " + str(reference)
                        )

    def updateClassification(self, raw):
        """Method for parsing classification strings. Classification data
        consists of three comma-separated strings which are extracted with a
        regex, and split up in the three respective parts: classtype,
        description and priority. When a classification is deemed valid, it
        is sent to the updater."""
        # Regex: Match "config classification: " (group 0),
        # and everything that comes after (group 1), which is the classification data.
        result = re.match(ConfigPatterns.CLASS, raw)
        if result:
            # Split the data and store as separate strings
            classification = result.group(1).split(",")
            try:
                classtype = classification[0]
                description = classification[1]
                priority = int(classification[2])
            except (IndexError, ValueError):
                # If one or more indexes are invalid, the classification is badly formatted
                raise BadFormatError("Badly formatted rule classification: " + raw)
            self.updater.addClass(classtype, description, priority)

    def updateGenMsg(self, raw):
        """Method for parsing generator strings. Generator data consists of
        two numbers and a message string, all three separated with a ||.
        All lines conforming to this pattern are split up in the three
        respective parts: GID (generatorID), alertID and message. Valid
        generators are sent to updater."""
        # Regex: Match a generator definition: int || int || string
        # If the line matches, it is stored in group(0)
        result = re.match(ConfigPatterns.GENMSG, raw)
        if result:
            # Split the line into GID, alertID and message
            # (becomes generator[0], [1] and [2] respectively)
            generator = result.group(0).split(" || ")
            try:
                gid = int(generator[0])
                alertID = int(generator[1])
                message = generator[2]
                self.updater.addGenerator(gid, alertID, message)
            except (ValueError, IndexError):
                # If one or more indexes are invalid, or gid/alertID is not
                # numeric, the generator is badly formatted
                raise BadFormatError("Badly formatted generator: " + raw)

    def updateReferenceConfig(self, raw):
        """Method for parsing reference type strings, containing type name
        and url-prefix. Valid reference types are sent to updater."""
        result = re.match(ConfigPatterns.REFERENCE, raw)
        if result:
            referenceType = result.group(1).strip()
            urlPrefix = result.group(2).strip()
            self.updater.addReferenceType(referenceType, urlPrefix)

    def updateSidMsg(self, raw):
        """The sid-msg.map file contains mappings between ruleSIDs, rule
        messages and ruleReferences. This method parses one line of this
        file (raw), and checks if the SID corresponds to a ruleRevision in
        this update. If this is the case, it updates the message in the
        ruleRevision and creates all ruleReferences.

        updatedRules is a dictionary with {SID:referenceID} entries. This is
        needed because rules are referenced by SID in sid-msg.map and by
        revisionID in Update.ruleRevisions."""
        # Regex: Match a generator definition: SID || message (|| reference)*
        # SID is stored in group(1), and "message (|| reference)*" in group(2)
        result = re.match(ConfigPatterns.SIDMSG, raw)
        # If we have a match AND the SID is in updatedRules (rule was updated):
        if result:
            try:
                # We have a valid line, fetch the SID
                ruleSID = int(result.group(1))
            except ValueError:
                raise BadFormatError("Expected numeric SID.")
            # Get message and ruleReferences, if any
            data = result.group(2).split(" || ")
            dataiter = iter(data)
            try:
                # Rule message is always the first element
                message = next(dataiter)
                self.updater.addMessage(ruleSID, message)
                # Any succeeding elements are ruleReferences, formatted
                # with referenceType,referenceValue:
                for reference in dataiter:
                    referenceData = reference.split(",")
                    referenceType = referenceData[0]
                    referenceValue = referenceData[1]
                    self.updater.addReference(referenceType, referenceValue, ruleSID)
            except (StopIteration, IndexError):
                raise BadFormatError("Badly formatted sid-msg: " + raw)

    def updateFilter(self, raw):
        # Try an event_filter line first; otherwise fall back to suppress.
        eventFilter = re.match(ConfigPatterns.EVENT_FILTER, raw)
        if eventFilter:
            efGID = eventFilter.group(1)
            efSID = eventFilter.group(2)
            efType = eventFilter.group(3)
            efTrack = eventFilter.group(4)
            efCount = eventFilter.group(5)
            efSeconds = eventFilter.group(6)
            self.checkFilter(efGID, efSID, efTrack, efCount, efSeconds, efType)
            self.updater.addFilter(int(efSID), efTrack, int(efCount), int(efSeconds), efType)
        else:
            suppress = re.match(ConfigPatterns.SUPPRESS, raw)
            if suppress:
                supGID = suppress.group(1)
                supSID = suppress.group(2)
                supTrack = suppress.group(3)
                supIP = suppress.group(4)
                # A track + IP list means a targeted suppress; validate both.
                if supTrack != "" and supIP != "":
                    if supTrack not in ["by_src", "by_dst"]:
                        raise BadFormatError("Bad suppress: " + raw)
                    # Strip the optional [ ... ] around the address list.
                    supIP = supIP.lstrip("[")
                    supIP = supIP.rstrip("]")
                    supIP = supIP.split(",")
                    for address in supIP:
                        if not re.match(ConfigPatterns.VALIDIPMASK, address):
                            raise BadFormatError("Bad IP address in suppress: " + raw)
                    self.updater.addSuppress(supSID, supTrack, supIP)
                else:
                    self.updater.addSuppress(supSID)

    def parseFile(self, fn, filePathTuple, storeHash=True, **kwargs):
        def parse():
            """Method for simple parsing of a file defined by filePathTuple.
            Every line is sent to the function defined by fn."""
            absoluteFilepath, relativeFilePath = filePathTuple
            logger = logging.getLogger(__name__)
            logger.info("Parsing file " + absoluteFilepath + ".")

            if storeHash:
                try:
                    ruleFile = self.update.source.files.get(name=relativeFilePath)
                except UpdateFile.DoesNotExist:
                    ruleFile = self.update.source.files.create(name=relativeFilePath, isParsed=False)
                oldHash = ruleFile.checksum
                newHash = md5sum(absoluteFilepath)

            # Re-parse only when hashing is disabled or the checksum changed.
            if not storeHash or (oldHash != newHash):
                try:
                    infile = open(absoluteFilepath, "r")
                except IOError:
                    logger.info("File '%s' not found, nothing to parse." % absoluteFilepath)
                    return
                if storeHash:
                    ruleFile.isParsed = True
                    ruleFile.checksum = newHash
                    ruleFile.save()

                it = iter(enumerate(infile))
                previous = ""
                for i, line in it:
                    # Concatinate the current line with the previous
                    line = previous + line
                    previous = ""
                    # If the line is incomplete, store what we have, and read next line.
                    if re.match(r"(.*)\\$", line):
                        previous = line.rstrip("\\\n")
                    else:
                        try:
                            fn(raw=line, **kwargs)
                        except AbnormalRuleError:
                            logger.info("Skipping abnormal rule in '%s'" % absoluteFilepath)
                        except BadFormatError, e:
                            # Log exception message, file name and line number
                            logger.error("%s in file '%s', around line %s." % (str(e), absoluteFilepath, str(i)))
            # NOTE(review): chunk is truncated here -- the body of this else
            # (and presumably a `return parse`) lies beyond this view.
            else:
#!/usr/bin/python # -*- coding: utf-8 -*- from Compiler import * from updater import Updater import os import _mysql import re import sys import fnmatch import paths import blacklist if len(sys.argv) > 1: if sys.argv[1] == 'update': updater = Updater() updater.update() print 'updated' exit() elif sys.argv[1] == 'help': print 'this program is used as command line tool to control pclp compiler daemon' print 'Usage' print 'main.py help show this message and exit' print 'main.py update update the file used as a test case and comparator, please run this parameter' print ' before you run the daemon' print 'main.py run the daemon' exit() else: print 'unknown argument(s)' while 1 == 1:
area53
'''
# NOTE(review): the opening quotes of the docstring above lie outside this
# chunk -- "area53" is the tail of the module docstring.
import sys
import os
import ConfigParser
from updater import Updater

# Directory containing this script; config.ini is expected alongside it.
basePath = os.path.realpath(os.path.dirname(sys.argv[0]))

config = ConfigParser.ConfigParser()
config.read(basePath + '/config.ini')

# AWS credentials from the [aws_keys] section of config.ini.
key = config.get("aws_keys","key")
access = config.get("aws_keys","access")

# Point the DNS zone record for mysite.com at this host's current IP.
my_updater = Updater(key,access,log=True)
current_ip = my_updater.get_ip()
my_updater.set_zone_ip('mysite.com',current_ip)
def initialize(self, environment):
    """Bind the environment and build the updater/shock collaborators."""
    self.environment = environment
    self.updater = Updater(environment)
    self.shocker = Shock()
def __init__(self, update):
    """Keep a reference to the update and construct its Updater."""
    self.update = update
    self.updater = Updater(self.update)