def __init__(self, uid, small=False, *args, **kwargs):
    QtGui.QListWidgetItem.__init__(self, *args, **kwargs)
    self.uid = uid
    self.name = None
    self.price = None
    self.activation = None
    self.description = None
    self.owned = 0
    self.disabled = False
    self.small = small
    self.reinforcementType = ""
    self.canmove = False
    if small:
        self.FORMATTER_REINFORCEMENT = unicode(util.readfile("galacticwar/formatters/reinforcementSmall.qthtml"))
        self.TEXTWIDTH = 100
    else:
        self.FORMATTER_REINFORCEMENT = unicode(util.readfile("galacticwar/formatters/reinforcement.qthtml"))
        self.TEXTWIDTH = 370
    self.ICONSIZE = 64
    self.PADDING = 10
    self.WIDTH = self.ICONSIZE + self.TEXTWIDTH
    self.HEIGHT = 100
    self.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |
                  QtCore.Qt.ItemIsDropEnabled | QtCore.Qt.ItemIsDragEnabled)
    self.setHidden(True)
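# Note: nearly every snippet in this collection assumes a small util.readfile
# (or bare readfile) helper that returns a file's contents. The real helpers
# differ between these projects (some return a string, some a list of lines);
# the sketch below is only an illustration of the simplest string-returning
# form, not any particular project's implementation.
def readfile(path):
    """Return the entire contents of `path` as a string."""
    with open(path) as fp:
        return fp.read()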
def __init__(self, client, *args, **kwargs):
    BaseClass.__init__(self, *args, **kwargs)
    self.setupUi(self)
    self.client = client
    self.client.coopTab.layout().addWidget(self)
    # Dictionary containing our actual games.
    self.games = {}
    # Ranked search UI
    self.ispassworded = False
    self.loaded = False
    self.coop = {}
    self.cooptypes = {}
    self.canChooseMap = False
    self.options = []
    self.client.showCoop.connect(self.coopChanged)
    self.client.coopInfo.connect(self.processCoopInfo)
    self.client.gameInfo.connect(self.processGameInfo)
    self.coopList.header().setResizeMode(0, QtGui.QHeaderView.ResizeToContents)
    self.coopList.setItemDelegate(CoopMapItemDelegate(self))
    self.gameList.setItemDelegate(GameItemDelegate(self))
    self.gameList.itemDoubleClicked.connect(self.gameDoubleClicked)
    self.coopList.itemDoubleClicked.connect(self.coopListDoubleClicked)
    self.coopList.itemClicked.connect(self.coopListClicked)
    self.client.coopLeaderBoard.connect(self.processLeaderBoardInfos)
    self.tabLeaderWidget.currentChanged.connect(self.askLeaderBoard)
    self.linkButton.clicked.connect(self.linkVanilla)
    # Load game name from settings (yay, it's persistent!)
    self.load_last_hosted_settings()
    self.leaderBoard.setVisible(0)
    self.stylesheet = util.readstylesheet("coop/formatters/style.css")
    self.FORMATTER_LADDER = unicode(util.readfile("coop/formatters/ladder.qthtml"))
    self.FORMATTER_LADDER_HEADER = unicode(util.readfile("coop/formatters/ladder_header.qthtml"))
    self.leaderBoard.setStyleSheet(self.stylesheet)
    self.leaderBoardTextGeneral.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextOne.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextTwo.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextThree.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextFour.anchorClicked.connect(self.openUrl)
    self.replayDownload = QNetworkAccessManager()
    self.replayDownload.finished.connect(self.finishRequest)
    self.selectedItem = None
def __init__(self, resource):
    template = resource['body']
    self.errors = []
    # For compatibility with older versions, we provide defaults
    # in case some attributes are missing.
    if 'require-permissions' in resource:
        self.permissions = resource["require-permissions"]
    else:
        self.permissions = []
    if 'require-method' in resource:
        self.methods = resource['require-method']
    else:
        self.methods = ['POST', 'GET']
    # Yes, I know this logic is ugly.
    if 'no-navheader' in resource:
        if resource['no-navheader']:
            header = util.readfile(os.path.join(directories.htmldir, 'pageheader_nonav.html'))
        else:
            header = util.readfile(os.path.join(directories.htmldir, 'pageheader.html'))
    else:
        header = util.readfile(os.path.join(directories.htmldir, 'pageheader.html'))
    if 'no-header' in resource:
        if resource['no-header']:
            header = ""
    footer = util.readfile(os.path.join(directories.htmldir, 'pagefooter.html'))
    templatesource = header + template + footer
    self.template = mako.template.Template(templatesource)
def page(self, module, dummy2, page, *args, **kwargs):
    # Permission handling for pages
    if 'require-permissions' in modules.ActiveModules[module][page]:
        for i in modules.ActiveModules[module][page]['require-permissions']:
            pages.require(i)
    with modules.modulesLock:  # need to find an alternative to this lock
        if modules.ActiveModules[module][page]['resource-type'] == 'page':
            # Allow a page to specify that it can only be accessed via POST or such
            if "require-method" in modules.ActiveModules[module][page]:
                if cherrypy.request.method not in modules.ActiveModules[module][page]['require-method']:
                    # Redirect to the wrongmethod error page
                    raise cherrypy.HTTPRedirect('/errors/wrongmethod')
            # This is pretty much the worst performing piece of code in the system.
            # Every single request it compiles a new template and renders that, but not
            # before loading two files from disk. But I don't feel like writing another
            # ten pages of bookkeeping code today. TODO
            if 'no-navheader' in modules.ActiveModules[module][page]:
                if modules.ActiveModules[module][page]['no-navheader']:
                    header = util.readfile('pages/pageheader_nonav.html')
                else:
                    header = util.readfile('pages/pageheader.html')
            else:
                header = util.readfile('pages/pageheader.html')
            return mako.template.Template(
                header +
                modules.ActiveModules[module][page]['body'] +
                util.readfile('pages/pagefooter.html')
            ).render(
                kaithem=kaithem.kaithem,
                request=cherrypy.request,
            )
def __init__(self, client, *args, **kwargs):
    BaseClass.__init__(self, *args, **kwargs)
    self.setupUi(self)
    self.client = client
    self.client.coopTab.layout().addWidget(self)
    # Dictionary containing our actual games.
    self.games = {}
    # Ranked search UI
    self.ispassworded = False
    self.loaded = False
    self.coop = {}
    self.cooptypes = {}
    self.canChooseMap = False
    self.options = []
    self.client.showCoop.connect(self.coopChanged)
    self.client.coopInfo.connect(self.processCoopInfo)
    self.client.gameInfo.connect(self.processGameInfo)
    self.coopList.header().setResizeMode(0, QtGui.QHeaderView.ResizeToContents)
    self.coopList.setItemDelegate(CoopMapItemDelegate(self))
    self.gameList.setItemDelegate(GameItemDelegate(self))
    self.gameList.itemDoubleClicked.connect(self.gameDoubleClicked)
    self.coopList.itemDoubleClicked.connect(self.coopListDoubleClicked)
    self.coopList.itemClicked.connect(self.coopListClicked)
    self.client.coopLeaderBoard.connect(self.processLeaderBoardInfos)
    self.tabLeaderWidget.currentChanged.connect(self.askLeaderBoard)
    self.linkButton.clicked.connect(self.linkVanilla)
    # Load game name from settings (yay, it's persistent!)
    self.loadGameName()
    self.loadPassword()
    self.leaderBoard.setVisible(0)
    self.stylesheet = util.readstylesheet("coop/formatters/style.css")
    self.FORMATTER_LADDER = unicode(util.readfile("coop/formatters/ladder.qthtml"))
    self.FORMATTER_LADDER_HEADER = unicode(util.readfile("coop/formatters/ladder_header.qthtml"))
    self.leaderBoard.setStyleSheet(self.stylesheet)
    self.leaderBoardTextGeneral.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextOne.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextTwo.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextThree.anchorClicked.connect(self.openUrl)
    self.leaderBoardTextFour.anchorClicked.connect(self.openUrl)
    self.replayDownload = QNetworkAccessManager()
    self.replayDownload.finished.connect(self.finishRequest)
    self.selectedItem = None
def __init__(self, client, *args, **kwargs):
    logger.debug("Lobby instantiating.")
    BaseClass.__init__(self, *args, **kwargs)
    SimpleIRCClient.__init__(self)
    self.setupUi(self)
    # CAVEAT: These will fail if loaded before theming is loaded
    import json
    self.OPERATOR_COLORS = json.loads(util.readfile("chat/formatters/operator_colors.json"))
    self.client = client
    self.channels = {}
    # avatar downloader
    self.nam = QNetworkAccessManager()
    self.nam.finished.connect(self.finishDownloadAvatar)
    # nickserv stuff
    self.identified = False
    # IRC parameters
    self.ircServer = IRC_SERVER
    self.ircPort = IRC_PORT
    self.crucialChannels = ["#aeolus"]
    self.optionalChannels = []
    # We can't send commands until the welcome message is received
    self.welcomed = False
    # Load colors and styles from theme
    self.specialUserColors = json.loads(util.readfile("chat/formatters/special_colors.json"))
    self.a_style = util.readfile("chat/formatters/a_style.qss")
    # Load UI and perform some tweaks
    self.tabBar().setTabButton(0, 1, None)
    # Add self to client's window
    self.client.chatTab.layout().addWidget(self)
    self.tabCloseRequested.connect(self.closeChannel)
    # Add signal handler for game exit
    self.client.gameExit.connect(self.processGameExit)
    self.replayInfo = fa.exe.instance.info
    # Hook into client's connection and autojoin mechanisms
    self.client.connected.connect(self.connect)
    self.client.publicBroadcast.connect(self.announce)
    self.client.autoJoin.connect(self.autoJoin)
    self.client.channelsUpdated.connect(self.addChannels)
    self.channelsAvailable = []
    self.timer = QtCore.QTimer(self)
    self.timer.timeout.connect(self.poll)
    # disconnection checks
    self.canDisconnect = False
def _matcheol(file, origfile):
    "Convert EOL markers in a file to match origfile"
    tostyle = _eoltype(util.readfile(origfile))
    if tostyle:
        data = util.readfile(file)
        style = _eoltype(data)
        if style:
            newdata = data.replace(style, tostyle)
            if newdata != data:
                util.writefile(file, newdata)
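# Note: _matcheol above assumes an _eoltype helper that guesses a file's EOL
# convention. A plausible sketch of such a helper (an assumption for
# illustration, not necessarily the exact implementation used with this code):
def _eoltype(data):
    "Guess the EOL type of a chunk of text"
    if '\0' in data:    # binary file, leave alone
        return None
    if '\r\n' in data:  # Windows
        return '\r\n'
    if '\r' in data:    # old Mac
        return '\r'
    if '\n' in data:    # Unix
        return '\n'
    return None         # unknown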
def invoke_mutabelle(theory, env, case, paths, dep_paths, playground):
    """Mutant testing for counterexample generators in Isabelle"""
    (loc_isabelle,) = paths
    (dep_isabelle,) = dep_paths
    more_settings = '''
ISABELLE_GHC="/usr/bin/ghc"
'''
    prepare_isabelle_repository(loc_isabelle, dep_isabelle, more_settings=more_settings)
    os.chdir(loc_isabelle)
    (return_code, log) = env.run_process('bin/isabelle', 'mutabelle', '-O', playground, theory)
    try:
        mutabelle_log = util.readfile(path.join(playground, 'log'))
    except IOError:
        mutabelle_log = ''
    mutabelle_data = dict(
        (tool, {'counterexample': c, 'no_counterexample': n, 'timeout': t, 'error': e})
        for tool, c, n, t, e in re.findall(r'(\S+)\s+: C: (\d+) N: (\d+) T: (\d+) E: (\d+)', log))
    return (return_code == 0 and mutabelle_log != '',
            extract_isabelle_run_summary(log),
            {'mutabelle_results': {theory: mutabelle_data}},
            {'log': log, 'mutabelle_log': mutabelle_log},
            None)
def get_item_count():
    reader = readfile(new_train_file)
    item_count = defaultdict(lambda: defaultdict(int))
    time_item_count = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    idx = 0
    for (__user, sku, category, __query, click_time) in reader:
        time_block = get_time_feature(click_time)
        idx += 1
        item_count[category][sku] += magic_num
        time_item_count[time_block][category][sku] += magic_num
    item_sort = dict()
    for category in item_count:
        item_sort[category] = sorted(item_count[category].items(),
                                     key=lambda x: x[1], reverse=True)
    smooth_time_item_count = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for time_block in time_item_count:
        for cat in time_item_count[time_block]:
            for sku in time_item_count[time_block][cat]:
                smooth_time_item_count[time_block][cat][sku] = item_count[cat][sku] * 3.0 / block_size
    # Smooth each time block by adding the counts of its neighbouring blocks.
    for time_block in time_item_count:
        for cat in time_item_count[time_block]:
            for sku in time_item_count[time_block][cat]:
                smooth_time_item_count[time_block][cat][sku] = time_item_count[time_block][cat][sku]
                if time_block == 0 or time_block == MAX_BLOCK:
                    smooth_time_item_count[time_block][cat][sku] += time_item_count[time_block][cat][sku]
                if time_block >= 1:
                    smooth_time_item_count[time_block][cat][sku] += time_item_count[time_block - 1][cat][sku]
                if time_block < MAX_BLOCK:
                    smooth_time_item_count[time_block][cat][sku] += time_item_count[time_block + 1][cat][sku]
    return item_count, item_sort, smooth_time_item_count
def get_bigram_model(item_word, item_sort, cat_count):
    hot_sku_words = defaultdict(lambda: defaultdict(set))
    for cat in item_word:
        for sku in item_word[cat]:
            hots = item_word[cat][sku].items()
            hot_sku_words[cat][sku] = set([i[0] for i in hots if i[1] >= GLOBAL_BIGRAM_QUERY])
    hot_words = dict()
    for cat in hot_sku_words:
        hot_words[cat] = set()
        for sku in hot_sku_words[cat]:
            hot_words[cat] = hot_words[cat].union(hot_sku_words[cat][sku])
    reader = readfile(new_train_file)
    bigram_item_word = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    idx = 0
    for (__user, sku, category, raw_query, ___click_time) in reader:
        idx += 1
        bound = cat_count[category][HOT_SIZE]
        popular = [i[0] for i in item_sort[category][0:bound]]
        if sku in popular:
            bigram = get_bigram_word(raw_query, hot_words, category)
            for w in bigram:
                bigram_item_word[category][sku][w] += magic_num
            cat_count[category][BIGRAM_HOT] += magic_num
    return bigram_item_word, cat_count, hot_words
def make_query_correct(target_file, out_file, tp):
    local_cache = dict()
    lemmatizer = WordNetLemmatizer()
    print("Reading file")
    reader = readfile(target_file)
    print("Read complete")
    with open(out_file, 'w', encoding='utf-8') as writer:
        writer.write('user,sku,category,query,click_time\n')
        if tp == 'train':
            print("Making up training data")
            # we do not use query_time here
            for (user, sku, category, raw_query, click_time, __query_time) in reader:
                new_query = correct_query(raw_query, lemmatizer, local_cache)
                new_click_time = get_new_time(click_time)
                outline = ','.join([user, sku, category, new_query, new_click_time])
                writer.write(outline + '\n')
        elif tp == 'test':
            print("Making up test data")
            # we do not use query_time here
            for (user, category, raw_query, click_time, __query_time) in reader:
                new_query = correct_query(raw_query, lemmatizer, local_cache)
                new_click_time = get_new_time(click_time)
                outline = ','.join([user, category, new_query, new_click_time])
                writer.write(outline + '\n')
        else:
            raise Exception('Error Query Correction Request!!!')
def read_clusters():
    data = readfile(cluster_path)
    clusters = []
    centers = []
    for line in data:  # one cluster per line
        if line.startswith("center:"):  # this line holds a cluster center
            center = {}
            items = line[8:-1].split(",")
            for item in items:
                item = item.strip()
                if item:
                    wid, weight = item.split(":")
                    center[int(wid.strip())] = float(weight.strip())
            centers.append(center)
        else:
            c = {}
            docs = line.split(",")
            for doc in docs:
                id, sim = doc.split(":")
                id = int(id)
                sim = float(sim)
                c[id] = sim
            clusters.append(c)
    return clusters, centers
def transform_docs():
    docs = readfile(corpus_path)
    outf = open(new_docs_path, 'w')
    voca = readvoca()
    outf.write("# docs num %d,voca num:%d\n" % (len(docs), len(voca)))
    i = 0
    for doc in docs:
        doc = doc.split()[-1]  # input format has been changed
        doc_wn = {}
        # count the frequency of each word in this doc
        for ch in doc:
            if is_ch_char(ch) and ch in voca:
                v_id = voca.index(ch)
                if doc_wn.has_key(v_id):
                    doc_wn[v_id] += 1  # word v_id seen again
                else:
                    doc_wn[v_id] = 1   # word v_id seen for the first time
        # write doc_wn to the output file
        #print "write new doc:%d" % i
        words_li = ["%d:%d" % (w, n) for w, n in doc_wn.items()]
        words_str = ','.join(words_li)
        outf.write("%d\t%s\n" % (i, words_str))
        i += 1
    outf.close()
def launch_user_grant(export: bool = False):
    "deploy the specifications to run the user grant website"
    config = configuration.get_config()
    if config.user_grant_domain == '':
        command.fail("no user_grant_domain specified in setup.yaml")
    if config.user_grant_email_domain == '':
        command.fail("no user_grant_email_domain specified in setup.yaml")
    skey, scert = keys.decrypt_https(config.user_grant_domain)
    skey64, scert64 = base64.b64encode(skey), base64.b64encode(scert)
    ikey = authority.get_decrypted_by_filename("./kubernetes.key")
    icert = authority.get_pubkey_by_filename("./kubernetes.pem")
    ikey64, icert64 = base64.b64encode(ikey), base64.b64encode(icert)
    _, upstream_cert_path = authority.get_upstream_cert_paths()
    if not os.path.exists(upstream_cert_path):
        command.fail("user-grant-upstream.pem not found in homeworld directory")
    upstream_cert = util.readfile(upstream_cert_path).decode()
    launch_spec("//user-grant:kubernetes.yaml", {
        "SERVER_KEY_BASE64": skey64.decode(),
        "SERVER_CERT_BASE64": scert64.decode(),
        "ISSUER_KEY_BASE64": ikey64.decode(),
        "ISSUER_CERT_BASE64": icert64.decode(),
        "EMAIL_DOMAIN": config.user_grant_email_domain,
        "UPSTREAM_CERTIFICATE": upstream_cert,
    }, export=export)
def post_run():
    docs = readfile(corpus_path)
    clusters, centers = read_clusters()
    voca = readvoca()
    outf = open(new_cluster_path, "w")
    for i in range(0, len(centers)):
        c = clusters[i]
        # outf.write( 30*'#'+'cluster '+str(i)+30*'#'+'\n' )
        # # substitute wordid
        # words = sorted(centers[i].items(),key=lambda d:d[1],reverse=True)
        # outf.write("# ")
        # for wid,weight in words[:num_center_words]:
        #     outf.write( voca[ wid ].encode('gb2312') )
        #     outf.write( '[%f] ' % weight )
        # outf.write("\n")
        tmp_li = sorted(c.items(), key=lambda d: d[1], reverse=True)
        for doc_id, sim in tmp_li:
            outf.write(docs[doc_id].encode("gb2312") + "\n")
            # outf.write( '[%f]\n' % sim )
    # the remaining "other" cluster
    c = clusters[-1]
    # outf.write( 30*'#'+'cluster others'+30*'#'+'\n' )
    tmp_li = sorted(c.items(), key=lambda d: d[1], reverse=True)
    for doc_id, sim in tmp_li:
        outf.write(docs[doc_id].encode("gb2312"))
        # outf.write( '[%f]\n' % sim )
    print "[finished] result has been written to %s" % new_cluster_path
class CommentItem(QtGui.QListWidgetItem):
    FORMATTER_COMMENT = unicode(util.readfile("modvault/comment.qthtml"))

    def __init__(self, parent, uid, *args, **kwargs):
        QtGui.QListWidgetItem.__init__(self, *args, **kwargs)
        self.parent = parent
        self.uid = uid
        self.text = ""
        self.author = ""
        self.date = None

    def update(self, dic):
        self.text = dic["text"]
        self.author = dic["author"]
        self.date = strtodate(dic["date"])
        self.setText(self.FORMATTER_COMMENT.format(text=self.text,
                                                   author=self.author,
                                                   date=str(self.date)))

    def __ge__(self, other):
        return self.date > other.date

    def __lt__(self, other):
        return self.date <= other.date
def make_query_correct(target_file, out_file, tp):
    local_cache = dict()
    lemmatizer = WordNetLemmatizer()
    reader = readfile(target_file)
    with open(out_file, 'w') as writer:
        writer.write('data:\n')
        if tp == 'train':
            # we do not use query_time here
            for (user, sku, category, raw_query, click_time, __query_time) in reader:
                new_query = correct_query(raw_query, lemmatizer, local_cache)
                new_click_time = get_new_time(click_time)
                outline = ','.join([user, sku, category, new_query, new_click_time])
                writer.write(outline + '\n')
        elif tp == 'test':
            # we do not use query_time here
            for (user, category, raw_query, click_time, __query_time) in reader:
                new_query = correct_query(raw_query, lemmatizer, local_cache)
                new_click_time = get_new_time(click_time)
                outline = ','.join([user, category, new_query, new_click_time])
                writer.write(outline + '\n')
        else:
            raise Exception('Error Query Correction Request!!!')
def loader():
    docdir = os.path.join(util.datapath, 'help')
    path = os.path.join(docdir, topic + ".txt")
    doc = gettext(util.readfile(path))
    for rewriter in helphooks.get(topic, []):
        doc = rewriter(topic, doc)
    return doc
def lint_file(path, kind):
    def import_script(import_path):
        # The user can specify paths using backslashes (such as when
        # linting Windows scripts in a posix environment).
        import_path = import_path.replace('\\', os.sep)
        import_path = os.path.join(os.path.dirname(path), import_path)
        return lint_file(import_path, 'js')

    def _lint_error(*args):
        return lint_error(normpath, *args)

    normpath = util.normpath(path)
    if normpath in lint_cache:
        return lint_cache[normpath]
    print normpath
    contents = util.readfile(path)
    lint_cache[normpath] = _Script()
    script_parts = []
    if kind == 'js':
        script_parts.append((None, contents))
    elif kind == 'html':
        for script in _findhtmlscripts(contents):
            if script['type'] == 'external':
                other = import_script(script['src'])
                lint_cache[normpath].importscript(other)
            elif script['type'] == 'inline':
                script_parts.append((script['pos'], script['contents']))
            else:
                assert False, 'Invalid internal script type %s' % script['type']
    else:
        assert False, 'Unsupported file kind: %s' % kind
    _lint_script_parts(script_parts, lint_cache[normpath], _lint_error, conf, import_script)
    return lint_cache[normpath]
def read_pls(self, plsname):
    """
    This is the (pls) playlist reading function.

    Arguments: plsname - the playlist filename
    Returns:   The list of interesting lines in the playlist
    """
    try:
        lines = util.readfile(plsname)
    except IOError:
        print String(_('Cannot open file "%s"')) % plsname
        return 0
    playlist_lines_dos = map(lambda l: l.strip(), lines)
    playlist_lines = filter(lambda l: l[0:4] == 'File', playlist_lines_dos)
    for line in playlist_lines:
        numchars = line.find("=") + 1
        if numchars > 0:
            playlist_lines[playlist_lines.index(line)] = line[numchars:]
    (curdir, playlistname) = os.path.split(plsname)
    os.chdir(curdir)
    for line in playlist_lines:
        if line.endswith('\r\n'):
            line = line.replace('\\', '/')  # Fix MSDOS slashes
        if line.find('://') > 0:
            self.playlist.append(line)
        elif os.path.isabs(line):
            if os.path.exists(line):
                self.playlist.append(line)
        else:
            if os.path.exists(os.path.abspath(os.path.join(curdir, line))):
                self.playlist.append(os.path.abspath(os.path.join(curdir, line)))
def open(self, filepath=None, event=None, addmodule=True, is_preview=False):
    if filepath is None:
        filepath = filedialog.askopenfilename(defaultextension=".py")
    if not filepath:
        return
    tab = self.tab(filepath=filepath)
    self.close(self.tab_id(attr=("is_preview", True)))
    if tab:
        self.notebook.select(tab)
    else:
        self.add(os.path.basename(filepath), filepath, readfile(filepath), is_preview=is_preview)
        tab = self.tab(filepath=filepath)
        if addmodule:
            err = self.app.projecttree.addfile(filepath)
            if err:
                print("\a")
                for j in err[1].split("\n"):
                    if filepath in j:
                        match = RE_LINE.search(j)
                        if match:
                            linenum = match.group(1)
                            tab.text.linenumbers.error(linenum)
                # ValueError
                tab.showmessage(err[0].__class__.__name__, str(err[0]))
                if isinstance(err[0], NameError):
                    name = RE_NAME_ERROR.search(str(err[0])).group(1)
                    tab.text.highlight(name, "sel")
                elif isinstance(err[0], SyntaxError):
                    tab.text.mark_set("insert", str(err[0].lineno) + "." + str(err[0].offset))
def bookmark_menu(self, arg=None, menuw=None):
    """
    Bookmark list
    """
    bookmarkfile = util.get_bookmarkfile(self.item.filename)
    items = []
    for line in util.readfile(bookmarkfile):
        file = copy.copy(self.item)
        file.info = {}
        sec = int(line)
        hour = int(sec / 3600)
        min = int((sec - (hour * 3600)) / 60)
        sec = int(sec % 60)
        time = '%0.2d:%0.2d:%0.2d' % (hour, min, sec)
        # set a new title
        file.name = Unicode(_('Jump to %s') % (time))
        if hasattr(file, 'tv_show'):
            del file.tv_show
        if not self.item.mplayer_options:
            self.item.mplayer_options = ''
        file.mplayer_options = str(self.item.mplayer_options) + ' -ss %s' % time
        items.append(file)
    if items:
        moviemenu = menu.Menu(self.item.name, items, fxd_file=self.item.skin_fxd)
        menuw.pushmenu(moviemenu)
    return
def get(path, templates={}):
    """
    Load HTML from file and return as string.

    The mutable default `templates` dict is used for memoization.
    """
    if path not in templates:
        templates[path] = util.readfile("templates/" + path + ".html")
    return templates[path]
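# Example of the default-argument memoization above (the template name
# "header" is hypothetical): the first call reads templates/header.html from
# disk, the second returns the already-cached string because the same
# `templates` dict persists across calls.
first = get("header")
again = get("header")
assert first is again  # same cached object, no second disk read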
class PlanetaryItem(QtGui.QListWidgetItem):
    TEXTWIDTH = 370
    ICONSIZE = 64
    PADDING = 10
    WIDTH = ICONSIZE + TEXTWIDTH
    FORMATTER_REINFORCEMENT = unicode(util.readfile("galacticwar/formatters/planetarydefenses.qthtml"))

    def __init__(self, uid, *args, **kwargs):
        QtGui.QListWidgetItem.__init__(self, *args, **kwargs)
        self.uid = uid
        self.structure = None
        self.price = None
        self.activation = None
        self.description = None
        self.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |
                      QtCore.Qt.ItemIsDropEnabled | QtCore.Qt.ItemIsDragEnabled)
        self.setHidden(True)

    def data(self, role):
        if role == QtCore.Qt.UserRole:
            return self.uid
        return super(PlanetaryItem, self).data(role)

    def setEnabled(self):
        self.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |
                      QtCore.Qt.ItemIsDropEnabled | QtCore.Qt.ItemIsDragEnabled)
        self.setText(self.FORMATTER_REINFORCEMENT.format(color="black",
                                                         description=self.description,
                                                         activation=self.activation,
                                                         price=self.price))

    def setDisabled(self):
        self.setFlags(QtCore.Qt.NoItemFlags)
        self.setText(self.FORMATTER_REINFORCEMENT.format(color="grey",
                                                         description=self.description,
                                                         activation=self.activation,
                                                         price=self.price))

    def update(self, message, client):
        '''update this item'''
        self.client = client
        self.structure = message['structure']
        self.price = message['price']
        self.activation = "%0.1f" % (message['activation'] / 60.0)
        self.description = message["description"]
        iconName = "%s_icon.png" % self.structure
        icon = util.iconUnit(iconName)
        self.setIcon(icon)
        self.setHidden(False)
        self.setText(self.FORMATTER_REINFORCEMENT.format(color="black",
                                                         description=self.description,
                                                         activation=self.activation,
                                                         price=self.price))

    def __ge__(self, other):
        '''Comparison operator used for item list sorting'''
        return not self.__lt__(other)

    def __lt__(self, other):
        '''Comparison operator used for item list sorting'''
        return self.price < other.price
def readvoca():
    voca = []
    lines = readfile(voca_path)
    for line in lines:
        id, w, n = line.split()
        n = int(n)
        if n > 1:
            voca.append(w)
    return voca
def make_predictions(st_line, ed_line, out_file, pname, models):
    (cat_count, item_count, item_sort, alpha, beta, item_word, bigram_item_word,
     time_cat_item_dict, cat_word, hot_words) = models[0]
    reader = readfile(new_test_file)
    writer = writefile(out_file)
    line_idx = 0
    for (user, category, raw_query, click_time) in reader:
        line_idx += 1
        if line_idx < st_line:
            continue
        if line_idx > ed_line:
            break
        if line_idx % TEST_STEP == 0:
            print '%s--%d' % (pname, line_idx / TEST_STEP)
        time_block = get_time_feature(click_time)
        try:
            bound = cat_count[category][PREDICT_HOT_SIZE]
            hots = [x[0] for x in item_sort[category][0:bound]]
        except:
            writer.writerow(["0"])
            continue
        try:
            bigram = get_bigram_word(raw_query, hot_words, category)
            words = get_words(raw_query)
            query_size = sum([cat_word[category][w] for w in words])
            if query_size >= 100 and len(bigram) > 0:
                # only queries hot enough that can also generate bigram features
                # are predicted by the boosting model
                rank = [[sku, boosting_bayes(bigram, words, category, sku, alpha, beta,
                                             item_word, bigram_item_word, item_count,
                                             cat_count, time_cat_item_dict, time_block)]
                        for sku in hots]
            elif query_size >= 100 and len(bigram) == 0:
                # if hot enough but unable to generate bigram features,
                # use naive Bayes with time information
                rank = [[sku, time_bayes_query_prediction(words, category, sku, alpha, beta,
                                                          item_word, item_count, cat_count,
                                                          time_cat_item_dict, time_block)]
                        for sku in hots]
            else:
                # otherwise use plain naive Bayes
                rank = [[sku, plain_bayes_query_prediction(words, category, sku, alpha, beta,
                                                           item_word, item_count, cat_count)]
                        for sku in hots]
            rank = sorted(rank, key=lambda x: x[1], reverse=True)
            guesses = [i[0] for i in rank[0:5]]
            guesses = rerank_guess(guesses, user, raw_query)
            writer.writerow([" ".join(guesses)])
        except (TypeError, KeyError):
            # a category we haven't seen before
            writer.writerow([" ".join(hots[0:5])])
class Formatters(object):
    FORMATTER_ANNOUNCEMENT = unicode(util.readfile("chat/formatters/announcement.qthtml"))
    FORMATTER_MESSAGE = unicode(util.readfile("chat/formatters/message.qthtml"))
    FORMATTER_MESSAGE_AVATAR = unicode(util.readfile("chat/formatters/messageAvatar.qthtml"))
    FORMATTER_ACTION = unicode(util.readfile("chat/formatters/action.qthtml"))
    FORMATTER_ACTION_AVATAR = unicode(util.readfile("chat/formatters/actionAvatar.qthtml"))
    FORMATTER_RAW = unicode(util.readfile("chat/formatters/raw.qthtml"))
    NICKLIST_COLUMNS = json.loads(util.readfile("chat/formatters/nicklist_columns.json"))
def load(self, t):
    '''Get the template for the given template name. Use a local cache.'''
    if t not in self.cache:
        try:
            self.cache[t] = util.readfile(self.map[t][1])
        except KeyError, inst:
            raise util.Abort(_('"%s" not in template map') % inst.args[0])
        except IOError, inst:
            raise IOError(inst.args[0],
                          _('template file %s: %s') % (self.map[t][1], inst.args[1]))
    return self.cache[t]
def __init__(self, rootpath, v):
    self.name = v.get('name', 'JOE')
    self.version = v['version']
    self.date = v.get('date', None)
    self.source = v.get('source', None)
    self.path = v.get('path', None)
    if self.path is not None:
        self.path = os.path.join(rootpath, self.path)
    self.comments = readfile(v.get('comments', None), rootpath)
    self.announce = readfile(v.get('announce', None), rootpath)
    self.changelog = readfile(v.get('changelog', None), rootpath)
    self.filedates = readfile(v.get('filedates', None), rootpath)
    self.note = v.get('note', None)
    self.fileset = FileSet(self.path) if self.path is not None else None
    self.msg = None
    if self.announce is not None:
        p = email.parser.FeedParser()
        p.feed(self.announce)
        self.msg = p.close()
def find_release_tag(commit):
    if commit.committer is not None and commit.committer[2] in committags:
        version = committags[commit.committer[2]]
        changelog = b''
        if version in changelogs:
            changelog = util.readfile(changelogs[version]).encode('utf-8')
        return TagCommand(
            id=releasepfx + version.encode('utf-8'),
            from_=b':' + commit.mark,
            tagger=commit.author or commit.committer,
            message=changelog)
def read_ssr(self, ssrname):
    """
    This is the (ssr) slideshow reading function.

    File line format::

        FileName: "image file name"; Caption: "caption text"; Delay: "sec"

    The caption and delay are optional.

    @param ssrname: the slideshow filename
    @returns: the list of interesting lines in the slideshow
    """
    (curdir, playlistname) = os.path.split(ssrname)
    os.chdir(curdir)
    out_lines = []
    try:
        lines = util.readfile(ssrname)
    except IOError:
        print String(_('Cannot open file "%s"')) % ssrname
        return 0
    playlist_lines_dos = map(lambda l: l.strip(), lines)
    playlist_lines = filter(lambda l: l[0] != '#', playlist_lines_dos)
    # Here's where we parse the line. See the format above.
    for line in playlist_lines:
        tmp_list = []
        ss_name = re.findall('FileName: \"(.*?)\"', line, re.I)
        ss_caption = re.findall('Caption: \"(.*?)\"', line, re.I)
        ss_delay = re.findall('Delay: \"(.*?)\"', line, re.I)
        if ss_name != []:
            if ss_caption == []:
                ss_caption += [""]
            if ss_delay == []:
                ss_delay += [5]
            for p in self.get_plugins:
                if os.path.isabs(ss_name[0]):
                    curdir = ss_name[0]
                else:
                    curdir = os.path.abspath(os.path.join(curdir, ss_name[0]))
                for i in p.get(self, [curdir]):
                    if i.type == 'image':
                        i.name = Unicode(ss_caption[0])
                        i.duration = int(ss_delay[0])
                        self.playlist.append(i)
                        break
    self.autoplay = True
def __init__(self, client):
    super(BaseClass, self).__init__()
    self.setupUi(self)
    self.client = client
    client.ladderTab.layout().addWidget(self)
    self.client.statsInfo.connect(self.processStatsInfos)
    self.webview = QtWebKit.QWebView()
    self.LadderRatings.layout().addWidget(self.webview)
    self.loaded = False
    self.client.showLadder.connect(self.updating)
    self.webview.loadFinished.connect(self.webview.show)
    self.leagues.currentChanged.connect(self.leagueUpdate)
    self.pagesDivisions = {}
    self.pagesDivisionsResults = {}
    self.pagesAllLeagues = {}
    self.floodtimer = time.time()
    self.currentLeague = 0
    self.currentDivision = 0
    self.FORMATTER_LADDER = unicode(util.readfile("stats/formatters/ladder.qthtml"))
    self.FORMATTER_LADDER_HEADER = unicode(util.readfile("stats/formatters/ladder_header.qthtml"))
    self.stylesheet = util.readstylesheet("stats/formatters/style.css")
    self.leagues.setStyleSheet(self.stylesheet)
    # set up other tabs
    self.mapstat = mapstat.LadderMapStat(self.client, self)
def get_dataset(filename, num_epochs, shuffle):
    dataset = tf.data.Dataset.from_generator(lambda: readfile(filename),
                                             output_types=(tf.int32, tf.int32),
                                             output_shapes=([None], []))
    if shuffle:
        dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.repeat(num_epochs)
    dataset = dataset.prefetch(1)
    return dataset
def __init__(self, client):
    super(BaseClass, self).__init__()
    self.setupUi(self)
    self.client = client
    client.ladderTab.layout().addWidget(self)
    self.client.statsInfo.connect(self.processStatsInfos)
    self.webview = QtWebKit.QWebView()
    self.globalTab.layout().addWidget(self.webview)
    self.loaded = False
    self.client.showLadder.connect(self.updating)
    self.webview.loadFinished.connect(self.webview.show)
    self.leagues.currentChanged.connect(self.leagueUpdate)
    self.pagesDivisions = {}
    self.pagesDivisionsResults = {}
    self.pagesAllLeagues = {}
    self.floodtimer = time.time()
    self.currentLeague = 0
    self.currentDivision = 0
    self.FORMATTER_LADDER = unicode(util.readfile("stats/formatters/ladder.qthtml"))
    self.FORMATTER_LADDER_HEADER = unicode(util.readfile("stats/formatters/ladder_header.qthtml"))
    self.stylesheet = util.readstylesheet("stats/formatters/style.css")
    self.leagues.setStyleSheet(self.stylesheet)
    # set up other tabs
    self.mapstat = mapstat.LadderMapStat(self.client, self)
class UIModWidget(FormClass, BaseClass):
    FORMATTER_UIMOD = unicode(util.readfile("modvault/uimod.qthtml"))

    def __init__(self, parent, *args, **kwargs):
        BaseClass.__init__(self, *args, **kwargs)
        self.setupUi(self)
        self.parent = parent
        self.setStyleSheet(self.parent.client.styleSheet())
        self.setWindowTitle("Ui Mod Manager")
        self.doneButton.clicked.connect(self.doneClicked)
        self.modList.itemEntered.connect(self.hoverOver)
        allmods = modvault.getInstalledMods()
        self.uimods = {}
        for mod in allmods:
            if mod.ui_only:
                self.uimods[mod.totalname] = mod
                self.modList.addItem(mod.totalname)
        names = [mod.totalname for mod in modvault.getActiveMods(uimods=True)]
        for name in names:
            l = self.modList.findItems(name, QtCore.Qt.MatchExactly)
            if l:
                l[0].setSelected(True)
        if len(self.uimods) != 0:
            self.hoverOver(self.modList.item(0))

    @QtCore.pyqtSlot()
    def doneClicked(self):
        selected_mods = [self.uimods[str(item.text())]
                         for item in self.modList.selectedItems()]
        success = modvault.setActiveMods(selected_mods, False)
        if not success:
            QtGui.QMessageBox.information(
                None, "Error",
                "Could not set the active UI mods. Maybe something is wrong with "
                "your game.prefs file. Please send your log.")
        self.done(1)

    @QtCore.pyqtSlot(QtGui.QListWidgetItem)
    def hoverOver(self, item):
        mod = self.uimods[str(item.text())]
        self.modInfo.setText(self.FORMATTER_UIMOD.format(name=mod.totalname,
                                                         description=mod.description))
def get_unigram_model(item_sort, cat_count):
    reader = readfile(new_train_file)
    item_word = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    cat_word = defaultdict(lambda: defaultdict(int))
    idx = 0
    for (__user, sku, category, raw_query, ___click_time) in reader:
        idx += 1
        bound = cat_count[category][HOT_SIZE]
        popular = [i[0] for i in item_sort[category][0:bound]]
        if sku in popular:
            words = get_words(raw_query)
            for w in words:
                item_word[category][sku][w] += magic_num
                cat_word[category][w] += magic_num
    return item_word, cat_word
def bookmark_menu(self, arg=None, menuw=None):
    """
    Bookmark list
    """
    bookmarkfile = util.get_bookmarkfile(self.item.filename)
    items = []
    for line in util.readfile(bookmarkfile):
        item = BookmarkItem(self.item, int(line))
        items.append(item)
    if items:
        item = menu.MenuItem(name=_('Clear all Bookmarks'),
                             action=self.__clear_bookmarks, arg=self.item)
        items.append(item)
        moviemenu = menu.Menu(self.item.name, items, fxd_file=self.item.skin_fxd)
        menuw.pushmenu(moviemenu)
    return
def get_docs():
    print "[begin] get_docs ..."
    docs = []
    lines = readfile(new_docs_path)
    for i in range(0, len(lines)):
        doc = {'id': i, 'words': {}}
        items = lines[i].split()
        if len(items) == 2:
            word_nums = items[1].split(',')
            for w_n in word_nums:
                w, n = w_n.split(':')
                w = int(w)
                doc['words'][w] = int(n)
        docs.append(doc)
    return docs
def logmessage(ui, opts):
    """get the log message according to the -m and -l options"""
    message = opts.get('message')
    logfile = opts.get('logfile')
    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s")
                             % (logfile, inst.strerror))
    return message
def read_m3u(self, plsname):
    """
    This is the (m3u) playlist reading function.

    @param plsname: The playlist filename
    @returns: The list of interesting lines in the playlist
    """
    try:
        lines = util.readfile(plsname)
    except IOError:
        print 'Cannot open file "%s"' % plsname
        return 0
    try:
        playlist_lines_dos = map(lambda l: l.strip(), lines)
        playlist_lines = filter(lambda l: len(l) > 0, playlist_lines_dos)
    except IndexError:
        print 'Bad m3u playlist file "%s"' % plsname
        return 0
    (curdir, playlistname) = os.path.split(plsname)
    # XXX this may not work if the curdir is not accessible
    os.chdir(curdir)
    for i in range(0, len(playlist_lines)):
        if playlist_lines[i][0] == "#":
            continue
        line = playlist_lines[i]
        line = line.replace('\\', '/')  # Fix MSDOS slashes
        try:
            if line.find('://') > 0:
                if playlist_lines[i - 1].find('#EXTINF') > -1 and len(playlist_lines[i - 1].split(",")) > 1:
                    self.playlist.append((line, playlist_lines[i - 1].split(",")[1]))
                else:
                    self.playlist.append(line)
            elif os.path.isabs(line):
                if os.path.exists(line):
                    self.playlist.append(line)
            else:
                if os.path.exists(os.path.abspath(os.path.join(curdir, line))):
                    self.playlist.append(os.path.abspath(os.path.join(curdir, line)))
        except TypeError:
            print 'Bad m3u playlist line in "%s":%r' % (plsname, line)
def loader():
    if util.mainfrozen():
        module = sys.executable
    else:
        module = __file__
    base = os.path.dirname(module)
    for dir in ('.', '..'):
        docdir = os.path.join(base, dir, 'help')
        if os.path.isdir(docdir):
            break
    path = os.path.join(docdir, topic + ".txt")
    doc = gettext(util.readfile(path))
    for rewriter in helphooks.get(topic, []):
        doc = rewriter(topic, doc)
    return doc
def _select_time(self, arg=None, menuw=None, which=None):
    bookmarkfile = util.get_bookmarkfile(self.item.filename)
    if not os.path.exists(bookmarkfile):
        self.error(_('No bookmarks are set for this video'))
        return
    menu_items = [menu.MenuItem(_('Do not set'), action=which, arg=None)]
    for line in util.readfile(bookmarkfile):
        sec = int(line)
        hour = int(sec / 3600)
        min = int((sec - (hour * 3600)) / 60)
        time = '%0.2d:%0.2d:%0.2d' % (hour, min, sec % 60)
        menu_items.append(menu.MenuItem(time, action=which, arg=sec))
    encoding_menu = menu.Menu(_('Select Time'), menu_items,
                              item_types='video encoding menu')
    encoding_menu.infoitem = self
    menuw.pushmenu(encoding_menu)
    menuw.refresh()
def loader():
    if hasattr(sys, 'frozen'):
        module = sys.executable
    else:
        module = __file__
    base = os.path.dirname(module)
    for dir in ('.', '..'):
        docdir = os.path.join(base, dir, 'help')
        if os.path.isdir(docdir):
            break
    path = os.path.join(docdir, topic + ".txt")
    doc = gettext(util.readfile(path))
    for rewriter in helphooks.get(topic, []):
        doc = rewriter(topic, doc)
    return doc
def loader():
    if util.mainfrozen():
        module = sys.executable
    else:
        module = __file__
    base = os.path.dirname(module)
    docdir = '/usr/share/mercurial/help'
    for dir in ('.', '..'):
        tmpdocdir = os.path.join(base, dir, 'help')
        if os.path.isdir(tmpdocdir):
            docdir = tmpdocdir
            break
    path = os.path.join(docdir, topic + ".txt")
    doc = gettext(util.readfile(path))
    for rewriter in helphooks.get(topic, []):
        doc = rewriter(topic, doc)
    return doc
def collect_voca():
    voca = {}
    docs = readfile(corpus_path)
    for doc in docs:
        doc = doc.split()[-1]  # input format has been changed
        for char in doc:
            if is_ch_char(char):  # only take care of Chinese chars
                if char in voca:
                    voca[char] += 1  # count occurrences of a char
                else:
                    voca[char] = 1
    # write voca to file
    outf = open(voca_path, 'w')
    voca_li = sorted(voca.items(), key=lambda d: d[1], reverse=True)
    i = 0
    for w, n in voca_li:
        outf.write("%d\t%s\t%d\n" % (i, w.encode("gb2312"), n))
        i += 1
    print "[finished] write voca to %s" % voca_path
def judgement_day(base_path, theory, opts, env, case, paths, dep_paths, playground):
    """Judgement Day regression suite"""
    isa = paths[0]
    dep_path = dep_paths[0]
    os.chdir(path.join(playground, '..', base_path))  # Mirabelle requires a specific cwd
    prepare_isabelle_repository(isa, dep_path)
    output = {}
    success_rates = {}
    some_success = False
    for atp in judgement_day_provers:
        log_dir = path.join(playground, 'mirabelle_log_' + atp)
        os.makedirs(log_dir)
        cmd = ('%s/bin/isabelle mirabelle -q -O %s sledgehammer[prover=%s,%s] %s.thy'
               % (isa, log_dir, atp, opts, theory))
        os.system(cmd)
        output[atp] = util.readfile(path.join(log_dir, theory + '.log'))
        percentages = list(re.findall(r'Success rate: (\d+)%', output[atp]))
        if len(percentages) == 2:
            success_rates[atp] = {
                'sledgehammer': int(percentages[0]),
                'metis': int(percentages[1])}
            if success_rates[atp]['sledgehammer'] > 0:
                some_success = True
        else:
            success_rates[atp] = {}
    data = {'success_rates': success_rates}
    raw_attachments = dict((atp + "_output", output[atp]) for atp in judgement_day_provers)
    # FIXME: summary?
    return (some_success, '', data, raw_attachments, None)