def main2():
    """Scrape course/module data into the cache and write one json per regulation."""
    # Lazy thunks for the expensive downloads; json_read_or only calls them
    # on a cache miss.  NOTE: course_ids / module_ids are late-bound closures
    # -- they are assigned further down, before the lambdas are invoked.
    get_inferno = lambda: download_inferno([])
    get_from_tucan = lambda: download_from_tucan(course_ids)
    get_from_inferno = lambda: download_from_inferno(module_ids)
    inferno = utils.json_read_or(prefix + "pre-inferno.json", get_inferno)
    regulations = list(inferno.keys())
    course_ids = utils.json_read_or(prefix + "pre-tucan-pflicht.json", download_tucan_vv_pflicht)
    course_ids += utils.json_read_or(prefix + "pre-tucan-wahl.json", download_tucan_vv_wahl)
    course_ids += utils.json_read_or(prefix + "pre-tucan-search.json", download_tucan_vv_search)
    # Deduplicate and sort; inner lists from JSON are unhashable, hence tuple().
    course_ids = list(sorted(set(tuple(i) for i in course_ids)))
    courses = utils.json_read_or(prefix + "tucan.json", get_from_tucan)
    # # three alternative ways to get list of courses:
    # get_fbs = lambda: download_tucan_vv_catalogue(
    #     ("01", "02", "03", "04", "05", "11", "13", "16", "18", "20",))
    # get_fb20 = lambda: download_tucan_vv_catalogue(("20",))
    # get_anmeldung = lambda: download_tucan_anmeldung()
    # courses2 = utils.json_read_or(prefix+'tucan-FBs.json', get_fbs)
    # courses3 = utils.json_read_or(prefix+'tucan-FB20.json', get_fb20)
    # courses4 = utils.json_read_or(prefix+'tucan-anmeldung.json', get_anmeldung)
    # Module ids come from both the scraped courses and the inferno tree.
    module_ids = {
        module_id
        for course in courses
        for module_id in course['modules']
    }
    module_ids |= {
        key
        for regulation in regulations
        for key in inferno[regulation].keys()
    }
    modules = utils.json_read_or(prefix + "inferno.json", get_from_inferno)
    modules = inner_join(courses, modules)
    pflicht = utils.json_read(prefix + "pre-tucan-pflicht.json")
    # NOTE(review): `wahl` is read but never used below -- confirm intent.
    wahl = utils.json_read(prefix + "pre-tucan-wahl.json")
    # One output file per regulation, containing the modules that belong to it.
    for regulation in regulations:
        module_part = {
            k: v
            for k, v in modules.items()
            if regulation in str(v['regulations'])
            or k[0:10] in inferno[regulation]
            or (regulation.startswith("B.Sc.") and
                (any(title.startswith(k) for title, url in pflicht)
                 or any(item["title"] == "Titel" and "f\u00fcr Inf" in item["details"]
                        for item in v["details"])))
        }
        # File name must be file-system safe: keep alphanumerics only.
        short_regulation = "".join(c for c in regulation if c.isalnum())
        utils.json_write(prefix + '-' + short_regulation + '.json', module_part)
    if True:  # test support for other FBs, here FB 13:
        module_part = {k: v for k, v in modules.items() if k.startswith("13-")}
        # NOTE(review): this `short_regulation` is recomputed but unused -- confirm.
        short_regulation = "".join(c for c in regulation if c.isalnum())
        utils.json_write(prefix + '-BauUmwelt.json', module_part)
    print()
def search_templates(self):
    """List every Pandoc template by name as Alfred feedback items.

    Falls back to the workflow's bundled templates when the user has
    not created any of their own.
    """
    templates = utils.json_read(self.wf.datafile('user_templates.json'))
    if not templates:
        # No user templates yet -- show the shipped defaults instead.
        templates = utils.json_read(
            self.wf.workflowfile('pandoc_templates.json'))
    matches = self._filter(templates, lambda tmp: tmp['name'])
    for match in matches:
        subtitle = "Uses default options? " + str(match['use_defaults'])
        self.wf.add_item(match['name'], subtitle,
                         arg=match['name'],
                         valid=True)
def search_defaults(self, data):
    """Present Pandoc options that can be toggled as defaults.

    Options already stored in ``user_defaults.json`` get the "on" icon;
    the ``to``/``from`` format options are never offered.
    """
    matches = self._filter(data,
                           lambda opt: ' '.join([opt['full'], opt['type']]))
    chosen = utils.json_read(self.wf.datafile('user_defaults.json'))
    # Prepare Alfred feedback
    for opt in matches:
        flag = opt['flag']
        if flag in ('to', 'from'):
            # Input/output format flags cannot be defaults.
            continue
        icon = 'icons/pandoc.png'
        if chosen is not None and flag in chosen:
            icon = 'icons/pandoc_on.png'
        self.wf.add_item(flag,
                         'Type: {}'.format(opt['type']),
                         arg=flag,
                         valid=True,
                         icon=icon)
def search_defaults(self, data):
    """Display the searchable list of options to mark as default."""
    results = self._filter(data, lambda x: ' '.join([x['full'], x['type']]))
    defaults = utils.json_read(self.wf.datafile('user_defaults.json'))
    for item in results:
        # The format selectors never appear in this list.
        if item['flag'] in ('to', 'from'):
            continue
        is_default = defaults is not None and item['flag'] in defaults
        icon = 'icons/pandoc_on.png' if is_default else 'icons/pandoc.png'
        subtitle = 'Type: {}'.format(item['type'])
        self.wf.add_item(item['flag'], subtitle, arg=item['flag'],
                         valid=True, icon=icon)
def main(wf_obj):
    """Accept Alfred's args and pipe to proper Class.

    Reads the cached zotquery data, scans it for citations of the
    hard-coded item *key*, and prints the scan result and the parsed
    citation id.
    """
    key = "3KFT2HQ9"  # hard-coded item key under test -- TODO: take from args?
    data = json_read(wf_obj.datafile("zotquery.json"))
    cited = _scan_cites(data, key)
    cite_id = parse_cite(data, cited)
    # `print x` is Python-2-only syntax; the call form prints the same
    # single-argument output under both Python 2 and 3.
    print(cited)
    print(cite_id)
def search_templates(self):
    """Show the names of all user Pandoc templates in Alfred.

    When no user templates exist, the bundled
    ``pandoc_templates.json`` is displayed instead.
    """
    # A falsy read (missing/empty file) falls through to the defaults.
    data = (utils.json_read(self.wf.datafile('user_templates.json'))
            or utils.json_read(self.wf.workflowfile('pandoc_templates.json')))
    for entry in self._filter(data, lambda t: t['name']):
        self.wf.add_item(entry['name'],
                         "Uses default options? " + str(entry['use_defaults']),
                         arg=entry['name'],
                         valid=True)
def run_template_cmd(self, template):
    """Run the user-selected template command.

    Looks up *template* by name in the user templates (falling back to
    the bundled defaults), assembles its option list -- appending the
    globally enabled default options when the template asks for them --
    and hands the result to ``run_pandoc``.
    """
    tmps = utils.json_read(self.wf.datafile('user_templates.json'))
    if tmps is None:  # was `== None`; identity test per PEP 8
        tmps = utils.json_read(self.wf.workflowfile('pandoc_templates.json'))
    for temp in tmps:
        if temp['name'] == template.strip():
            args = temp['options']
            # deliberate `== True`: stored values may be non-bool truthy
            if temp['use_defaults'] == True:
                defaults = [opt['full']
                            for opt in self.pandoc.options
                            if opt['status'] == True]
                args.extend(defaults)
            args = self._format_template(args)
            return self.run_pandoc(args)
def get_server_settings(self):
    """Load this server's settings file, creating it on first use.

    A missing file is seeded from ``DEFAULT_SETTINGS``; a stale
    "Server Name" entry is refreshed and written back.
    """
    try:
        settings = utils.json_read(self._settings_filepath)
    except FileNotFoundError:
        # First run for this server: persist a copy of the defaults.
        settings = copy.deepcopy(self.DEFAULT_SETTINGS)
        utils.json_write(self._settings_filepath, data=settings)
    # Keep the stored name in sync with the live server object.
    if settings["Server Name"] != self._server.name:
        settings["Server Name"] = self._server.name
        utils.json_write(self._settings_filepath, data=settings)
    # TODO: Add additional data verification with jsonschema
    return settings
def MODEL_QEURY(model):  # NOTE: name kept (typo of "QUERY") -- callers reference it
    """Return the logged training metrics for a training job as JSON.

    Reads ``train_id`` and ``epochs`` from the request body, converts
    the logger output into a per-job JSON file, then serves that file's
    contents.  An empty JSON object is returned when the file cannot be
    read or parsed.
    """
    request_data = json.loads(request.data.decode('utf-8'))
    train_id = request_data['train_id']
    epochs = request_data['epochs']
    file_name = ROOT_PATH + 'matchzoo_temp_files/data/' + train_id + '.json'
    # Convert the logger's StringIO output into the target JSON file.
    format_stringio_to_json(train_id, file_name, epochs)
    try:
        ret_data = json_read(file_name)
    except Exception:  # narrowed from bare `except:` so SystemExit etc. propagate
        ret_data = {}
    return jsonify(ret_data)
def _check_option(self, item):
    """Return True when *item* should appear in results, else False.

    The ``to``/``from`` options and any option on the user's ignore
    list are filtered out.
    """
    # Get all options user wants ignored
    ignored = utils.json_read(self.wf.datafile('user_ignore.json'))
    flag = item['flag']
    if flag in ('to', 'from'):
        return False
    if ignored is not None and flag in ignored:
        return False
    return True
def run_template_cmd(self, template):
    """Run user-selected template command.

    Finds the template named *template*, builds its option list
    (plus the enabled global defaults when requested) and runs pandoc
    with it.
    """
    tmps = utils.json_read(self.wf.datafile('user_templates.json'))
    if tmps is None:  # was `== None`; identity test per PEP 8
        tmps = utils.json_read(
            self.wf.workflowfile('pandoc_templates.json'))
    for temp in tmps:
        if temp['name'] == template.strip():
            args = temp['options']
            # deliberate `== True`: stored values may be non-bool truthy
            if temp['use_defaults'] == True:
                defaults = [
                    opt['full'] for opt in self.pandoc.options
                    if opt['status'] == True
                ]
                args.extend(defaults)
            args = self._format_template(args)
            return self.run_pandoc(args)
def add_default(self, value):
    """Persist *value* on the list of default options.

    Returns ``'[pause]'`` when the user picked the ``[done]`` exit
    item, otherwise an empty string.
    """
    if self.arg == '[done]':
        return '[pause]'
    path = self.wf.datafile('user_defaults.json')
    stored = utils.json_read(path)
    if stored:
        stored.append(value)
        # Deduplicate before writing back.
        utils.json_write(list(set(stored)), path)
    else:
        utils.json_write([value], path)
    return ''
def _check_option(self, item):
    """Decide whether *item* passes the ignore filters."""
    # Options the user has chosen to hide, if any.
    user_ignored = utils.json_read(self.wf.datafile('user_ignore.json'))
    # Blocked when it is a format selector or explicitly ignored.
    blocked = (item['flag'] in ('to', 'from')
               or (user_ignored is not None
                   and item['flag'] in user_ignored))
    return not blocked
def add_ignore(self, value):
    """Record *value* on the persistent ignore list.

    ``[done]`` signals the end of selection and yields ``'[pause]'``;
    every other value is merged (deduplicated) into
    ``user_ignore.json``.
    """
    arg_out = '[pause]' if self.arg == '[done]' else ''
    if arg_out == '':
        ignore_file = self.wf.datafile('user_ignore.json')
        current = utils.json_read(ignore_file) or []
        current.append(value)
        utils.json_write(list(set(current)), ignore_file)
    return arg_out
def search_options(self, data):
    """Search `options` and emit them as Alfred feedback items.

    Each option's on/off status is resolved in priority order:
    runner pre-sets override user defaults, which override the
    option's own stored status.
    """
    results = self._filter(data, lambda x: ' '.join([x['full'], x['type']]))
    # Get all option keys already assigned on the runner (non-meta keys).
    runner_opts = []
    if self.runner is not None:
        runner_opts = [
            k for k in self.runner.keys() if k not in RUNNER_KEYS
        ]
    # Prepare Alfred feedback
    for item in results:
        # Skip format selectors and user-ignored options.
        if self._check_option(item) == False:
            continue
        # Check for user defaults
        # and change status accordingly
        if os.path.exists(self.wf.datafile('user_defaults.json')):
            item['status'] = False
            defs = utils.json_read(self.wf.datafile('user_defaults.json'))
            if item['flag'] in defs:
                item['status'] = True
        # Catch any pre-set options (these win over defaults).
        if item['flag'] in runner_opts:
            # get item's pre-set status value
            item['status'] = next((val for key, val in self.runner.items()
                                   if key == item['flag']), None)
        # Prepare item subtitle and icon
        subtitle = 'Type: {}'.format(item['type'])
        icon = 'icons/pandoc.png'
        if item['status'] != False:
            icon = 'icons/pandoc_on.png'
        # Add item to Alfred results
        self.wf.add_item(item['flag'], subtitle,
                         arg=item['flag'],
                         valid=True,
                         icon=icon)
def _store_template_info(self, key, value):
    """Accumulate one key/value pair of a new user template.

    A ``name`` key starts a fresh template dict; any other key is
    merged into the first template that is still incomplete (fewer
    than 3 keys).  Always returns True.
    """
    templates = utils.json_read(self.wf.datafile('user_templates.json'))
    if not templates:
        updated = [{key: value}]
    else:
        if key == 'name':
            # Start a brand new template entry.
            templates.extend([{key: value}])
        else:
            # Attach to the first still-incomplete template.
            for tmp in templates:
                if len(tmp.keys()) != 3:
                    tmp.update({key: value})
                    break
        updated = templates
    utils.json_write(updated, self.wf.datafile('user_templates.json'))
    return True
def search_options(self, data):
    """Render the searchable list of Pandoc `options` in Alfred."""
    matches = self._filter(data,
                           lambda opt: ' '.join([opt['full'], opt['type']]))
    # Option keys already pre-set on the current runner, if any.
    preset_flags = []
    if self.runner is not None:
        preset_flags = [key for key in self.runner.keys()
                        if key not in RUNNER_KEYS]
    for opt in matches:
        if not self._check_option(opt):
            continue
        # User defaults override the option's own status.
        if os.path.exists(self.wf.datafile('user_defaults.json')):
            defs = utils.json_read(self.wf.datafile('user_defaults.json'))
            opt['status'] = opt['flag'] in defs
        # A runner pre-set beats everything else.
        if opt['flag'] in preset_flags:
            opt['status'] = next((val for key, val in self.runner.items()
                                  if key == opt['flag']), None)
        icon = ('icons/pandoc.png' if opt['status'] == False
                else 'icons/pandoc_on.png')
        self.wf.add_item(opt['flag'],
                         'Type: {}'.format(opt['type']),
                         arg=opt['flag'],
                         valid=True,
                         icon=icon)
def read_messages(self, server_id, ch_id):
    """Yield all cached messages of one channel, oldest storage first.

    Messages come from the numbered on-disk json files (1.json,
    2.json, ...) until the first missing number, then from the
    in-memory buffer.  Timestamps ("t") read from disk are parsed back
    into datetime objects before being yielded.
    """
    ch_dir = self._get_ch_dir(server_id, ch_id)
    file_number = 0
    while True:
        file_number += 1
        file_contents = None
        try:
            file_contents = utils.json_read(ch_dir + str(file_number) + ".json")
        except FileNotFoundError:
            # First gap in the numbering marks the end of the on-disk data.
            break
        for msg_dict in file_contents:
            msg_dict["t"] = dateutil.parser.parse(msg_dict["t"])
            yield msg_dict
    # After having read all files, output buffered messages.
    try:
        for msg_dict in self._data[server_id][ch_id]:
            yield msg_dict
    except KeyError:
        # No in-memory buffer for this server/channel -- nothing more to yield.
        pass
import vk
import os
import sys
from functools import lru_cache

WAIT_TIME = u.WAIT_TIME

# Initialize constants
# Initialize vk api: credentials are read from params.json next to the script.
if not os.path.exists('params.json'):
    u.exit_with_error(
        "Файл params.json не найден; Создайте файл и повторите попытку")
    # NOTE(review): likely unreachable if exit_with_error already exits -- confirm.
    sys.exit(1)
params = u.json_read('params.json')
session = vk.AuthSession(**params)
api = vk.API(session, v='4.104')
_fwd_id = 0

# Initialize functions


# Returns FN LN by user object
def gen_name(user):
    """Return "FirstName LastName" for a vk user dict."""
    return user['first_name'] + ' ' + user['last_name']


def gen_dir_name(user):
    """Return a file-system safe directory name: translit(FN)_translit(LN)_uid."""
    return u.transliterate(user['first_name']) + '_' + u.transliterate(
        user['last_name']) + '_' + str(user['uid'])
async def _fill_buffers(self):
    """Populate the in-memory message buffers for every text channel.

    For each channel, reads the last-synced timestamp from the channel's
    json metadata file, fetches all newer messages from Discord (most
    recent first, inserted at the front to restore chronological order),
    then progressively flushes large buffers to disk.
    """
    await self._client.set_temp_game_status("filling cache buffers.")
    for server in self._client.servers:
        ch_dict = None
        try:
            ch_dict = self._data[server.id]
        except KeyError:
            # First time seeing this server: start an empty channel map.
            ch_dict = {}
            self._data[server.id] = ch_dict
        for ch in server.channels:
            if ch.type is discord.ChannelType.voice:
                continue
            print("MessageCache caching messages in #" + ch.name)
            # TODO: Rename these variable names.
            # TODO: Turn this into a function? (see duplicated code...)
            ch_dir = self._get_ch_dir(server.id, ch.id)
            ch_json_filepath = ch_dir + self._CH_JSON_FILENAME
            ch_json_data = None
            try:
                ch_json_data = utils.json_read(ch_json_filepath)
            except FileNotFoundError:
                ch_json_data = {}
            # TODO: Turn this into a function? (see duplicated code...)
            ch_stored_timestamp = None
            try:
                ch_stored_timestamp = dateutil.parser.parse(ch_json_data["last message timestamp"])
            except KeyError:
                # No sync recorded yet: fall back to the earliest possible date.
                ch_stored_timestamp = datetime.datetime(datetime.MINYEAR, 1, 1)
            ch_json_data["last message timestamp"] = ch_stored_timestamp.isoformat()
            utils.json_write(ch_json_filepath, data=ch_json_data)
            # This will now fill a buffer all messages of a channel.
            # TODO: Consider filling a file, then reading off the file.
            msg_buffer = []
            try:
                async for msg in self._client.logs_from(ch, limit=ARBITRARILY_LARGE_NUMBER):
                    # Stop as soon as we reach messages we already have.
                    if msg.timestamp <= ch_stored_timestamp:
                        break
                    # Insert in front since we're reading messages starting from most recent.
                    msg_buffer.insert(0, self._message_dict(msg))
            except discord.errors.Forbidden:
                # Missing read permission for this channel -- skip it.
                print("MessageCache unable to read #" + ch.name)
                continue
            ch_dict[ch.id] = msg_buffer
            # Flush in decreasing batch sizes so most data hits disk early.
            # Move every 5000 messages to disk.
            while len(ch_dict[ch.id]) >= 5000:
                self._move_to_disk(server.id, ch.id, messages=5000)
            # Move every 1000 messages to disk.
            while len(ch_dict[ch.id]) >= 1000:
                self._move_to_disk(server.id, ch.id, messages=1000)
            # Now move every 200 messages to disk.
            while len(ch_dict[ch.id]) >= 200:
                self._move_to_disk(server.id, ch.id, messages=200)
    await self._client.remove_temp_game_status()
    return
def _move_to_disk(self, server_id, ch_id, messages=None):
    """Flush buffered messages of one channel to the next numbered json file.

    ``messages`` is the number of oldest buffered messages to flush;
    ``None`` flushes the entire buffer.  Also updates the channel's
    "last message timestamp" metadata.
    """
    print("MessageCache moving messages to disk.")
    ch_dir = self._get_ch_dir(server_id, ch_id)
    ch_json_filepath = ch_dir + self._CH_JSON_FILENAME
    # TODO: Turn this into a function? (see duplicated code...)
    ch_json_data = None
    try:
        ch_json_data = utils.json_read(ch_json_filepath)
    except FileNotFoundError:
        ch_json_data = {}
    # TODO: Turn this into a function? (see duplicated code...)
    ch_stored_timestamp = None
    try:
        ch_stored_timestamp = dateutil.parser.parse(ch_json_data["last message timestamp"])
    except KeyError:
        # No timestamp recorded yet -- fall back to the earliest possible date.
        ch_stored_timestamp = datetime.datetime(datetime.MINYEAR, 1, 1)
    ch_json_data["last message timestamp"] = ch_stored_timestamp.isoformat()
    utils.json_write(ch_json_filepath, data=ch_json_data)
    ch_dict = self._data[server_id]
    # Split off the messages to be stored.
    if messages is None:
        to_store = ch_dict[ch_id]
        ch_dict[ch_id] = []
    else:
        to_store = ch_dict[ch_id][:messages]
        ch_dict[ch_id] = ch_dict[ch_id][messages:]
    # NOTE(review): raises IndexError if to_store is empty -- callers appear
    # to only flush non-empty buffers; confirm.
    latest_message = to_store[-1:][0]
    latest_timestamp_isoformat = latest_message["t"].isoformat()
    ch_json_data["last message timestamp"] = latest_timestamp_isoformat
    for msg_dict in to_store:
        # TODO: I still don't know what's causing this to be a string...
        # This temporary fix will have to do for now.
        if isinstance(msg_dict["t"], str):
            print("WARNING: msg_dict[t] is already a string. contents: " + msg_dict["t"])
        else:
            msg_dict["t"] = msg_dict["t"].isoformat()  # Make serializable
    # Check the highest numbered json file.
    highest_json_file_number = 0
    for file_name in os.listdir(ch_dir):
        if file_name.endswith(".json"):
            file_number = None
            try:
                file_number = int(file_name[:-5])
            except ValueError:
                # Non-numeric json file (e.g. the metadata file) -- skip.
                continue
            if file_number > highest_json_file_number:
                highest_json_file_number = file_number
    # Store data in the next available json file number
    file_name = str(highest_json_file_number + 1) + ".json"
    utils.json_write(ch_dir + file_name, data=to_store)
    # Save latest message timestamp.
    utils.json_write(ch_json_filepath, data=ch_json_data)
    return
def load(f):
    """Read and return the JSON content of *f*."""
    return json_read(f)


# ----------- INSIDE ALGORITHM -------------

# Semiring bundles the six callables that parameterize the inside algorithm.
Semiring = namedtuple(
    'Semiring', ['inject', 'project', 'times', 'plus', 'unit', 'zero'])
def main():
    """Render the static gh-pages site from the cached json files.

    Writes index.html, main.js, style.css and one HTML page per
    regulation under ``gh-pages/``.
    """
    now = datetime.datetime.today()  # datetime.datetime(2018, 9, 5)
    today = now.strftime("%Y-%m")
    today2 = now.strftime("%d. %b %Y")
    today4 = utils.half_semester(now)
    prefix = "cache/"  # + utils.half_semester_filename(now) + "-"
    folder = "gh-pages/"
    pflicht = utils.json_read(prefix + "pre-tucan-pflicht.json")
    fields = utils.json_read(prefix + "pre-inferno.json")
    #nebenfach = utils.json_read("nebenfach.json")
    # back = utils.groupby(((course, major +" · "+ category)
    #     for major,v in nebenfach.items()
    #     for category,v in v.items()
    #     for module in v
    #     for course in module), key=lambda x:x[0])
    # back = {k:["Y Nebenfach · " + " &<br> ".join(i[1] for i in v),""] for k,v in back}
    # fields = [back] + list(fields.values())
    # print(json.dumps(fields, indent=2))
    with open("page.html") as f:
        page_tmpl = f.read()
    with open("index.html") as f:
        index_tmpl = f.read()
    with open("dist/main.js") as f:
        code_tmpl = f.read()
    with open("style.css") as f:
        style_tmpl = f.read()
    # Strip non-alphanumerics so a regulation name is file-system safe.
    filename = lambda reg: "".join(c for c in reg if c.isalnum())
    regulations = [(k, k.replace("B.Sc.", "Bachelor").replace(
        "M.Sc.", "Master").replace(" (2015)", ""), filename(k) + ".html")
                   for k in fields.keys() if k.endswith(" (2015)")] + [
        # other FBs?
        ("BauUmwelt", "FB 13 Bau, Umwelt", "BauUmwelt.html")
    ]
    # Index page: regulations grouped into main / experimental / special lists.
    with open(folder + "/index.html", "w") as f:
        f.write(
            pystache.render(
                index_tmpl, {
                    "list": [{
                        'href': href,
                        'title': today4 + " " + display_regulation
                    } for regulation, display_regulation, href in regulations
                             if display_regulation.endswith(" Informatik")
                             if not display_regulation.startswith("FB ")],
                    "experimentallist": [{
                        'href': href,
                        'title': today4 + " " + display_regulation
                    } for regulation, display_regulation, href in regulations
                                         if not display_regulation.endswith(" Informatik")
                                         if not display_regulation.startswith("FB ")],
                    "speciallist": [{
                        'href': href,
                        'title': today4 + " " + display_regulation
                    } for regulation, display_regulation, href in regulations
                                    if display_regulation.startswith("FB ")],
                }))
    with open(folder + "/main.js", "w") as f:
        f.write(code_tmpl)
    with open(folder + "/style.css", "w") as f:
        f.write(style_tmpl)
    # One page per regulation, built from its pre-computed module json.
    for regulation, display_regulation, href in regulations:
        print(prefix + "-" + filename(regulation) + ".json")
        dates = utils.json_read(prefix + "-" + filename(regulation) + ".json")
        if not dates:
            continue  # if file exists
        data = [
            clean(module_id, module, fields, regulation)
            for module_id, module in dates.items()
        ]
        data.sort(key=lambda x: (x['category'], x['id']))  # -int(x['credits'])
        with open("style.css") as f:
            css_style = f.read()
        js_data = json.dumps(data, indent=" ")
        with open(folder + "/" + href, "w") as f:
            f.write(
                pystache.render(
                    page_tmpl, {
                        "today": today,
                        "today2": today2,
                        "today4": today4,
                        "regulation_short": display_regulation,
                        "js_data": js_data,
                        "css_style": css_style,
                    }))
def main() -> None:
    """Render the gh-pages site from the cached json files.

    Builds the index page plus one HTML page per regulation from the
    pre-computed ``cache/-<regulation>.json`` module files.
    """
    prefix = "cache/"
    now = datetime.datetime.today()
    time_dmy = now.strftime("%d. %b %Y")
    semester = utils.json_read(prefix + "current_semester.json", None)
    semester = semester[0] + " " + semester[1]
    folder = "gh-pages/"
    # NOTE(review): `pflicht` is read but not used below -- confirm intent.
    pflicht: List[Tuple[str, str]] = []
    fields: Dict[str, Dict[str, Tuple[str, str]]] = {}
    pflicht = utils.json_read(prefix + "pre-tucan-pflicht.json", pflicht)
    fields = utils.json_read(prefix + "pre-inferno.json", fields)
    # dist/main.js with npm; code.orig.js without npm
    if os.path.exists("dist/main.js"):
        CODE_FILE = "dist/main.js"
    else:
        CODE_FILE = "code.orig.js"
    page_tmpl = utils.file_read("page.html")
    index_tmpl = utils.file_read("index.html")
    code_tmpl = utils.file_read(CODE_FILE)
    style_tmpl = utils.file_read("style.css")

    def filename(reg: str) -> str:
        """Strip non-alphanumerics so a regulation name is file-system safe."""
        return "".join(c for c in reg if c.isalnum())

    regulations = [
        (k,
         k.replace("B.Sc.", "Bachelor")
          .replace("M.Sc.", "Master")
          .replace(" (2015)", ""),
         filename(k) + ".html")
        for k in fields.keys() if k.endswith(" (2015)")
    ] + [
        # other FBs?
        ("BauUmwelt", "FB 13 Bau, Umwelt", "BauUmwelt.html")
    ]
    # Index page groups: main CS lists / experimental / other departments.
    listy = [
        {'href': href, 'title': semester + " " + display_regulation}
        for regulation, display_regulation, href in regulations
        if display_regulation.endswith(" Informatik")
        if not display_regulation.startswith("FB ")
    ]
    experimentallist = [
        {'href': href, 'title': semester + " " + display_regulation}
        for regulation, display_regulation, href in regulations
        if not display_regulation.endswith(" Informatik")
        if not display_regulation.startswith("FB ")
    ]
    speciallist = [
        {'href': href, 'title': semester + " " + display_regulation}
        for regulation, display_regulation, href in regulations
        if display_regulation.startswith("FB ")
    ]
    index_data = {
        "list": listy,
        "experimentallist": experimentallist,
        "speciallist": speciallist,
    }
    utils.file_write(folder + "/index.html", stache(index_tmpl, index_data))
    utils.file_write(folder + "/main.js", code_tmpl)
    utils.file_write(folder + "/style.css", style_tmpl)
    print(regulations)
    for regulation, display_regulation, href in regulations:
        print(prefix + "-" + filename(regulation) + ".json")
        modules: Dict[str, Module] = {}
        modules = utils.json_read(prefix + "-" + filename(regulation) + ".json",
                                  modules)
        # BUG FIX: was `if modules == []`, which is never true for the dict
        # default, so missing/empty module files rendered empty pages.
        if not modules:
            continue  # skip regulations whose module file is missing/empty
        data = [clean(module_id, module, fields, regulation)
                for module_id, module in modules.items()]
        data.sort(key=lambda x: (x['category'], x['id']))  # -int(x['credits'])
        js_data = json.dumps(data, indent=1)
        page_data = {
            "today": time_dmy,
            "semester": semester,
            "regulation": display_regulation,
            "js_data": js_data,
            "content": generate_page(data)
        }
        utils.file_write(folder + "/" + href, stache(page_tmpl, page_data))
    print("finished")
def get_shared_settings(self):
    """Return the shared settings, or None when the file does not exist."""
    try:
        settings = utils.json_read(self._shared_settings_filepath)
    except FileNotFoundError:
        return None
    return settings