    # NOTE(review): the statements below are the tail of a command function whose
    # `def` line lies above this chunk; `phrase`, `text0`/`text1`/`text2`, and
    # `msg` are defined there — TODO confirm against the full file.
    # Renders `phrase` as three-row ASCII art: each supported character maps to a
    # 3-line glyph stored in LETTERS, and glyphs are separated by one space.
    for i in phrase:
        if i in LETTERS:
            if text0 != "":  # not the first glyph: add a column of separators
                text0 += " "
                text1 += " "
                text2 += " "
            text0 += LETTERS[i][0]
            text1 += LETTERS[i][1]
            text2 += LETTERS[i][2]

    # Wrap the three rows in a <pre> so the monospace alignment is preserved.
    html = e.Pre(style="margin: 0; overflow-x: auto")
    html.add(text0 + e.Br() + text1 + e.Br() + text2)

    await msg.reply_htmlbox(html)


@command_wrapper(aliases=("meme", "memes", "mims"), is_unlisted=True, allow_pm=False)
async def memes(msg: Message) -> None:
    """Reply with a random entry from MEMES, but only in private rooms."""
    if msg.room is None or not msg.room.is_private:
        return

    await msg.reply(random.choice(MEMES))


# Module-level data loaded once at import time.
# LETTERS: character -> 3-line ASCII-art glyph (list of 3 strings).
with utils.get_data_file("letters.json").open(encoding="utf-8") as f:
    LETTERS: dict[str, list[str]] = json.load(f)

# MEMES: flat list of meme strings served by the `memes` command.
with utils.get_data_file("memes.json").open(encoding="utf-8") as f:
    MEMES: list[str] = json.load(f)
async def csv_to_sqlite(conn: Connection) -> None:
    """Rebuild the veekun SQLite database from the bundled CSV files.

    The rebuild is skipped when the git commit currently stored in the
    LatestCommit table matches the most recent commit that touched the
    veekun data/code paths; on any probing error the database is rebuilt
    unconditionally.

    NOTE(review): `conn` is not used in this body — presumably required by
    the caller's task signature; confirm against the scheduler.
    """
    latest_veekun_commit = ""
    try:
        # Most recent commit hash that touched the veekun CSV data or the
        # modules that consume it; used as a cache-invalidation key.
        latest_veekun_commit = (
            subprocess.run(
                [
                    "git",
                    "rev-list",
                    "-1",
                    "HEAD",
                    "--",
                    "data/veekun",
                    "databases/veekun.py",
                    "tasks/veekun.py",
                ],
                cwd=join(dirname(__file__), ".."),  # repository root
                capture_output=True,
                check=True,
            )
            .stdout.decode()
            .strip()
        )
        db = Database.open("veekun")
        with db.get_session() as session:
            stmt = select(v.LatestCommit.commit_id)
            if session.scalar(stmt) == latest_veekun_commit:
                return  # database is already up-to-date, skip rebuild
    except (
        subprocess.SubprocessError,  # generic subprocess error
        FileNotFoundError,  # git is not available
        OperationalError,  # table does not exist
    ):
        pass  # always rebuild on error

    print("Rebuilding veekun database...")

    # Opening in "wb" and immediately closing empties the file.
    with open(utils.get_config_file("veekun.sqlite"), "wb"):  # truncate database
        pass

    db = Database.open("veekun")
    v.Base.metadata.create_all(db.engine)

    # Map table name -> ORM class for every mapped class declared in module `v`.
    tables_classes = {
        obj.__tablename__: obj
        for name, obj in inspect.getmembers(v)
        if inspect.isclass(obj)
        and obj.__module__ == v.__name__
        and hasattr(obj, "__tablename__")
    }

    with db.get_session() as session:
        # Record the commit hash so the next run can skip the rebuild.
        if latest_veekun_commit:
            session.add(v.LatestCommit(commit_id=latest_veekun_commit))
        # sorted_tables respects FK dependency order, so inserts never hit a
        # missing parent table.
        for table in v.Base.metadata.sorted_tables:
            tname = table.key
            file_name = utils.get_data_file("veekun", f"{tname}.csv")
            if isfile(file_name):  # not every table has a CSV source
                with open(file_name, encoding="utf-8") as f:
                    csv_data = csv.DictReader(f)
                    csv_keys = csv_data.fieldnames
                    if csv_keys is not None:  # skip empty CSV files
                        data = [dict(i) for i in csv_data]
                        # Derived column: diacritics-free, user-id-normalized name.
                        if hasattr(table.columns, "name_normalized"):
                            for row in data:
                                row["name_normalized"] = utils.to_user_id(
                                    utils.remove_diacritics(row["name"])
                                )
                        # Derived column: numeric route number parsed from
                        # identifiers like "route-12".
                        if tname == "locations":
                            for row in data:
                                if num := re.search(
                                    r"route-(\d+)", row["identifier"]
                                ):
                                    row["route_number"] = num[1]
                        bulk_insert_stmt = insert(tables_classes[tname])
                        session.execute(bulk_insert_stmt, data)
                        # Normalize identifiers by stripping dashes, in bulk,
                        # after the insert.
                        if "identifier" in csv_keys:
                            bulk_update_stmt = (
                                update(tables_classes[tname])
                                .values(
                                    identifier=func.replace(
                                        tables_classes[tname].identifier, "-", ""
                                    )
                                )
                                .execution_options(synchronize_session=False)
                            )
                            session.execute(bulk_update_stmt)
# TODO: remove annotation stmt: Select = ( select(d.EightBall).filter_by(roomid=room.roomid).order_by(d.EightBall.answer) ) html = HTMLPageCommand( user, room, "eightballanswers", stmt, title="8ball answers", fields=[("Answer", "answer")], actions=[ ( "removeeightballanswerid", ["_roomid", "id", "_page"], False, "trash", "Delete", ) ], ) html.load_page(page) return html.doc with open(utils.get_data_file("eightball.json"), encoding="utf-8") as f: DEFAULT_ANSWERS: dict[str, list[str]] = json.load(f)