async def iterm_focus_monitor(connection):
    app = await iterm2.async_get_app(connection)
    # input_lang = 'U.S.'
    async with iterm2.FocusMonitor(connection) as monitor:
        print("Focus monitoring online")
        while True:
            update = await monitor.async_get_next_update()
            window = app.current_terminal_window
            ##
            # zp("ecdbg {update.__dict__!s}")
            # if update.window_changed:
            #     zp("ecdbg {update.window_changed.__dict__!s}")
            #     zp("ecdbg {update.window_changed.event.__dict__!s}")
            # zp("ecdbg {window.__dict__!s}")
            ##
            if (update.active_session_changed or update.selected_tab_changed
                    or update.window_changed) and window and window.current_tab:
                # embed(using='asyncio')
                if update.window_changed:
                    focus = update.window_changed.event.name
                    if focus == 'TERMINAL_WINDOW_BECAME_KEY':
                        # input_lang = z('input-lang-get-darwin').outrs
                        # z('input-lang-set en')
                        z('input-lang-push en')
                    else:
                        # z('input-lang-set {input_lang}')
                        z('input-lang-pop')
                    zp('reval-ec redis-cli set iterm_focus {focus} 2>&1')
                zp('reval-ec redis-cli set iterm_active_session {window.current_tab.active_session_id} 2>&1')
async def callback(event: events.callbackquery.CallbackQuery.Event):
    # We can edit the event to edit the clicked message.
    chat = await event.get_chat()
    pl = str(event.data, "utf-8")
    # embed2()
    msg = await borg.get_messages(chat, ids=event.message_id)
    msg_id = event.message_id
    print(f"pl: {pl}")
    # await event.reply(f'pl: {pl}\n\n{event.__dict__}\n\n{event.query.__dict__}')
    m_zsh = p_zsh.match(pl)
    if pl.startswith("zsh_"):
        key = create_key(pl)
        results = list(
            z("""jfromkey {key}""").iter0()
        )  # TODO inject data from event, e.g., the sender's name
        out = results[0]  # contains both stdout and stderr
        jaction = results[1]
        if jaction == "edit":
            # await event.edit(out)  # this loses the buttons
            await msg.edit(out)
        elif jaction == "toast":
            await event.answer(message=out)
        else:
            await discreet_send(event, out, msg)
    elif m_zsh:
        res: CmdResult = z(m_zsh.group(1))
        await discreet_send(event, res.outerr, msg)
    else:
        await event.reply(pl)
    await event.answer()  # does nothing if we answered before
async def discreet_send(client,
                        receiver,
                        message,
                        file=None,
                        force_document=False,
                        parse_mode=None,
                        reply_to=None,
                        link_preview=False):
    message = message.strip()
    last_msg = reply_to
    if file and len(file) == 1:
        file = file[0]
    if len(message) == 0:
        if file:
            last_msg = await client.send_file(receiver,
                                              file,
                                              reply_to=last_msg,
                                              allow_cache=False)
        return last_msg
    else:
        length = len(message)
        if length <= 12000:
            s = 0
            e = 4000
            while length > s:
                last_msg = await client.send_message(
                    receiver,
                    message[s:e],
                    file=file,
                    force_document=force_document,
                    parse_mode=parse_mode,
                    link_preview=link_preview,
                    reply_to=last_msg)
                s = e
                e = s + 4000
        else:
            from brish import z
            f = z('''
            local f="$(gmktemp --suffix .txt)"
            ec {message} > "$f"
            ec "$f"
            ''').outrs
            last_msg = await client.send_file(
                receiver,
                f,
                reply_to=last_msg,
                allow_cache=False,
                caption='This message is too long, so it has been sent as a text file.')
            z('command rm {f}')
            if file:
                last_msg = await client.send_file(receiver,
                                                  file,
                                                  reply_to=last_msg,
                                                  allow_cache=False)
        return last_msg
def html2org(html):
    # tmp = "tmp.html"
    tmp = z("mktemp").outrs
    res = z("cat > {tmp}", cmd_stdin=html)
    assert res
    res = z("html2org {tmp}")
    assert res
    z("command rm {tmp}")
    return res.outrs
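# A minimal usage sketch, assuming the external `html2org` converter invoked above is on PATH;
# the sample HTML string is made up for illustration:
org_text = html2org("<p>Hello <b>world</b></p>")
print(org_text)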
async def _(event):
    # Doesn't trigger with whitespace, so we use base64. Note that URL-encoding is useless.
    # Even with base64 there is a max length limit!
    # TODO: We should implement a Redis-based ID-to-cmd map. We can use the first char to set
    # the interpretation mode, since just the magnet hash is shorter than the max length.
    # This way we will have the best of both worlds.
    # embed2()
    # event.edit(f".a {event.pattern_match.group(1)}")
    cmd = event.pattern_match.group(1)
    cmdd = z("<<<{cmd} base64 -D").outrs
    print(f"start received: {cmdd}")
    await aget(event, command=cmdd)
async def discreet_send(event,
                        message,
                        reply_to=None,
                        quiet=False,
                        link_preview=False):
    message = message.strip()
    if quiet or len(message) == 0:
        return reply_to
    else:
        length = len(message)
        last_msg = reply_to
        if length <= 12000:
            s = 0
            e = 4000
            while length > s:
                last_msg = await event.respond(
                    message[s:e],
                    link_preview=link_preview,
                    reply_to=(reply_to if s == 0 else last_msg),
                )
                s = e
                e = s + 4000
        else:
            chat = await event.get_chat()
            f = z("""
            local f="$(gmktemp --suffix .txt)"
            ec {message} > "$f"
            ec "$f"
            """).outrs
            async with borg.action(chat, "document") as action:
                last_msg = await borg.send_file(
                    chat,
                    f,
                    reply_to=reply_to,
                    allow_cache=False,
                    caption="This message is too long, so it has been sent as a text file.",
                )
            z("command rm {f}")
        return last_msg
def search_torrentz_eu(search_query):
    r = []
    url = "https://torrentz2.eu/search?safe=1&f=" + search_query
    # scraper = cfscrape.create_scraper()  # returns a CloudflareScraper instance
    # raw_html = scraper.get(url).content
    raw_html = z("curlfull.js {url}").out
    # print(f"tz2 html: {raw_html}")
    soup = BeautifulSoup(raw_html, "html.parser")
    results = soup.find_all("div", {"class": "results"})
    # print(results)
    if len(results) > 0:
        results = results[0]
        items = results.find_all("dl")
        for item in items:
            # print(item)
            """The content was scraped on 23.06.2018 15:40:35."""
            dt = item.find_all("dt")[0]
            dd = item.find_all("dd")[0]
            #
            try:
                link_and_text = dt.find_all("a")[0]
                link = link_and_text.get("href")[1:]
                title = link_and_text.get_text()
                span_elements = dd.find_all("span")
                date = span_elements[1].get_text()
                size = span_elements[2].get_text()
                seeds = span_elements[3].get_text()
                peers = span_elements[4].get_text()
                #
                r.append({
                    "title": title,
                    "hash": link,
                    "date": date,
                    "size": size,
                    "seeds": seeds,
                    "peers": peers,
                })
            except:
                pass
    return r
async def _(event):
    if event.fwd_from:
        return
    start = datetime.now()
    # await event.edit("Processing ...")
    input_type = event.pattern_match.group(1) or "torrentz2.eu"  # or "idop.se"
    input_str = event.pattern_match.group(2)
    logger.info(f"{input_type}: {input_str}")  # pylint:disable=E0602
    search_results = []
    # ix() ; embed(using='asyncio')
    if input_type == "torrentz2.eu":
        search_results = search_torrentz_eu(input_str)
    elif input_type == "idop.se":
        search_results = search_idop_se(input_str)
    # logger.info(search_results)  # pylint:disable=E0602
    output_str = ""
    i = 0
    for result in search_results:
        if i > 10:
            break
        magnet = z('hash2magnet {result["hash"]}').outrs
        message_text = ("👉 <a href=https://t.me/TorrentSearchRoBot?start=" +
                        result["hash"] + ">" + result["title"] + ": " +
                        "</a>" + " \r\n")
        # message_text += "<a href=https://t.me/spiritwellbot?start=" + z('base64', cmd_stdin=zs('magnet2torrent {magnet}')).outrs + ">" + 'Get hash torrent!' + ": " + "</a>" + " \r\n"
        # Telegram's HTML doesn't support magnet hrefs.
        message_text += " Hash Magnet: " + magnet + "\r\n"
        message_text += " Size: " + result["size"] + "\r\n"
        # message_text += " Uploaded " + result["date"] + "\r\n"
        message_text += (" Seeds: " + result["seeds"] + "\r\n Peers: " +
                         result["peers"] + " \r\n")
        message_text += "\r\n"
        output_str += message_text
        i = i + 1
    end = datetime.now()
    ms = (end - start).seconds
    await event.reply(
        f"Scraped {input_type} for {input_str} in {ms} seconds. Obtained results:\n{output_str}",
        link_preview=False,
        parse_mode="html",
    )
def meta_get_props(c):
    meta = ":PROPERTIES:"
    if c.author:  # can be None when they are deleted
        meta += f"\n:Author: {c.author.name}"
    if hasattr(c, "score"):
        meta += f"\n:Score: {c.score}"
    if hasattr(c, "created_utc") and c.created_utc:
        meta += f"\n:DateUnix: {c.created_utc}"
        res = z('gdate -d "@"{c.created_utc} +"%Y-%b-%d"')
        if res:
            meta += f"\n:DateShort: {res.outrs}"
    if getattr(c, "link_flair_text", None):
        meta += f"\n:FlairText: {c.link_flair_text}"
    meta += "\n:END:\n"
    return meta
def chooseAct(fuzzyChoice: str):
    ##
    # https://github.com/seatgeek/fuzzywuzzy/issues/251 : the token versions are somewhat broken
    # https://github.com/maxbachmann/RapidFuzz/issues/76
    # res = process.extractOne(fuzzyChoice, fuzzy_choices, scorer=fuzz.WRatio, processor=(lambda x: x.replace('_', ' ')))[0]
    # fuzz.partial_ratio
    ##
    # res = subs_fuzzy.get(fuzzyChoice)
    # if res:
    #     res = res[0][1]
    ##
    res = z("fzf --filter {fuzzyChoice} | ghead -n1",
            cmd_stdin=fuzzy_choices_str).outrs
    if not res:
        res = subs_fuzzy.get(fuzzyChoice)
        if res:
            res = res[0][1]
    ##
    if res:
        if res in subs:
            res = subs[res]
        return res
    return fuzzyChoice
from brish import z, zp, Brish

NI = True

name = "A$ron"
z("echo Hello {name}")

t1 = ""


def test1():
    assert t1 == "Hello A$ron"
    return True


NI or test1()

alist = ["# Fruits", "1. Orange", "2. Rambutan", "3. Strawberry"]
z("for i in {alist} ; do echo $i ; done")

t2 = ""


def test2():
    assert t2 == """# Fruits
1. Orange
2. Rambutan
3. Strawberry"""


NI or test2()
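# A minimal sketch of how the empty placeholders above could be filled so the tests pass,
# assuming (as the other snippets in this collection suggest) that z(...) returns a result
# object whose .outrs attribute is the command's stdout with trailing whitespace stripped:
t1 = z("echo Hello {name}").outrs
t2 = z("for i in {alist} ; do echo $i ; done").outrs
NI = False  # enable the checks
test1()
test2()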
def get_results(command: str, json_mode: bool = True):
    cwd = dl_base + "Inline " + str(uuid4()) + "/"
    Path(cwd).mkdir(parents=True, exist_ok=True)
    res = z(
        """
        if cd {cwd} ; then
            {command:e}
        else
            echo Inline Query: cd failed >&2
        fi
        """,
        fork=True,
    )
    # @design We might want to add a timeout here. We also need a file cacher ...
    # Easier yet, just cache the results array and reset it using our 'x' command.
    out = res.outerr
    if WHITESPACE.match(out):
        out = f"The process exited {res.retcode}."
    out_j = None
    results = []
    if json_mode:
        try:
            out_j = json.loads(res.out)
        except:
            pass
    if out_j and not isinstance(out_j, str) and isinstance(out_j, Iterable):
        print(res.err)
        if True:
            for item in out_j:
                if isinstance(item, dict):
                    tlg_title = item.get("tlg_title", "")
                    tlg_preview = bool(item.get("tlg_preview", "y"))
                    tlg_video = item.get("tlg_video", "")
                    # MIME type of the content of the video URL: "text/html" or "video/mp4".
                    tlg_video_mime = item.get("tlg_video_mime", "video/mp4")
                    tlg_img = item.get("tlg_img", "")
                    tlg_img_thumb = item.get("tlg_img_thumb", "") or tlg_img
                    tlg_content = item.get("tlg_content", item.get("caption", ""))
                    tlg_parsemode = item.get("tlg_parsemode", "").lower()
                    pm = DEFAULT_NONE
                    if tlg_parsemode == "md2":
                        pm = ParseMode.MARKDOWN_V2
                    elif tlg_parsemode == "md":
                        pm = ParseMode.MARKDOWN
                    elif tlg_parsemode == "html":
                        pm = ParseMode.HTML
                    print(f"Parse mode: {pm}, preview: {tlg_preview}")
                    if tlg_img:
                        # There is a bug that makes, e.g., `@spiritwellbot kitsu-getall moon 2 fin`
                        # show only two returned results, even though we return 10 results.
                        # I don't know the cause.
                        print(f"tlg_img found: {tlg_title}: {tlg_img} , {tlg_img_thumb}")
                        results.append(
                            InlineQueryResultPhoto(
                                id=uuid4(),
                                photo_url=tlg_img,
                                thumb_url=tlg_img_thumb,
                                title=f"{tlg_title}",
                                caption=tlg_content[:MEDIA_MAX_LENGTH],
                                parse_mode=pm,
                            ))
                    elif tlg_video:
                        # test: @spiritwellbot ec '[{"tlg_title":"f","tlg_video":"https://files.lilf.ir/tmp/Tokyo%20Ghoul%20AMV%20-%20Run-rVed44_uz8s.mp4"}]' fin
                        print(f"tlg_video found: {tlg_title}: {tlg_video}")
                        results.append(
                            InlineQueryResultVideo(
                                id=uuid4(),
                                video_url=tlg_video,
                                mime_type=tlg_video_mime,
                                # To bypass telegram.error.BadRequest: Video_thumb_url_empty
                                thumb_url=(tlg_img_thumb or
                                           "https://media.kitsu.io/anime/cover_images/3936/original.jpg?1597696323"),
                                title=f"{tlg_title}",
                                caption=tlg_content[:MEDIA_MAX_LENGTH],
                                parse_mode=pm,
                            ))
                    elif tlg_title:
                        print(f"tlg_title found: {tlg_title}")
                        results.append(
                            InlineQueryResultArticle(
                                id=uuid4(),
                                title=tlg_title,
                                thumb_url=tlg_img_thumb,
                                input_message_content=InputTextMessageContent(
                                    tlg_content[:MAX_LENGTH],
                                    disable_web_page_preview=(not tlg_preview),
                                    parse_mode=pm,
                                ),
                            ))
                    # @design We can add an else clause and go to the normal (json-less) mode below.
    else:
        results = [
            InlineQueryResultArticle(
                id=uuid4(),
                # Telegram truncates titles itself, so this is redundant.
                title=out[:150],
                input_message_content=InputTextMessageContent(
                    out[:MAX_LENGTH], disable_web_page_preview=False),
            )
        ]
    files = list(Path(cwd).glob("*"))
    files.sort()
    for f in files:
        if not f.is_dir():
            file_add = f.absolute()
            base_name = str(os.path.basename(file_add))
            ext = f.suffix
            file = open(file_add, "rb")
            uploaded_file = updater.bot.send_document(tmp_chat, file)
            file.close()
            if uploaded_file.document:
                # print(f"File ID: {uploaded_file.document.file_id}")
                results.append(
                    InlineQueryResultCachedDocument(
                        id=uuid4(),
                        title=base_name,
                        document_file_id=uploaded_file.document.file_id,
                    ))
            else:
                print("BUG?: Uploaded file had no document!")
    z("command rm -r {cwd}")
    print(f"len(results): {len(results)}")
    return results
    level=logging.INFO)
logger = logging.getLogger(__name__)
##
MAX_LENGTH = 4050  # https://stackoverflow.com/questions/46011661/how-to-send-large-size-of-the-caption-on-telegram-bot-using-c
MEDIA_MAX_LENGTH = 1000
PAF = re.compile(r"(?im)^(?:\.a(n?)\s+)?((?:.|\n)*)\s+fin$")
PDI = re.compile(r"(?im)^\.di\s+(\S+)(?:\s+(\S*))?\s+fin$")
PC_KITSU = re.compile(r"(?im)^\.ki\s+(.+)$")
PC_GOO = re.compile(r"(?im)^\.(g|d|as)\s+(.+)$")
WHITESPACE = re.compile(r"^\s*$")
dl_base = os.getcwd() + "/dls/"
##
# @todoc A throwaway group/channel for storing files. (I use TMPC.)
tmp_chat = int(z('ecn "${{borg_tmpc:--1001215308649}}"').outrs)  # -1001496131468 (old TMPC)
##
lock_inline = RLock()
##


def start(update, context):
    """Send a message when the command /start is issued."""
    update.message.reply_text("Hi!")


def help_command(update, context):
    """Send a message when the command /help is issued."""
    update.message.reply_text("Help!")
async def give_contacts(cwd, **kwargs):
    c = z("jq > {cwd}/contacts.json", cmd_stdin=result.to_json())
def main():
    ## inputs
    brish_mode = False
    if len(sys.argv) >= 2:  # otherwise reads stdin
        query = sys.argv[1]
        if query in ["-h", "--help"]:
            print(
                "Usage: ddg2json < HTML_of_the_query_page_of_DuckDuckGo\nOutput: JSON of the results in that HTML"
            )
            exit(0)
        brish_mode = True
    ##
    # Enable logging
    logging.basicConfig(
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        level=logging.INFO,
    )
    logger = logging.getLogger(__name__)
    ##
    html = None
    if brish_mode:
        if not os.environ.get("NIGHTDIR"):
            logger.error(
                "You are trying to use brish_mode, which needs my personal scripts loaded into your system (which you do not currently have). Aborting."
            )
            exit(1)
        from brish import z

        # import urllib.parse
        # query_encoded = urllib.parse.quote(query)
        zres = z("ddg-html {query}")
        if not zres:
            logger.error(f"Could not download the page for query '{query}'")
            exit(1)
        html = zres.outrs
    else:
        html = sys.stdin.read()
    soup = BeautifulSoup(html, features="lxml")
    results = soup.find("div", id="links")
    if not results:
        logger.error(f"Did not find a div with id 'links' for query '{query}'")
        exit(1)
    results = results.find_all("div", "result__body")
    # embed() ; exit(0)
    processed = []
    for r in results:
        try:
            link = r.find("a", "result__a")
            if not link:
                continue
            url = link.get("href")
            if url.startswith("https://duckduckgo.com") or (not url.startswith("http")):
                continue
            title = re.sub(r"\s+", ' ', link.getText(strip=False)).strip()
            snippet = ""
            try:
                # snippet = r.find("div", "result__snippet").getText(strip=True)
                snippet = r.find("div", "result__snippet").getText(strip=False).strip()
            except:
                pass
            processed.append({"title": title, "url": url, "abstract": snippet})
        except:
            logger.warning(traceback.format_exc())
    print(json.dumps(processed))
#!/usr/bin/env python3
# --noauth_local_webserver
try:
    from ipydex import IPS, ip_syshook, ST, activate_ips_on_exception, dirsearch
    # activate_ips_on_exception()
    from brish import z, zq, zs, zp

    z('cdm ~/tmp/delme/')

    import datetime
    now = datetime.datetime.now()
    cutoff_date = (now - datetime.timedelta(days=7)).strftime("%Y/%m/%d")

    import os.path
    import traceback
    from IPython import embed
    from plumbum import local
    import tempfile
    import base64
    from email.mime.audio import MIMEAudio
    from email.mime.base import MIMEBase
    from email.mime.image import MIMEImage
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    import mimetypes
    import os
    import datetime
    import re
    import copy
    import sys
    from bs4 import BeautifulSoup, SoupStrainer
import os
import re

import AppKit  # pyobjc; needed for the iTerm2 launch check below
from brish import z, zp
from IPython import embed
import nest_asyncio


def embed2():
    nest_asyncio.apply()
    embed(using='asyncio')


bundle = "com.googlecode.iterm2"
if not AppKit.NSRunningApplication.runningApplicationsWithBundleIdentifier_(bundle):
    AppKit.NSWorkspace.sharedWorkspace().launchApplication_("iTerm")

# if os.environ.get("ITERM2_COOKIE", "") == "":
os.environ["ITERM2_COOKIE"] = z("""osascript -e 'tell application "iTerm2" to request cookie' """).outrs

import iterm2

tab_actiavte_pat = re.compile(r'tab_activate (?P<index>\d+)')


async def tab_actiavte(connection, window, index=4):
    await window.tabs[index].async_activate()


async def handle_client_factory(connection):
    app = await iterm2.async_get_app(connection)
    window = app.current_terminal_window

    async def handle_client(reader, writer):
        request = None
        request = (await reader.read(255)).decode('utf8')
        m = tab_actiavte_pat.match(request)
        response = ""
        try:
async def _process_msg(m0, text_input=False, reload_on_failure=True, out="", received_at=None):
    global starting_anchor

    m0_id = m0.id

    def set_msg_act(some_act):
        msg2act[m0_id] = some_act.id

    async def edit(text: str, truncate=True, **kwargs):
        try:
            # if not text:  # might be sending files
            #     return
            text_raw = text
            if len(text) > 4000:
                if truncate:
                    text = f"{text[:4000]}\n\n..."
                else:
                    text = text[:4000]
            await borg.edit_message(m0, text, **kwargs)
            if not truncate:
                await reply(text_raw[4000:])  # kwargs should not apply to a mere text message
        except telethon.errors.rpcerrorlist.MessageNotModifiedError:
            pass

    async def reply(text: str, **kwargs):
        if not text:  # files are sent via send_file
            return
        text = text.strip()
        if len(text) > 4000:
            await m0.reply(text[:4000], **kwargs)
            await reply(text[4000:])  # kwargs should not apply to a mere text message
        else:
            await m0.reply(text, **kwargs)

    async def send_file(file, **kwargs):
        if file:
            # await borg.send_file(timetracker_chat, file, allow_cache=False, **kwargs)
            await send_files(timetracker_chat, file, **kwargs)

    async def warn_empty():
        await m0.reply("The empty database has no last act.")

    async def process_reminders(text):
        if text in reminders_immediate:
            rem = reminders_immediate[text]
            out_add(rem, prefix="\n🌈 ")
            await edit(out)
            # await reply(rem)

    choiceConfirmed = False
    delayed_actions = []
    delayed_actions_special = []

    def out_add(text, prefix="\n\n"):
        nonlocal out
        if text:
            if out:
                out += prefix + text
            else:
                out = text

    def text_sub(text):
        nonlocal choiceConfirmed
        nonlocal delayed_actions  # @redundant as we do not assign to it

        text = text.strip()
        if not text:
            choiceConfirmed = True
            return out
        # @badDesign @todo3 These suffixes are only relevant for adding new acts and renaming
        # them, but they are acted on globally ...
        while (
                len(text) >= 2 and text[-1] in suffixes
        ):  # suffixes should leave some prefix behind, hence the length check
            suffix = text[-1]
            action = suffixes[suffix]
            if action:
                delayed_actions.append(action)
            else:
                delayed_actions_special.append(suffix)
            text = text[:-1]
        if not text:
            choiceConfirmed = True
            return out
        if not text.startswith("."):
            text = text.lower()  # iOS capitalizes the first letter
        if text in subs:
            choiceConfirmed = True
            text = subs[text]
        if text in subs_additional:
            choiceConfirmed = True
        if text in subs_commands:
            choiceConfirmed = True
            text = subs_commands[text]
        ## MOVED to text_sub_finalize
        # if not choiceConfirmed:
        #     if not text.startswith("."):
        #         tokens = list(text.split('_'))
        #         if len(tokens) > 1:
        #             tokens[0] = text_sub_full(tokens[0])
        #             choiceConfirmed = True
        #             text = '_'.join(tokens)
        ##
        return text

    def text_sub_finalize(text):
        nonlocal choiceConfirmed
        nonlocal delayed_actions

        if text.startswith("."):
            text = text[1:]
            add_user_choice(text)
        elif not choiceConfirmed:
            if not ("_" in text):
                text = text.replace(" ", "_")
            tokens = list(text.split("_"))
            if len(tokens) > 1:
                tokens[0] = text_sub_full(tokens[0])
                text = "_".join(tokens)
                add_user_choice(text)
            else:
                text = chooseAct(text)
        for action in delayed_actions:
            mode, c = action
            if mode == 0:
                pre = f"{c}_"
                if not text.startswith(pre):
                    text = f"{pre}{text}"
            elif mode == 1:
                post = f"_{c}"
                if not text.endswith(post):
                    text += post
        return text

    def text_sub_full(text, reset_delayed_actions=True):
        nonlocal choiceConfirmed
        nonlocal delayed_actions

        tmp = choiceConfirmed  # out of caution
        choiceConfirmed = False
        if reset_delayed_actions:
            tmp2 = delayed_actions
            delayed_actions = []
            # @warn delayed_actions_special currently does not reset because I don't think it matters
        res = text_sub_finalize(text_sub(text))
        choiceConfirmed = tmp
        if reset_delayed_actions:
            delayed_actions = tmp2
        return res

    try:
        if text_input == False:  # not None, but an explicit False
            text_input = m0.text
        elif not text_input:
            return out

        if text_input.startswith("#"):  # if the input starts with a comment, discard the whole input
            return out

        async def multi_commands(text_input):
            nonlocal out

            text_inputs = text_input.split("\n")
            if len(text_inputs) > 1:
                for text_input in text_inputs:
                    out = await _process_msg(
                        m0,
                        text_input=text_input,
                        reload_on_failure=reload_on_failure,
                        out=out,
                        received_at=received_at,
                    )
                return True, out
            return False, False

        done, res = await multi_commands(text_input)
        if done:
            return res

        m0_text_raw = z("per2en", cmd_stdin=text_input).outrs
        m0_text = text_sub(m0_text_raw)

        done, res = await multi_commands(m0_text)
        if done:
            return res

        print(f"TT got: {repr(text_input)} -> {repr(m0_text)}")

        if (not text_input or text_input.startswith("#")
                or text_input.isspace()):  # comments :D
            # out_add("comment")
            return out
        elif m0_text == "man":
            out_add(
                yaml.dump(suffixes) + "\n" + yaml.dump(subs_commands) + "\n" +
                yaml.dump(subs) + "\n" + yaml.dump(list(subs_additional)) +
                "\n" + yaml.dump(sorted(user_choices)))
            await edit(out, truncate=False)
            return out
        elif m0_text == ".l":
            await reload_tt()
            out_add("reloaded")
            return out
        elif m0_text == ".error":
            raise Exception(".error invoked")
            return "@impossible"

        if not received_at:  # None and "" are both acceptable as null
            received_at = datetime.datetime.today()
        else:
            print(f"_process_msg: received_at={received_at}")
            pass

        rep_id = m0.reply_to_msg_id
        last_act = None
        if rep_id:
            act_id = msg2act.get(rep_id, None)
            if not act_id:
                out_add(
                    f"The message you replied to did not have its id stored in msg2act."
                )
                await edit(out)
                return out
            else:
                q = Activity.select().where(
                    Activity.id == act_id
                )  # this can still be a new record if the record we are trying to get was the last one when it was deleted, as the ids just increment from the last one and are not unique where deletion is concerned
                if q.exists():
                    last_act = q.get()
                else:
                    out_add(
                        f"The message you replied to has had its associated act deleted!"
                    )
                    await edit(out)
                    return out
        else:
            # last_act_query = Activity.select().order_by(Activity.end.desc())
            last_act_query = (Activity.select().where(
                Activity.end <= received_at).order_by(Activity.end.desc()))
            last_act = None
            if last_act_query.exists():
                last_act = last_act_query.get()

        if m0_text in (".show", ".sh"):
            out_add(f"last_act: {last_act}")
            await edit(out)
            return out

        m = del_pat.match(m0_text)
        if m:
            del_count = 0
            if m.group(1):
                cutoff = received_at - datetime.timedelta(
                    minutes=float(m.group(1) or 5))
                ##
                # (Activity.end > cutoff) |
                del_count = (Activity.delete().where(
                    (Activity.start > cutoff) &
                    (Activity.start <= received_at)).execute())
                ##
                out_add(f"Deleted the last {del_count} activities")
            elif last_act:
                out_add(f"Deleted the last act: {last_act}")
                del_count = last_act.delete_instance()
                if del_count != 1:  # @impossible
                    out_add(f"ERROR: Deletion has failed. Deleted {del_count}.")
            await edit(out)
            return out

        if m0_text == "w":
            starting_anchor = received_at
            out_add(f"Anchored to {starting_anchor}")
            await edit(out)
            return out

        if m0_text == "debugme":
            Activity.delete().where(Activity.name == "dummy").execute()
            Activity(
                name="dummy",
                start=(received_at - datetime.timedelta(days=6 * 30, hours=7)),
                end=(received_at - datetime.timedelta(days=6 * 30)),
            ).save()
            Activity(
                name="dummy",
                start=(received_at - datetime.timedelta(days=1 * 30, hours=3)),
                end=(received_at - datetime.timedelta(days=1 * 30)),
            ).save()
            Activity(
                name="dummy",
                start=(received_at - datetime.timedelta(days=10 * 30, hours=10)),
                end=(received_at - datetime.timedelta(days=10 * 30)),
            ).save()
            out_add("DEBUG COMMAND")
            await edit(out)
            return out

        m = out_pat.match(m0_text)
        if m:
            output_mode = int(m.group("mode") or 1)
            treemap_enabled = bool(int(m.group("treemap") or 1))
            repeat = int(m.group("repeat") or 0)
            cmap = m.group("cmap")
            hours = m.group("t")
            res = None

            async def send_plots(out_links, out_files):
                out_links = "\n".join(out_links)
                out_add(out_links, prefix="\n")
                await edit(f"{out}", parse_mode="markdown")
                ##
                if False:  # send as an album
                    await send_file(out_files)
                else:
                    for f in out_files:
                        await send_file(f)
                ##

            async def report(hours=None, output_mode=1, received_at=None, title=None):
                if not received_at:
                    out_add("report: received_at is empty")
                    await edit(f"{out}", parse_mode="markdown")
                    return

                if output_mode in (3, ):
                    out_add("Generating stacked area plots ...")
                    await edit(f"{out}", parse_mode="markdown")
                    days = float(hours or 7)
                    a = stacked_area_get_act_roots(
                        repeat=(repeat or 20),
                        interval=datetime.timedelta(days=days),
                        received_at=received_at,
                    )
                    try:
                        lock_tt.release()
                        out_links, out_files = await visualize_stacked_area(
                            a, days=days, cmap=cmap)
                        await send_plots(out_links, out_files)
                    finally:
                        await lock_tt.acquire()

                if hours:
                    res = activity_list_to_str_now(
                        delta=datetime.timedelta(hours=float(hours)),
                        received_at=received_at,
                    )
                else:
                    low = received_at.replace(hour=DAY_START,
                                              minute=0,
                                              second=0,
                                              microsecond=0)
                    if low > received_at:
                        low = low - datetime.timedelta(days=1)
                    res = activity_list_to_str(low, received_at)
                if timedelta_total_seconds(res["acts_agg"].total_duration) == 0:
                    out_add("report: acts_agg is zero.")
                    await edit(f"{out}", parse_mode="markdown")
                    return
                if output_mode in (0, 1):
                    out_add(res["string"])
                    await edit(f"{out}", parse_mode="markdown")
                if output_mode in (1, 2):
                    out_add(f"Generating plots ...", prefix="\n")
                    await edit(f"{out}", parse_mode="markdown")
                    try:
                        lock_tt.release()
                        out_links, out_files = await visualize_plotly(
                            res["acts_agg"],
                            title=title,
                            treemap=treemap_enabled)
                        await send_plots(out_links, out_files)
                    finally:
                        await lock_tt.acquire()

            fake_received_at = received_at
            for i in range(0, repeat + 1):
                title = None
                if repeat > 0 and not (output_mode in (3, )):
                    title = f"Reporting (repeat={i}, hours={hours}, received_at={fake_received_at}):"
                    if i > 0:
                        out_add(title)
                        # await reply(title)
                await report(
                    hours=hours,
                    output_mode=output_mode,
                    received_at=fake_received_at,
                    title=title,
                )
                if output_mode in (3, ):
                    break
                fake_received_at = fake_received_at - datetime.timedelta(
                    hours=float(hours or 24))
                fake_received_at = fake_received_at.replace(hour=(DAY_START - 1),
                                                            minute=59,
                                                            second=59,
                                                            microsecond=0)
            return out

        m = habit_pat.match(m0_text)
        if m:
            habit_name = m.group("name")
            habit_name = habit_name.split(";")
            habit_name = [
                name.strip() for name in habit_name
                if name and not name.isspace()
            ]
            # habit_name = [text_sub_full(name) for name in habit_name]
            out_add(f"{'; '.join(habit_name)}")
            habit_mode = int(m.group("mode") or 0)
            habit_max = int(m.group("max") or 0)
            habit_delta = datetime.timedelta(days=float(m.group("t") or 30))  # days
            correct_overlap = True
            day_start = DAY_START
            negative_previous_year = True
            colorscheme1 = "BuGn_9"
            colorscheme2 = "Blues_9"
            if habit_mode == 2:
                correct_overlap = False
                day_start = 15
                negative_previous_year = False
                # colorscheme1 = 'PuRd_9'
                # colorscheme2 = 'PuBu_9'
                ##
                colorscheme1 = "PuRd_9"
                colorscheme2 = "YlGnBu_9"
                ##
                neutral_start = 23
                neutral_start = 24 + 0
                # neutral_start = 24 + 3
            colorscheme1 = m.group("cs1") or colorscheme1
            colorscheme2 = m.group("cs2") or colorscheme2
            habit_data = activity_list_habit_get_now(
                habit_name,
                delta=habit_delta,
                mode=habit_mode,
                day_start=day_start,
                correct_overlap=correct_overlap,
                received_at=received_at,
            )

            def raw_acts_to_start_offset(habit_data):
                tmp = habit_data
                habit_data = dict()
                for date, act_list in tmp.items():
                    if len(act_list) == 0:
                        start_offset = 0
                    else:
                        s = act_list[0].start
                        s = s - s.replace(hour=0, minute=0, second=0, microsecond=0)
                        s = s.total_seconds() / 3600.0
                        if s < day_start:
                            s += 24
                        start_offset = s - neutral_start
                    habit_data[date] = round(start_offset, 1)
                return habit_data

            if habit_mode == 2:
                habit_data = raw_acts_to_start_offset(habit_data)
            out_add(f"{yaml.dump(habit_data)}")
            habit_data.pop(received_at.date(), None)

            def mean(numbers):
                numbers = list(numbers)
                return float(sum(numbers)) / max(len(numbers), 1)

            average = mean(v for k, v in habit_data.items())
            out_add(f"average: {round(average, 1)}", prefix="\n")
            await edit(out)
            ##
            # ~1 day(s) left empty as a buffer
            habit_delta = datetime.timedelta(days=364)
            habit_data = activity_list_habit_get_now(
                habit_name,
                delta=habit_delta,
                mode=habit_mode,
                day_start=day_start,
                correct_overlap=correct_overlap,
                fill_default=False,
                received_at=received_at,
            )
            if habit_mode == 2:
                habit_data = raw_acts_to_start_offset(habit_data)
            img = z("gmktemp --suffix .png").outrs
            resolution = 100
            # * We can increase habit_max by 1.2 to still be able to show overwork, but perhaps
            #   each habit should do that manually.
            # * calendarheatmap is designed to handle a single year. Using this
            #   `year=received_at.year` hack, we can render the previous year's progress as well.
            #   (Might get us into trouble after 366-day years, but probably not.)
            plot_data = {
                str(k.replace(year=received_at.year)):
                (1 if (not negative_previous_year or k.year == received_at.year) else -1) * int(
                    max(-resolution, min(resolution, resolution * (v / habit_max))))
                for k, v in habit_data.items()
            }
            plot_data_json = json.dumps(plot_data)
            # await reply(plot_data_json)
            try:
                lock_tt.release()
                # await reply("lock released")
                res = await za(
                    "calendarheatmap -maxcount {resolution} -colorscale {colorscheme1} -colorscalealt {colorscheme2} -highlight-today '#00ff9d' > {img}",
                    cmd_stdin=plot_data_json,
                )
                if res:
                    await send_file(img)
                else:
                    await reply(
                        f"Creating heatmap failed with {res.retcode}:\n\n{res.outerr}"
                    )
                return out
            finally:
                await lock_tt.acquire()

        m = back_pat.match(m0_text)
        if m:
            if last_act != None:
                mins = float(m.group(1) or 20)  # supports negative numbers, too ;D
                last_act.end -= datetime.timedelta(minutes=mins)
                res = f"{str(last_act)} (Pushed last_act.end back by {mins} minutes)"
                if last_act.end < last_act.start:
                    out_add(f"Canceled: {res}")
                    await edit(out)
                    return out
                last_act.save()
                set_msg_act(last_act)
                out_add(res)
                await edit(out)
                return out
            else:
                await warn_empty()
                return

        m = rename_pat.match(m0_text)
        if m:
            if last_act != None:
                last_act.name = text_sub_full(m.group(1), reset_delayed_actions=False)
                last_act.save()
                set_msg_act(last_act)
                out_add(f"{str(last_act)} (Renamed)")
                await edit(out)
                await process_reminders(last_act.name)
                return out
            else:
                await warn_empty()
                return

        async def update_to_now():
            amount = received_at - last_act.end
            last_act.end = received_at
            last_act.save()
            set_msg_act(last_act)
            out_add(
                f"{str(last_act)} (Updated by {int(round(amount.total_seconds()/60.0, 0))} minutes)"
            )
            await edit(out)
            await process_reminders(last_act.name)
            return out

        if m0_text == ".":
            if last_act != None:
                return await update_to_now()
            else:
                await warn_empty()
                return

        if m0_text == "..":
            # @perf @todo2 this is slow; do it natively
            out_add(z("borg-tt-last 10").outerr)
            await edit(out)
            return out

        m0_text = text_sub_finalize(m0_text)

        start: datetime.datetime
        if "+" in delayed_actions_special:
            start = received_at
            # @warn Unless we update last_act_query to also sort by start date, or add an epsilon
            # to either the new act or last_act, the next call to last_act_query might return
            # either of them (theoretically). In practice, it seems last_act is always returned
            # and this zero-timed new act gets ignored. This is pretty much what we want, except
            # it makes it hard to correct errors with `.del` etc.
            if last_act != None:
                await update_to_now()
        else:
            if starting_anchor == None:
                if last_act == None:
                    await m0.reply(
                        "The database is empty and also has no starting anchor. Create an anchor by sending 'w'."
                    )
                    return
                else:
                    start = last_act.end
            else:
                start = starting_anchor
                starting_anchor = None

        act = Activity(name=m0_text, start=start, end=received_at)
        act.save()
        set_msg_act(act)
        out_add(str(act))
        await edit(out)
        await process_reminders(act.name)
        return out
    except:
        err = "\nJulia encountered an exception. :(\n" + traceback.format_exc()
        logger.error(err)
        out_add(err)
        if reload_on_failure:
            out_add("Reloading ...\n")
            await edit(out, truncate=False)
            await reload_tt()
            return await borg._plugins["timetracker"]._process_msg(
                m0,
                reload_on_failure=False,
                text_input=text_input,
                out=out,
                received_at=received_at,
            )
        else:
            await edit(out)
            return out
telethon.client.uploads._resize_photo_if_needed = _resize_photo_if_needed
##
dl_base = os.getcwd() + "/dls/"

# pexpect_ai = aioify(obj=pexpect, name='pexpect_ai')
pexpect_ai = aioify(pexpect)
# os_aio = aioify(obj=os, name='os_aio')
os_aio = aioify(os)
# subprocess_aio = aioify(obj=subprocess, name='subprocess_aio')
subprocess_aio = aioify(subprocess)

borg: TelegramClient = None  # is set by init

admins = [
    # "Arstar",
    195391705,
]
if z('test -n "$borg_admins"'):
    for admin in list(z("arr0 ${{(s.,.)borg_admins}}").iter0()):
        try:
            admin = int(admin)
        except:
            pass
        admins.append(admin)

# Use chat ids instead. Might need to prepend -100.
adminChats = [
    "1353500128",
    "1185370891",  # HEART
]

brish_count = int(os.environ.get("borg_brish_count", 16))
executor = ThreadPoolExecutor(max_workers=(brish_count + 16))
from brish import z
from uniborg.util import force_async
import logging

try:
    logger = logger or logging.getLogger(__name__)
except:
    logger = logging.getLogger(__name__)
##
import asyncio

lock_tt = asyncio.Lock()
msg2act = dict()
##
DAY_START = 5
is_local = bool(z("isLocal"))
# is_linux = bool(z("isLinux"))
##
from peewee import *
import os
from datetime import timedelta
from pathlib import Path


def timedelta_dur(end, start):
    return end - start


# Path.home().joinpath(Path("cellar"))
db_path = Path(
    z('print -r -- "${{attic_private_dir:-$HOME/tmp}}/timetracker.db"').outrs)
        lv_c += 1
        process_comment(f, c.replies, lv_c, shortname)
        if l != i:
            f.write("\n")


def utf8len(s):
    return len(s.encode('utf-8'))


for result in results:
    try:
        # embed() ; exit()
        # if not "looking at this sub" in result.title:
        #     continue
        f_name = z("ecn {result.title} | str2filename").outrs
        f_name = f_name[0:230]  # [[id:a36bb01f-9b9b-40c9-816a-c762281c43c3][filesystem/filenames.org:maximum allowed length for filenames and paths]]
        if utf8len(f_name) > 240:
            f_name = f_name[0:100]
        if utf8len(f_name) > 240:
            f_name = f_name[0:60]
        shortname = f"{f_name}.{result.id}"
        f_name = f"posts/{shortname}.org"
        z("ensure-dir {f_name}")
        with open(f_name, "w") as f:
            lv = 1
            f.write(f"#+TITLE: {result.title}\n\n")
def process_comment(f, comments, lv, shortname):
    while True:
        try:
            comments.replace_more(limit=None)
            break
        except DuplicateReplaceException:
            print(traceback.format_exc())
            break
        except:
            print(traceback.format_exc())
            time.sleep(1)
    l = len(comments) - 1
    shortname_orig = shortname
    for i, c in enumerate(comments):
        lv_c = lv
        shortname = shortname_orig
        # if isinstance(c, MoreComments):
        #     pass
        ##
        # meta = meta_get(c)
        # meta += ": "
        ##
        meta = meta_get_props(c)
        # Properties are key--value pairs. When they are associated with a single entry or with
        # a tree, they need to be inserted into a special drawer (see
        # [[https://orgmode.org/manual/Drawers.html#Drawers][Drawers]]) with the name '=PROPERTIES=',
        # which has to be located right below a headline and its planning line (see
        # [[https://orgmode.org/manual/Deadlines-and-Scheduling.html#Deadlines-and-Scheduling][Deadlines and Scheduling]])
        # when applicable.
        #
        # Still, putting the props after the heading is no fun; we could rename our drawer to
        # :METADATA:, but why bother?
        ##
        head = "EMPTY_COMMENT"
        # @todo3 Using IDs creates overly long paths. It would be better to just use a counter
        # that goes from 0 to N.
        c_id_old = c.id or z("uuidm").outrs[0:6]
        ##
        if lv_c <= 4:
            c_id = f"{i}_{c_id_old}"
        else:
            c_id = i
            # Using the index in different runs is unreliable, as the comment ordering can
            # change. But since we use the comments' IDs as their filenames, this won't result
            # in data loss, though it can cause data duplication and a flawed comment hierarchy.
            # workarounds:
            # - delete the indices directory and re-run the whole scraping from scratch on every update
            # - @done use the first-n-level comments' IDs as well
        ##
        shortname += f"/{c_id}"
        if c.body_html:
            head = (html2org(c.body_html) or "EMPTY_COMMENT")
        index_file = f'indices/{shortname}/{c_id_old}.org'
        z("ensure-dir {index_file}")
        with open(index_file, "w") as f2:
            f2.write(f"{meta}\n{head}")
        if head.startswith("#+"):
            # do not put blocks in headings (e.g., #+begin_quote)
            author = "deleted"
            if c.author:
                author = c.author.name or author
            head = f"u/{author}:\n{head}"
        head = head or "_"  # empty headers are invalid org-mode
        f.write("\n" + stars(lv_c) + head + "\n" + meta)
        lv_c += 1
        process_comment(f, c.replies, lv_c, shortname)
        if l != i:
            f.write("\n")
def fig_export(
    fig,
    exported_name,
    html_export=True,
    png_export=True,
    svg_export=True,
    pdf_export=True,
    width=600,
    height=400,
    scale=4,
):
    out_links = []
    out_files = []
    if html_export:
        exported_html = f"./plots/{exported_name}.html"
        z("ensure-dir {exported_html}")
        fig.write_html(exported_html, include_plotlyjs="cdn", include_mathjax="cdn")
        # fig.write_html("./plots/exported_full.html")
        z("isDarwin && open {exported_html}")
        is_local or out_links.append(z("jdl-private {exported_html}").outrs)
    if png_export:
        exported_png = f"./plots/{exported_name}.png"
        z("ensure-dir {exported_png}")
        fig.write_image(exported_png, width=width, height=height, scale=scale)
        out_files.append(exported_png)
    if svg_export:
        # SVG needs small sizes
        exported_svg = f"./plots/{exported_name}.svg"
        z("ensure-dir {exported_svg}")
        fig.write_image(exported_svg, width=width, height=height, scale=1)
        out_files.append(exported_svg)
    if pdf_export:
        exported_pdf = f"./plots/{exported_name}.pdf"
        z("ensure-dir {exported_pdf}")
        fig.write_image(exported_pdf, width=width, height=height, scale=1)
        out_files.append(exported_pdf)
    return out_links, out_files
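# A minimal usage sketch for fig_export, assuming plotly is installed; the figure itself is
# illustrative, and the helper scripts (ensure-dir, jdl-private, isDarwin) come from the
# author's personal environment as used above:
import plotly.graph_objects as go

fig = go.Figure(data=go.Bar(x=["read", "work", "rest"], y=[2, 5, 1]))
links, files = fig_export(fig, "example_bar", pdf_export=False)
print(links, files)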
def zn(*a, getframe=3, **kw):
    # runs my personal commands
    if os.environ.get("NIGHTDIR"):
        return z(*a, **kw, getframe=getframe)
    else:
        return None
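# A minimal usage sketch: zn degrades to None when NIGHTDIR is unset, so callers can guard on
# the result (`ecn` is one of the personal commands used elsewhere in these snippets):
res = zn("ecn hello")
if res:
    print(res.outrs)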