import json
import os
import re

# NOTE(review): these imports were not visible in the original chunk even
# though the names were used — confirm against the full file.
from talon import resource
from talon.voice import Context

from . import config
from .text import shrink

# Alternate spoken forms mapped to replacement text, extended with a
# "shrink <word>" entry for every abbreviation in the shrink map.
vocab_alternate = config.load_config_json("vocab_alternate.json", dict)
vocab_alternate.update(
    {f"shrink {k}": v for k, v in shrink.shrink_map.items()})

ctx = Context("vocab")

# Learned-word list persisted next to this module; start empty when the file
# has not been created yet. (Path computation moved out of the `try` — it
# cannot raise FileNotFoundError, keeping the guarded region minimal.)
vocab_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "vocab.json")
try:
    with resource.open(vocab_path) as fh:
        vocab = json.load(fh)
except FileNotFoundError:
    vocab = []


def add_vocab(words):
    """Add `words` to the persistent vocabulary.

    Each word is stripped to alphanumerics, the list is deduplicated and
    sorted, written back to vocab.json, and pushed into the live context so
    newly learned words are recognized immediately.
    """
    global vocab
    vocab += [re.sub("[^a-zA-Z0-9]+", "", w) for w in words]
    vocab = sorted(set(vocab))
    with open(vocab_path, "w") as f:
        json.dump(vocab, f, indent=0)
    # Fix: refresh the active context as well — updating only the module
    # global left the recognizer using the stale word list until reload.
    ctx.vocab = vocab + list(vocab_alternate.keys())


ctx.vocab = vocab + list(vocab_alternate.keys())
ctx.vocab_remove = config.load_config_json("vocab_remove.json", list)
import re
import subprocess
import time

from talon import applescript
import talon.clip as clip
from talon.api import ffi
from talon.voice import Key, press, Str, Context

from user.utility import text
from user.mouse import delayed_click

ctx = Context("terminal", bundle="com.googlecode.iterm2")
ctx.vocab = [
    'docker',
    'talon',
]
ctx.vocab_remove = ['doctor', 'Doctor']

# Spoken name -> directory mapping; populated elsewhere in this file
# (not visible in this chunk) — TODO confirm.
subdirs = {}


def slugify(value):
    """Normalize a string to a spoken-friendly slug.

    Lowercases, strips characters that are neither word characters, spaces,
    nor hyphens, and collapses runs of hyphens/whitespace to single spaces.
    """
    # Fix: use raw strings — "\w"/"\s" in a plain literal is an invalid
    # escape sequence (SyntaxWarning, and an error in future Python).
    value = re.sub(r"[^\w\s-]", "", value).strip().lower()
    value = re.sub(r"[-\s]+", " ", value)
    return value
# NOTE(review): this chunk begins mid-function — the line below is the tail
# of a definition whose start (and `result`) is outside the visible chunk.
insert("".join(result))


def add_jargon(key, meaning):
    # Persist a new jargon substitution and make its spoken form
    # recognizable immediately.
    global jargon_substitutions, ctx
    jargon_substitutions[key] = meaning
    resource.write(jargon_path, json.dumps(jargon_substitutions, indent=2))
    # Reassign (rather than mutate) ctx.vocab so the context picks up the
    # change.
    v = list(ctx.vocab)
    v.append(key)
    ctx.vocab = v


def learn_jargon(m):
    # Capture the current selection via cmd-c; it becomes the meaning of the
    # phrase spoken in `m`.
    with clip.capture() as s:
        press("cmd-c", wait=2000)
    meaning = s.get()  # type: str
    if meaning:
        meaning = meaning.strip()
        key = " ".join(parse_words(m))
        app.notify(f"learned {key}={meaning}")
        add_jargon(key, meaning)


ctx = Context("formatters")
# Vocabulary: base words plus every learned jargon phrase.
ctx.vocab = vocab + list(jargon_substitutions.keys())
ctx.keymap({
    f"({' | '.join(formatters)})+ [<dgndictation>] [over]": format_text,
    "sponge [<dgndictation>] [over]": sponge_format,
    "create jargon <dgndictation> [over]": learn_jargon,
})
# NOTE(review): this chunk begins mid-dict — the entries below are the tail
# of a keymap literal opened before this point (the value ending with
# Key('cmd-b') presumably belongs to a 'bold' entry).
Key('cmd-b'),
    'italics': Key('cmd-i'),
    'underline': Key('cmd-u'),
})
ctx.keymap(keymap)
# Words the recognizer should prefer in this context.
ctx.vocab = [
    'Talon',
    'talon',
    'Netlight',
    'refactoring',
    'Refactoring',
    'Array',
    'array',
    'undefined',
    'Undefined',
    'Back-end',
    'back-end',
]
# Frequent misrecognitions of the vocabulary above, removed from recognition.
ctx.vocab_remove = [
    'tallow',
    'Tallow',
    'tyler',
    'Tyler',
    'mark',
    'Mark',
]
# Context vocabulary (biostatistics / microbiome jargon).
# important to keep this in alphabetical order!
ctx.vocab = [
    # 'batch effects',
    'Alon', 'amplicon', 'anova', 'bias', 'biostat', 'Bryan',
    'CAGs', 'clinicians', 'column', 'columns', 'column space',
    'conda', 'conda install', 'conda activate', 'confirmatory',
    'contig', 'contigs', 'corncob', 'covariate',
    'Daniela', 'deconvolve', 'department', 'denoising', 'devtools',
    'divnet',
    'eigendecomposition', 'estimand',
    'gene', 'generalized', 'genome', 'genomes', 'genus', 'Gitana',
    'github', 'grant', 'grants',
    'heteroskedastic', 'homeworks', 'homoskedastic',
    'idempotent', 'identifiability', 'inverse', 'inverses',
    'lambda',
    'MAG', 'MAGs', 'Mauricio', 'Meren', 'metadata', 'metagenome',
    'metagenomic', 'metagenomics', 'metaphlan', 'MGS', 'microbiome',
    'misc', 'missingness', 'misspecified', 'misspecify', 'mock',
    'modeling',
    'omics',
    # Fix: the original read `'orthonormal' 'OTU',` — the missing comma made
    # Python concatenate the literals into one entry, 'orthonormalOTU'.
    'orthonormal', 'OTU',
    'pangenomic', 'pangenomics', 'pangenome', 'penalization', 'Perlman',
    'phyla', 'phylum', 'phyloseq', 'phylodivnet', 'Poisson', 'Prevotella',
    'p-value', 'p-values',
    'quantile', 'quantiles',
    'rarefying', 'reparameterize', 'reparameterizing', 'reparameterization',
    'resample', 'resamples', 'rightarrow', 'row',
    'semidefinite', 'semiparametric', 'stat',
    'talon', 'taxa', 'taxon', 'tibble', 'tidyverse',
    # 'theorem', 'theorems',
    'underdispersion', 'unifrac',
    # Add American English :(
    'analyze', 'analyzing', 'generalize', 'generalized', 'itemize',
    'normalize', 'organize', 'organized', 'rigorous',
]
def copy_bundle(m):
    # Copy the frontmost app's bundle identifier to the clipboard and show it
    # in a notification (handy when writing app-specific contexts).
    bundle = ui.active_app().bundle
    clip.set(bundle)
    app.notify("Copied app bundle", body="{}".format(bundle))


# Always-on context with a small general-purpose vocabulary.
ctx = Context("standard")
ctx.vocab = [
    "docker",
    "talon",
    "pragma",
    "pragmas",
    "vim",
    "configs",
    "spotify",
    "upsert",
    "utils",
]

keymap = {}
keymap.update({
    "dragon words": "<dgnwords>",
    "dragon dictation": "<dgndictation>",
    "slap": [Key("cmd-right enter")],
    # NOTE(review): chunk is truncated here — the dict literal continues past
    # this point in the original file.
    "cd": "cd ",
# Fix: `time.sleep` is used below but `time` was never imported in the
# visible chunk — this file would raise NameError at recognition time.
import time

from talon.voice import Context, ContextGroup, Key

from ..utils import text


def delay(amount):
    """Return an action that pauses for `amount` seconds, giving JIRA's UI
    time to react between keypresses."""
    return lambda _: time.sleep(amount)


# I don't want to interleave jira navigation with other commands, like dictation.
group = ContextGroup("jira")
ctx = Context("jira", func=lambda app, win: win.title.endswith(" JIRA2"),
              group=group)
ctx.vocab = ["sub-task", "Dwight"]
ctx.keymap({
    "dashboard": Key("g d"),
    "boards": Key("g b"),
    "issues": Key("g i"),
    "find": Key("/"),
    "create": Key("c"),
    "assign [to] <dgndictation> [over]": [Key("a"), delay(0.6), text],
    "assign to me": Key("i"),
    "comment": Key("m"),
    "edit": Key("e"),
    "action <dgndictation> [over]": [Key("."), delay(0.6), text],
    "submit": Key("ctrl+return"),
    "copy link": Key("cmd+l cmd+c"),
    "copy id": Key("cmd+l right alt+shift+left alt+shift+left cmd+c"),
})
from talon import ctrl
from user.std import text
import time

# NOTE(review): Context, ContextGroup and Key are used below but no import
# for them appears in this chunk — confirm `from talon.voice import Context,
# ContextGroup, Key` exists above, otherwise this file fails with NameError
# at load time (compare the sibling jira file that imports them).


def delay(amount):
    # Return an action that sleeps, giving JIRA's UI time to react.
    return lambda _: time.sleep(amount)


# I don't want to interleave jira navigation with other commands, like dictation.
group = ContextGroup("jira")
ctx = Context('jira', func=lambda app, win: win.title.endswith(' JIRA2'),
              group=group)
ctx.vocab = [
    'sub-task',
    'Dwight',
]
ctx.keymap({
    'dashboard': Key('g d'),
    'boards': Key('g b'),
    'issues': Key('g i'),
    'find': Key('/'),
    'create': Key("c"),
    'assign [to] <dgndictation> [over]': [Key("a"), delay(0.6), text],
    'assign to me': Key("i"),
    'comment': Key("m"),
    'edit': Key("e"),
    'action <dgndictation> [over]': [Key("."), delay(0.6), text],
    'submit': Key("ctrl+return"),
    'copy link': Key("cmd+l cmd+c"),
    # NOTE(review): chunk truncated — the keymap dict is not closed here.
    'copy id': Key("cmd+l right alt+shift+left alt+shift+left cmd+c"),
# NOTE(review): chunk begins mid-dict — these entries are the tail of a
# keymap opened before this point.
"state false": i("False"),
"state none": i("None"),
"item <dgndictation> [over]": [
    i(", "),
    formatted_text(DOWNSCORE_SEPARATED, JARGON),
],
"swipe [<dgndictation>] [over]": [
    Key("right"),
    i(", "),
    formatted_text(DOWNSCORE_SEPARATED, JARGON),
],
}
)

# Go commands, active only while the focused file has a .go extension.
ctx = Context("golang", func=extension_context(".go"))
ctx.vocab = ["nil", "context", "lambda", "init"]
# Common misrecognitions of the vocabulary above.
ctx.vocab_remove = ["Linda", "Doctor", "annette"]
ctx.keymap(
    {
        "empty string": i('""'),
        "is not empty": i('.len != 0'),
        "variadic": i("..."),
        "logical and": i(" && "),
        "logical or": i(" || "),
        # Many of these add extra terrible spacing under the assumption that
        # gofmt/goimports will erase it.
        "state comment": i("// "),
        # NOTE(review): chunk truncated mid-entry here.
        "[line] comment <dgndictation>": [
            Key("cmd-right"),
            i(" // "),
            formatted_text(SENTENCE),
# NOTE(review): chunk begins mid-dict — these entries are the tail of a
# Python-context keymap opened before this point.
"class <dgndictation>": [i("class "), formatted_text(GOLANG_PUBLIC), i(":\n")],
"state (past | pass)": i("pass"),
"state true": i("True"),
"state false": i("False"),
"item <dgndictation>": [i(", "), formatted_text(DOWNSCORE_SEPARATED)],
})

# Go commands, active only while the focused file has a .go extension.
ctx = Context("golang", func=extension_context(".go"))
ctx.vocab = [
    'nil',
    'context',
]
ctx.keymap({
    # Many of these add extra terrible spacing under the assumption that
    # gofmt/goimports will erase it.
    "state comment": i(" // "),
    "line comment <dgndictation>": [Key("cmd-right"), i(" // "),
                                    formatted_text(SENTENCE)],
    "state context": i("ctx"),
    "state (funk | func | fun)": i("func "),
    # NOTE(review): chunk truncated mid-entry here.
    "function <dgndictation>": [i("func "),
def press_keys(m):
    # Press the keys captured in a recognition: any spoken modifiers are
    # chorded onto the first key; remaining keys are pressed bare.
    mods = get_modifiers(m)
    keys = get_keys(m)
    if mods:
        # NOTE(review): assumes at least one key accompanies the modifiers —
        # IndexError otherwise; presumably the grammar below guarantees this.
        press("-".join(mods + [keys[0]]))
        keys = keys[1:]
    for k in keys:
        press(k)


ctx = Context("basic_keys")
ctx.vocab = [
    "async",
    "PubNub",
    "scala",
    "int",
    "fullstack",
    "cognito",
    "auth",
    "email",
    "arn",
    "app",
    "gen",
    "impl"
]
ctx.keymap({
    "(uppercase | ship | sky) {basic_keys.alphabet}+ [(lowercase | sunk)]":
        uppercase_letters,
    "{basic_keys.modifiers}* {basic_keys.alphabet}+": press_keys,
    "{basic_keys.modifiers}* {basic_keys.digits}+": press_keys,
    "{basic_keys.modifiers}* {basic_keys.keys}+": press_keys,
    "(go | {basic_keys.modifiers}+) {basic_keys.arrows}+": press_keys,
    # NOTE(review): chunk truncated — the keymap continues past this entry.
    "number {basic_keys.digits}+ [over]": press_keys,
from talon.voice import Context

from . import config
from .text import shrink

# Alternate spoken forms: user-configured entries, plus a generated
# "shrink <word>" form for every abbreviation in the shrink map.
vocab_alternate = config.load_config_json("vocab_alternate.json", dict)
for word, replacement in shrink.shrink_map.items():
    vocab_alternate[f"shrink {word}"] = replacement

ctx = Context("vocab")

# Recognized vocabulary is the configured word list plus every alternate
# spoken form; vocab_remove lists words to drop from recognition.
base_vocab = config.load_config_json("vocab.json", list)
ctx.vocab = base_vocab + list(vocab_alternate.keys())
ctx.vocab_remove = config.load_config_json("vocab_remove.json", list)
from talon.voice import Context

# Minimal demo of per-application vocabulary customization.

# Terminal: recognize "docker"/"talon"; stop hearing the frequent
# misrecognition "doctor".
ctx = Context('vocab_demo1', bundle='com.apple.Terminal')
ctx.vocab = [
    'docker',
    'talon',
]
ctx.vocab_remove = ['doctor', 'Doctor']

# Slack: recognize the capitalized product name.
ctx2 = Context('vocab_demo2', bundle='com.tinyspeck.slackmacgap')
ctx2.vocab = [
    'Talon',
]
from talon.voice import Context, Key

# Always-on Go-language shortcuts (spoken "go ..." commands insert text).
ctx = Context("go")
ctx.vocab = [
    "golang",
    "func",
]
ctx.keymap({
    "go format": "gofmt",
    "go var": "var ",
    "go error": "err",
    "go [short] assign": " := ",
})
# Context vocabulary (programming and personal jargon). `ctx` is defined
# earlier in this file, outside the visible chunk.
ctx.vocab = [
    "photocrop",
    "liveness",
    "uncomment",
    "mock",
    "toph",
    "async",
    "alembic",
    "kubernetes",
    "mutex",
    "semaphore",
    "recurse",
    "f**k",
    "f*****g",
    "f****d",
    "shit",
    "spotify",
    "unix",
    "linux",
    "inline",
    "crontab",
    "birding",
    "warframe",
    "regex",
    "leet",
    "leetcode",
    "destructure",
    "destructured",
    "redis",
    "destructuring",
    "newline",
    "credstash",
]
# Context vocabulary (statistics / geostatistics jargon and R identifiers).
# important to keep this in alphabetical order! Here's some R code to print
# and sort words:
#   sortedWords = sort(allWords)
#   test = paste0("'", paste(sortedWords, collapse="', '"), "'")
ctx.vocab = [
    'ABLine', 'ang', 'approx', 'arg', 'arginf', 'argmax', 'argmin',
    'args', 'argsup', 'asymptotics', 'ATTR',
    'bal', 'Bayes', 'biostatistics', 'Bryan', 'BYM',
    'CAGs', 'CBind', 'cdot', 'CEX', 'Cholesky', 'CI', 'CLim', 'clust',
    'CMD', 'coef', 'coefs', 'col', 'comp', 'cont', 'cor', 'corncob',
    'cov', 'covs', 'CRPS', 'CRS',
    'dat', 'deg', 'dens', 'dev', 'diag', 'dir', 'dist', 'distn',
    'dists', 'DOF',
    'EA', 'eff', 'eps', 'est', 'exp', 'expit',
    'fac', 'Farah', 'fin', 'freq', 'Frequentist',
    'GAM', 'gbd', 'gen', 'gene', 'genome', 'genomes', 'genus', 'geo',
    'GGPlot', 'git', 'Guttorp',
    'hist', 'hyperpar', 'hyperparameter', 'hyperparameters', 'hyperpars',
    'ICAR', 'IHME', 'ind', 'inf', 'init', 'inla', 'int', 'interp',
    'interpolator', 'inv',
    'jitter', 'jittered', 'jonno',
    'krig', 'kriging',
    'LApply', 'lat', 'latex', 'LatticeKrig', 'lbd', 'len', 'lim',
    'lims', 'lin', 'LK', 'LKInla', 'logit', 'lon', 'Lostreia',
    'Lostreian', 'LTY',
    'MApply', 'marg', 'mat', 'mbg', 'med', 'metagenome', 'metagenomic',
    'metagenomics', 'mfrow', 'MKL', 'MLE', 'mock', 'modeling', 'mort',
    'MSE',
    'nabla', 'ncol', 'NOAA', 'nonparametric', 'nrow', 'num', 'Nychka',
    'obs', 'Okada', 'OTU',
    'p-value', 'p-values', 'paigejo', 'paleoseismic', 'par', 'param',
    'parameterizations', 'prec', 'pred', 'preds', 'prob', 'probs',
    'proj', 'psd', 'PTS', 'pwd',
    'QERM', 'quant', 'quantile', 'quartic', 'quintic',
    'RA', 'RAs', 'RBind', 'reg', 'reparameterization', 'reparameterize',
    'reparameterized', 'res', 'resids', 'rev', 'RGDal', 'RM',
    'samp', 'SApply', 'SD', 'SDs', 'SE', 'semiparametric', 'seq', 'SEs',
    'sim', 'sims', 'SP', 'SQ', 'SRS', 'strat', 'strata', 'sub', 'subs',
    'sup', 'surv', 'Szpiro',
    'TA', 'talon', 'TAs', 'taxa', 'taxon', 'tibble', 'TMB', 'TMP',
    'undebug', 'unif', 'urb', 'util', 'UTM',
    'val', 'vals', 'varphi', 'vec',
    'Wakefield', 'WD',
    'XLab', 'XLim', 'XTable',
    'YLab', 'YLim', 'youdub',
    'ZLim',
]
# See https://iterm2.com/documentation-shell-integration.html
# To update available directories on tab changes, `list` again.
import os
import re
import talon.clip as clip
from talon import applescript
from talon.voice import Context, Key, Str, press
from .. import utils
from ..misc.basic_keys import alphabet
from ..misc.mouse import delayed_click

# iTerm2-specific commands and vocabulary tweaks.
ctx = Context("terminal", bundle="com.googlecode.iterm2")
ctx.vocab = ["docker", "talon"]
ctx.vocab_remove = ["doctor", "Doctor"]

try:
    from ..text.homophones import all_homophones
except ImportError:
    # Homophones module unavailable: fall back to a minimal table.
    homophone_lookup = {
        "right": ["right", "write"],
        "write": ["right", "write"],
    }
else:
    # Map from every homophone back to the row it was in.
    homophone_lookup = {}
    for row in all_homophones.values():
        for spoken in row:
            homophone_lookup[spoken.lower()] = row
from talon.voice import Context, Key

# Always-on Perl-language shortcuts.
ctx = Context("perl")
ctx.vocab = [
    "params",
    "perltidy",
    "githook",
    "undef",
    "perl",
    "diag",
    "plack",
    "rehash",
    "mojolicious",
]
ctx.keymap(
    {
        "perl hash bang": "#!/usr/bin/env perl\n",
        "log for pearl": "Log4perl",
        "perl env": "plenv ",
        "see pan (m | em | minus)": "cpanm ",
        "(warren | worn | warn)": "warn ",
        "use pragmas": "use strict;\nuse warnings;\n",
        "use dumper": "use Data::Dumper;",
        "perl use": "use ",
        "perl require": "require ",
        "perl local": "local ",
        "perl my": "my ",
        # NOTE(review): chunk truncated — the keymap continues past here.
        "perl keys": "keys ",
def copy_bundle(_):
    # Copy the frontmost app's bundle identifier to the clipboard and notify.
    bundle = ui.active_app().bundle
    clip.set(bundle)
    app.notify("Copied app bundle", body="{}".format(bundle))


def type_number(m):
    # Convert the spoken number words (everything after the trigger word) to
    # digits and type them.
    # noinspection PyProtectedMember
    count = text_to_number([parse_word(w) for w in m._words[1:]])
    insert(str(count))


# General dictation/input commands. `vocab`, `numerals`, `delay`, `text`,
# `word`, etc. are defined earlier in this file, outside the visible chunk.
ctx = Context("input")
ctx.vocab = vocab
ctx.keymap(
    {
        "over": delay(0.3),
        "literal <dgndictation>++": text,
        "say <dgndictation> [over]": text,
        # "sentence <dgndictation> [over]": sentence_text,
        # Formatters.
        # "comma <dgndictation> [over]": [", ", text],
        # "period <dgndictation> [over]": [". ", text],
        # "more <dgndictation> [over]": [" ", text],
        "more <dgndictation> [over]": text_with_leading(" "),
        "word <dgnwords>": word,
        f"numeral {numerals}": type_number,
        "slap": [Key("cmd-right enter")],
        "cape": [Key("escape")],
        # NOTE(review): chunk truncated — the keymap continues past here.
        "pa": [Key("space")],
# NOTE(review): chunk begins mid-function — the lines below are the tail of
# a paste helper that restores the previous clipboard contents.
        send_idea_command("action EditorPaste")
    finally:
        clip.set(old_clip)


def is_real_jetbrains_editor(app, window):
    # Only activate for JetBrains IDEs we have a plugin port mapping for.
    if app.bundle not in port_mapping:
        return False
    # XXX Expose "does editor have focus" as plugin endpoint.
    # XXX Window title empty in full screen.
    return "[" in window.title or len(window.title) == 0


# group = ContextGroup("jetbrains")
ctx = Context("jetbrains", func=is_real_jetbrains_editor)  # , group=group)
ctx.vocab = ["docker", "GitHub"]
ctx.vocab_remove = ["doctor", "Doctor"]
ctx.keymap(
    {
        "complete": idea("action CodeCompletion"),
        "smarter": idea("action SmartTypeCompletion"),
        "finish": idea("action EditorCompleteStatement"),
        "zoom": idea("action HideAllWindows"),
        "find (usage | usages)": idea("action FindUsages"),
        "(refactor | reflector) [<dgndictation>]": [
            idea("action Refactorings.QuickListPopupAction"),
            text,
        ],
        "fix [this]": idea("action ShowIntentionActions"),
        # NOTE(review): chunk truncated mid-entry here.
        "fix next [error]": [
            idea("action GotoNextError"),
from talon import applescript, keychain, tap, ui
from talon_plugins import microphone
from ..utils import add_vocab, delay, select_last_insert, text

# NOTE(review): clip, press, subprocess, Context and Key are used below but
# not imported in this chunk — presumably imported above the visible region;
# confirm against the full file.


def learn_selection(_):
    # Copy the current selection (cmd-c) and add each word of it to the
    # persistent vocabulary via utils.add_vocab.
    with clip.capture() as s:
        press("cmd-c", wait=2000)
    words = s.get().split()
    add_vocab(words)
    print("Learned " + ",".join(words))


ctx = Context("misc")
ctx.vocab = ["Jira"]
ctx.keymap({
    "learn selection": learn_selection,
    "(alfred | launch)": Key("cmd-space"),
    "(alfred | launch) <dgndictation> [over]": [Key("cmd-space"), delay(0.4),
                                                text],
    "correct": select_last_insert,
    "toggle dark": lambda _: subprocess.check_call(
        ["open", "/System/Library/CoreServices/ScreenSaverEngine.app"]),
    # NOTE(review): chunk truncated — the keymap continues past here.
    "terminal": lambda _: [ui.launch(bundle="com.googlecode.iterm2")],
    # "focus GoLand": lambda _: [ui.launch(bundle="com.jetbrains.goland")],
from talon.voice import Context, Key
from ..utils import is_filetype, snake_text, caps_text, camel_case, text

FILETYPES = (".go", )
PYTHON_ALIAS = "( pie | pipe )"
GO_ALIAS = "go"

# ctx = Context("go", func=is_filetype(FILETYPES))
ctx = Context("go")
ctx.vocab = ["goroutine", "nil", "golang", "waitgroup"]
ctx.keymap({
    # NOTE(review): no space before "pirate" — this defines the spoken form
    # "gopirate", unlike every other entry ("go sign", "go chan", ...);
    # likely a missing " " and worth confirming with the author.
    GO_ALIAS + "pirate": "return ",
    # Symbols
    GO_ALIAS + " sign": " := ",
    GO_ALIAS + " chan": "chan ",
    GO_ALIAS + " var": "var ",
    GO_ALIAS + " make": ["make()", Key("left")],
    GO_ALIAS + " print": ["fmt.Println()", Key("left")],
    GO_ALIAS + " log": ["log.Printf()", Key("left")],
    GO_ALIAS + " amp": "&",
    GO_ALIAS + " format": ["fmt.Sprintf()", Key('left')],
    GO_ALIAS + " range": "range ",
    # NOTE(review): chunk truncated mid-entry here.
    GO_ALIAS + " arrow":